repo_name: string (lengths 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
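Each record below pairs a source file (stored as one escaped string) with the third-party API calls detected in it. A minimal sketch of how records in this schema could be iterated, assuming the table is exported as JSON Lines with the fields listed above; the file name "code_apis.jsonl" and the single-element list layout are assumptions inferred from the rows shown here, not a documented loader.

import json

# Minimal sketch, assuming a JSON Lines export of the table above;
# "code_apis.jsonl" is a hypothetical file name.
with open("code_apis.jsonl") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]      # e.g. "chunghyunhee/GoldMine"
        sha = record["hexsha"][0]       # commit snapshot the file was taken from
        path = record["file_path"][0]   # path of the file inside the repository
        source = record["code"][0]      # full file contents as a single string
        apis = record["apis"][0]        # library calls detected in that file
        print(f"{repo}@{sha[:8]} {path}: {len(apis)} API calls")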
chunghyunhee/GoldMine
[ "d9cb6279588aff88d4b7a9225024c017e3de0333" ]
[ "experiment/hyper-params-search/hps/algorithms/ga/ParticleSwarmOptimization.py" ]
[ "## SA + PSO + GA + boundary(GA)\n\nimport numpy as np\nimport random\nimport time\n\nfrom hps.algorithms.HPOptimizationAbstract import HPOptimizationAbstract\nfrom hps.algorithms.ga.SimulatedAnnealing_3 import SimulatedAnnealing\nfrom hps.algorithms.ga.GeneticAlgorithm import GeneticAlgorithm\n\nclass ParticleSwarmOptimization(GeneticAlgorithm, SimulatedAnnealing, HPOptimizationAbstract):\n def __init__(self, **kwargs):\n # inheritance init\n super(ParticleSwarmOptimization, self).__init__(**kwargs)\n self._check_hpo_params()\n self.DUP_CHECK = False\n\n def _check_hpo_params(self):\n self._n_pop = self._n_params\n\n ## PSO\n self._k = self._hpo_params[\"k\"] # decide local optima\n self._w = self._hpo_params[\"w\"]\n self._n_steps = self._hpo_params[\"n_steps\"]\n self._c1 = self._hpo_params[\"c1\"] # cognitive constants\n self._c2 = self._hpo_params[\"c2\"] # social constants\n self._delta = self._hpo_params[\"delta\"] # modified PSO\n self.count = self._hpo_params[\"count\"]\n\n ## SA\n self._T0 = self._hpo_params[\"T0\"]\n self._alpha = self._hpo_params[\"alpha\"]\n\n ## GA\n self._top = int(float(self._n_params * 0.5))\n self._n_prob = self._n_params\n self._mut_prob = self._hpo_params[\"mut_prob\"]\n self._cx_prob = self._hpo_params[\"cx_prob\"]\n self._n_sel = int(float(self._hpo_params[\"sel_ratio\"] * self._n_prob))\n self._n_mut = int(float(self._hpo_params[\"mut_ratio\"] * self._n_prob))\n self._n_cx = int(float(self._hpo_params[\"cx_ratio\"] * self._n_prob))\n\n # generate candidate function\n def _generate(self, param_list, score_list, iter_num):\n result_param_list = list()\n p_best_list = list()\n bound_dict_list = list()\n\n # generate random hyperparameter\n best_param_list = self._particle(param_list)\n\n # pbest\n p_best = self._p_best(best_param_list, score_list)\n p_best_list.append(p_best)\n\n # 상위 score의 particle은 GA로 새로 생성\n if len(bound_dict_list) == 0:\n # self._pbounds값으로 대체\n for i in range(len(best_param_list)):\n # bound_dict_list에 첫번째 값은 pbounds\n bound_dict_list.append(self._pbounds)\n GA_param_list = self._generate_GA_particle(best_param_list, bound_dict_list)\n else :\n bound_dict_list = self.ga_boundary(best_param_list, iter_num, bound_dict_list)\n GA_param_list = self._generate_GA_particle(best_param_list, bound_dict_list)\n\n # gbest갱신\n g_best, p_best_list = self._g_best(GA_param_list, p_best_list)\n # k번동안 update되지 않으면 sa로 새로 갱신\n g_best_pso = self.update_gbest(g_best)\n\n self.LOGGER.info(\"{}\".format(g_best_pso))\n\n # position 변경\n compute_velocity_params = self.compute_velocity(GA_param_list, p_best, g_best_pso)\n update_position_params = self.update_position(GA_param_list, compute_velocity_params)\n result_param_list += update_position_params\n\n # if duplicate, generate new particle\n result_param_list = self._remove_duplicate_params(result_param_list)\n\n num_result_params = len(result_param_list)\n ## leak\n if num_result_params < self._n_pop:\n result_param_list += self._generate_param_dict_list(self._n_pop - num_result_params)\n ## over\n elif num_result_params > self._n_pop:\n random.shuffle(result_param_list)\n result_param_list = result_param_list[:self._n_pop]\n\n return result_param_list\n\n def _generate_GA_particle(self, param_list, bound_dict_list):\n top_param_list = list()\n\n for i in range(1, self._top):\n top_param_list.append(param_list[i])\n\n # GA 적용\n result_param_list = list() # 결과반환\n best_param_list = list() # initial hyperparameter\n\n best_param_list += top_param_list\n sel_params = 
self._selection(best_param_list)\n mut_params = self._mutation(best_param_list, bound_dict_list)\n cx_params = self._crossover(best_param_list)\n\n result_param_list += sel_params + mut_params + cx_params\n\n # 전체 particle list에서 GA 생성한 부분만 새로 채워서 반환\n for i in range(1, self._top):\n param_list[i] = result_param_list[i]\n\n return param_list\n\n\n # 해당 iteration 중 모든 particle에서 최대\n def _p_best(self, param_list, score_list):\n if len(score_list) == 0:\n return param_list[0]\n else :\n max_score_value = max(score_list)\n for i in range(len(score_list)):\n if max_score_value == score_list[i]:\n return param_list[i]\n\n # global에서 최대\n def _g_best(self, param_list, p_best_list):\n all_list = list()\n\n if len(p_best_list) == 0:\n all_list.append(param_list[0])\n return param_list[0], all_list\n else:\n global_value = max(p_best_list)\n for i in range(len(p_best_list)):\n if global_value == p_best_list[i]:\n all_list.append(global_value)\n return global_value, all_list\n\n # global value를 받아 sa진행\n def update_gbest(self, global_dict):\n self.count += 1 # 현제 step count\n\n if self.count % self._k == 0 :\n result_param_list = list()\n best_param_list = list()\n\n best_param_list.append(global_dict)\n neighbor_param_list = self._neighbor_selection(best_param_list)\n result_param_list += best_param_list + neighbor_param_list # ( glbal_best , neighbor_candidate )\n\n if len(self.score_list) != 0:\n result_param_list = self.accept(result_param_list)\n\n return result_param_list[0]\n\n else :\n result_param_list = list()\n result_param_list.append(global_dict)\n return result_param_list[0]\n\n\n # random init particle position\n def _particle(self, param_list):\n if len(param_list) == 0:\n return self._generate_stratified_param_list(self._n_pop)\n else :\n return param_list\n\n\n\n def compute_velocity(self,param_dict_list, pos_best_i, g_best_i):\n # initialize each velocity dictionary in list\n velocity_list = list()\n velocity_dict = dict()\n\n for _, key in enumerate(self._pbounds):\n velocity_dict[key] = random.uniform(-1, 1)\n for _ in range(self._n_pop):\n velocity_list.append(velocity_dict)\n\n for i, param_dict in enumerate(param_dict_list):\n for j in param_dict.keys():\n ## gbest값에 따라 parameter다르게 선정\n r1 = random.random()\n r2 = random.random()\n\n # modified velocity for multi-dim\n if type(param_dict[j]) == int or type(param_dict[j]) == float:\n\n if (abs(velocity_list[i][j]) + abs(g_best_i[j] - param_dict[j]) < self._delta) and type(param_dict[j] == float):\n velocity_list[i][j] = (2*random.random()-1) * self._delta\n else:\n vel_cognitive = self._c1*r1*(pos_best_i[j] - param_dict[j])\n vel_social = self._c2*r2*(g_best_i[j] - param_dict[j])\n velocity_list[i][j] = self._w * velocity_list[i][j] + vel_cognitive + vel_social\n\n else :\n vel_cognitive = self._c1 * r1\n vel_social = self._c2 * r2\n velocity_list[i][j] = self._w * velocity_list[i][j] + vel_cognitive + vel_social\n\n return velocity_list\n\n\n # update position based on updated velocity\n def update_position(self, param_list, velocity_i):\n\n for i, param_dict in enumerate(param_list):\n for j in param_dict.keys():\n if type(param_dict[j]) == int or type(param_dict[j]) == float:\n param_dict[j] = param_dict[j] + velocity_i[i][j]\n # 범위 설정\n min = self._pbounds[j][0]\n max = self._pbounds[j][1]\n param_dict[j] = np.clip(param_dict[j], min, max)\n # categorical 변수의 경우\n else :\n param_dict[j] = param_dict[j]\n return param_list\n\n # GA에 들어가는 boundary\n ## abstract, GA_mutate 변경\n def ga_boundary(self, param_list, iter, 
bound_dict_list):\n\n # mutation rate init\n mutrate = self._mut_prob\n\n # 각 particle 별 bounds 따로 생성\n for i, param_dict in enumerate(param_list):\n for j in param_dict.keys():\n inner_bound_list = list()\n\n if type(param_dict[j]) == int or type(param_dict[j]) == float :\n\n # 이전의 bound에서 값 벋아서 변경\n mutrange = (bound_dict_list[i][1] - bound_dict_list[i][0]) * ( 1 - iter / self._n_steps )**( 5/mutrate )\n\n upper_bounds = param_dict[j] + mutrange\n lower_bounds = param_dict[j] - mutrange\n\n # 기존 범위에서 벗어나는지 확인\n if lower_bounds < self._pbounds[j][0] :\n lower_bounds = self._pbounds[j][0]\n if upper_bounds > self._pbounds[j][1]:\n upper_bounds = self._pbounds[j][1]\n\n inner_bound_list.append(lower_bounds)\n inner_bound_list.append(upper_bounds)\n\n # param별 bound지정\n param_dict[j] = inner_bound_list\n\n bound_dict_list.append(param_dict)\n return bound_dict_list\n\n # boundary 변경할 수 있는 mutation methods\n def _mutation(self, param_dict_list, bound_dict_list):\n mut_params = list()\n\n for param_dict in param_dict_list[:self._n_mut]:\n temp_param_list = list()\n temp_param_dict = dict()\n\n # 각 particle별로 bound range생성\n for j in range(len(bound_dict_list)):\n for _ , key in enumerate(bound_dict_list[j]):\n if np.random.rand() > self._mut_prob:\n temp_param_dict[key] = self._generate_new_param(key, bound_dict_list[j])\n else :\n temp_param_dict[key] = param_dict[key]\n temp_param_list.append(temp_param_dict)\n\n mut_params += temp_param_list\n\n return mut_params\n\n# main __init__ to execute in this single file\nif __name__ == '__main__':\n hprs_info = {\n \"hpo_params\" : {\n \"w\" : 0.1,\n \"delta\" : 1,\n \"n_params\" : 10,\n \"n_steps\" : 20,\n \"c1\": 0.3,\n \"c2\": 0.3,\n \"k_val\": 5,\n \"eval_key\": \"accuracy\"\n },\n \"ml_params\":{\n \"model_param\":{\n \"input_units\" : \"100\",\n \"output_units\" : \"1\",\n \"global_step\" : \"10\",\n \"early_type\" : \"2\",\n \"min_step\" : \"10\",\n \"early_key\" : \"accuracy\",\n \"early_value\" : \"0.98\",\n \"method_type\" : \"Basic\",\n \"global_sn\" : \"0\",\n \"alg_sn\" : \"0\",\n \"algorithm_type\" : \"classifier\",\n \"job_type\" : \"learn\"\n },\n \"pbounds\":{\n \"dropout_prob\": [0, 0.5],\n \"optimizer_fn\": \"Adam\",\n \"learning_rate\": 0.8,\n \"act_fn\": \"Sigmoid\",\n \"hidden_units\" : 50\n }\n }\n }\n pso = ParticleSwarmOptimization(hps_info = hprs_info)\n best_params = pso._generate([], [])\n\n print(best_params)" ]
[ [ "numpy.random.rand", "numpy.clip" ] ]
zhuyuecai/spektral
[ "6ef68e265e5304e864c7daa1c250a62d2ef4aa78" ]
[ "examples/other/graph_signal_classification_mnist.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy\nfrom tensorflow.keras.metrics import sparse_categorical_accuracy\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\n\nfrom spektral.data import MixedLoader\nfrom spektral.datasets.mnist import MNIST\nfrom spektral.layers import GCNConv, GlobalSumPool\nfrom spektral.layers.ops import sp_matrix_to_sp_tensor\n\n# Parameters\nbatch_size = 32 # Batch size\nepochs = 1000 # Number of training epochs\npatience = 10 # Patience for early stopping\nl2_reg = 5e-4 # Regularization rate for l2\n\n# Load data\ndata = MNIST()\n\n# The adjacency matrix is stored as an attribute of the dataset.\n# Create filter for GCN and convert to sparse tensor.\ndata.a = GCNConv.preprocess(data.a)\ndata.a = sp_matrix_to_sp_tensor(data.a)\n\n# Train/valid/test split\ndata_tr, data_te = data[:-10000], data[-10000:]\nnp.random.shuffle(data_tr)\ndata_tr, data_va = data_tr[:-10000], data_tr[-10000:]\n\n# We use a MixedLoader since the dataset is in mixed mode\nloader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)\nloader_va = MixedLoader(data_va, batch_size=batch_size)\nloader_te = MixedLoader(data_te, batch_size=batch_size)\n\n\n# Build model\nclass Net(Model):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = GCNConv(32, activation=\"elu\", kernel_regularizer=l2(l2_reg))\n self.conv2 = GCNConv(32, activation=\"elu\", kernel_regularizer=l2(l2_reg))\n self.flatten = GlobalSumPool()\n self.fc1 = Dense(512, activation=\"relu\")\n self.fc2 = Dense(10, activation=\"softmax\") # MNIST has 10 classes\n\n def call(self, inputs):\n x, a = inputs\n x = self.conv1([x, a])\n x = self.conv2([x, a])\n output = self.flatten(x)\n output = self.fc1(output)\n output = self.fc2(output)\n\n return output\n\n\n# Create model\nmodel = Net()\noptimizer = Adam()\nloss_fn = SparseCategoricalCrossentropy()\n\n\n# Training function\n@tf.function\ndef train_on_batch(inputs, target):\n with tf.GradientTape() as tape:\n predictions = model(inputs, training=True)\n loss = loss_fn(target, predictions) + sum(model.losses)\n acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss, acc\n\n\n# Evaluation function\ndef evaluate(loader):\n step = 0\n results = []\n for batch in loader:\n step += 1\n inputs, target = batch\n predictions = model(inputs, training=False)\n loss = loss_fn(target, predictions)\n acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))\n results.append((loss, acc, len(target))) # Keep track of batch size\n if step == loader.steps_per_epoch:\n results = np.array(results)\n return np.average(results[:, :-1], 0, weights=results[:, -1])\n\n\n# Setup training\nbest_val_loss = 99999\ncurrent_patience = patience\nstep = 0\n\n# Training loop\nresults_tr = []\nfor batch in loader_tr:\n step += 1\n\n # Training step\n inputs, target = batch\n loss, acc = train_on_batch(inputs, target)\n results_tr.append((loss, acc, len(target)))\n\n if step == loader_tr.steps_per_epoch:\n results_va = evaluate(loader_va)\n if results_va[0] < best_val_loss:\n best_val_loss = results_va[0]\n current_patience = patience\n results_te = evaluate(loader_te)\n else:\n current_patience -= 1\n if current_patience == 0:\n 
print(\"Early stopping\")\n break\n\n # Print results\n results_tr = np.array(results_tr)\n results_tr = np.average(results_tr[:, :-1], 0, weights=results_tr[:, -1])\n print(\n \"Train loss: {:.4f}, acc: {:.4f} | \"\n \"Valid loss: {:.4f}, acc: {:.4f} | \"\n \"Test loss: {:.4f}, acc: {:.4f}\".format(\n *results_tr, *results_va, *results_te\n )\n )\n\n # Reset epoch\n results_tr = []\n step = 0\n" ]
[ [ "numpy.array", "tensorflow.GradientTape", "tensorflow.keras.metrics.sparse_categorical_accuracy", "numpy.random.shuffle", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.regularizers.l2", "numpy.average", "tensorflow.keras.optimizers.Adam" ] ]
simon-schaefer/mantrap
[ "9a2b3f32a0005cc0cb79bb78924f09da5a94587d" ]
[ "mantrap/modules/base/optimization_module.py" ]
[ "import abc\nimport logging\nimport typing\n\nimport numpy as np\nimport torch\n\nimport mantrap.environment\n\n\nclass OptimizationModule(abc.ABC):\n\n def __init__(self, t_horizon: int, weight: float = 0.0,\n env: mantrap.environment.base.GraphBasedEnvironment = None,\n has_slack: bool = False, slack_weight: float = 0.0):\n \"\"\"General objective and constraint module.\n\n For an unified and general implementation of objective and constraint function modules, this superclass\n implements methods for computing both, either analytically or numerically based on the PyTorch autograd\n package. Thereby all objective and constraint computations should be purely based on the robot's (ego)\n trajectory, as well as the possibility to perform further roll-outs in a given simulation environment.\n\n Combining objective and constraint in one module object grants the possibility to introduce soft\n constraints using slack variables, while also merely implementing pure objective and constraint\n modules seamlessly.\n For making the constraints soft, slack variables are added internally to the objective and updated\n with every constraint call. Therefore only the `has_slack` flag has to be set to True. However for\n simplicity we assume that then each constraint of the defined module is soft, otherwise the module\n can just divided into two modules.\n\n For multiprocessing the same module object is shared over all processes (even sharing memory), in order to\n avoid repeated pre-computation steps online. However to avoid racing conditions this means that internal\n variables of the class object are also shared and not owned by the process. Altering these variables in\n one process would then lead to un-expected outcomes in the other processes. Therefore each function comes\n with a `tag` argument which classifies the current process the function runs in. When internal variables\n have to be used, then they should be assigned to some dictionary with the tags as keys, so that the\n function only alters variables which are assigned to this process.\n \"\"\"\n assert t_horizon is None or t_horizon >= 1\n assert weight is None or weight >= 0.0\n\n self._weight = weight\n self._t_horizon = t_horizon\n\n # Giving every optimization module access to a (large) simulation environment object is not\n # necessary and an un-necessary use of space, even when it is just a pointer.\n self._env = env # may be None\n if env is not None:\n assert env.ego is not None\n\n # Logging variables for objective and gradient values. For logging the latest variables are stored\n # as class parameters and appended to the log when calling the `logging()` function, in order to avoid\n # appending multiple values within one optimization step.\n self._constraint_current = {} # type: typing.Dict[str, np.ndarray]\n self._obj_current = {} # type: typing.Dict[str, float]\n self._grad_current = {} # type: typing.Dict[str, np.ndarray]\n self._jacobian_current = {} # type: typing.Dict[str, np.ndarray]\n\n # Slack variables - Slack variables are part of both the constraints and the objective function,\n # therefore have to stored internally to be shared between both functions. 
However as discussed\n # above during multi-processing the same module object is shared over multiple processes,\n # therefore store the slack variable values in dictionaries assigned to the processes tag.\n assert slack_weight >= 0.0\n self._slack = {} # type: typing.Dict[str, torch.Tensor]\n self._has_slack = has_slack\n self._slack_weight = slack_weight\n\n def reset_env(self, env: mantrap.environment.base.GraphBasedEnvironment):\n if self._env is not None:\n self._env = env\n\n ###########################################################################\n # Objective ###############################################################\n ###########################################################################\n def objective(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str) -> float:\n \"\"\"Determine objective value for passed ego trajectory by calling the internal `compute()` method.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n objective = self.compute_objective(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n # Convert objective in standard optimization format (as float).\n if objective is None:\n obj_value = 0.0 # if objective not defined simply return 0.0\n else:\n obj_value = float(objective.item())\n\n # Return objective adds the objectives weight as well as normalizes it.\n return self._return_objective(obj_value, tag=tag)\n\n def compute_objective(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[torch.Tensor, None]:\n \"\"\"Determine internal objective value + slack variables.\n \n Add slack based part of objective function. The value of the slack variable can only be\n updated if the constraints have been computed before. However using general optimization\n frameworks we cannot enforce the order to method calls, therefore to be surely synced\n we have to compute the constraints here first (!).\n Add the slack-based objective if the constraint is violated, otherwise add zero (since\n a constraint should not be optimised, just be feasible). The large `slack_weight` will\n thereby force the optimiser to make some decision to become feasible again.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n assert mantrap.utility.shaping.check_ego_trajectory(ego_trajectory, pos_and_vel_only=True)\n obj_value = self._objective_core(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n if self._has_slack:\n obj_value = torch.zeros(1) if obj_value is None else obj_value\n _ = self.compute_constraint(ego_trajectory, ado_ids=ado_ids, tag=tag)\n slack_non_zero = torch.max(self._slack[tag], torch.zeros_like(self._slack[tag]))\n obj_value += self._slack_weight * slack_non_zero.sum()\n\n return obj_value\n\n @abc.abstractmethod\n def _objective_core(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[torch.Tensor, None]:\n \"\"\"Determine objective value core method.\n\n The objective value should be returned either as PyTorch tensor or `None`. It cannot be simplified as\n floating point number directly, as next to its value it is important to return the gradient function,\n when computing its gradient. 
When the objective is not defined, simply return `None`.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n raise NotImplementedError\n\n ###########################################################################\n # Gradient ################################################################\n ###########################################################################\n def gradient(self, ego_trajectory: torch.Tensor, grad_wrt: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> np.ndarray:\n \"\"\"Determine gradient vector for passed ego trajectory. Therefore determine the objective value by\n calling the internal `compute()` method and en passant build a computation graph. Then using the pytorch\n auto-grad library compute the gradient vector through the previously built computation graph.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param grad_wrt: vector w.r.t. which the gradient should be determined.\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n assert mantrap.utility.shaping.check_ego_trajectory(ego_trajectory, pos_and_vel_only=True)\n\n # Analytical solutions are more exact and (usually more efficient) to compute, when known, compared\n # to the numerical \"graphical\" solution. Therefore, first check whether an analytical solution is\n # defined for this module.\n gradient_analytical = self.compute_gradient_analytically(ego_trajectory, grad_wrt, ado_ids=ado_ids, tag=tag)\n if gradient_analytical is not None:\n gradient = gradient_analytical\n\n # Otherwise compute the jacobian using torch auto-grad function, for each constraint individually.\n else:\n assert grad_wrt.requires_grad\n assert ego_trajectory.requires_grad # otherwise objective cannot have gradient function\n\n # Compute the objective value and check whether a gradient between the value and the\n # ego_trajectory input (which has been assured to require a gradient) exists, if the\n # module-conditions for that are met.\n objective = self._objective_core(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n # If objective is None return an zero gradient of the length of the `grad_wrt` tensor.\n # In general the objective might not be affected by the `ego_trajectory`, then it does not have\n # a gradient function and the gradient is not defined. Then the objective gradient is assumed\n # to be zero.\n if objective is None or not self.gradient_condition():\n gradient = np.zeros(grad_wrt.numel())\n\n # Otherwise compute the gradient \"numerically\" using the PyTorch auto-grad package.\n else:\n gradient = self.compute_gradient_auto_grad(objective, grad_wrt=grad_wrt)\n\n return self._return_gradient(gradient, tag=tag)\n\n def compute_gradient_analytically(\n self, ego_trajectory: torch.Tensor, grad_wrt: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[np.ndarray, None]:\n \"\"\"Compute objective gradient vector analytically.\n\n While the gradient vector of the objective can be computed automatically using PyTorch's automatic\n differentiation package there might be an analytic solution, which is when known for sure more\n efficient to compute. 
Although it is against the convention to use torch representations whenever\n possible, this function returns numpy arrays, since the main gradient() function has to return\n a numpy array. Hence, not computing based on numpy arrays would just introduce an un-necessary\n `.detach().numpy()`.\n\n When no analytical solution is defined (or too hard to determine) return None.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param grad_wrt: vector w.r.t. which the gradient should be determined.\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n return None\n\n ###########################################################################\n # Constraint ##############################################################\n ###########################################################################\n def constraint(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str) -> np.ndarray:\n \"\"\"Determine constraint value for passed ego trajectory by calling the internal `compute()` method.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n constraints = self.compute_constraint(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n # Convert constraints in standard optimization format (as numpy arrays).\n if constraints is None:\n constraints = np.array([])\n else:\n constraints = constraints.detach().numpy()\n\n # Return constraint normalizes the constraint after it has been computed.\n return self._return_constraint(constraints, tag=tag)\n\n def compute_constraint(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[torch.Tensor, None]:\n \"\"\"Determine internal constraints + slack constraints.\n\n Compute internal constraints and convert them to equality constraints by updating and adding the\n slack variables. 
Then add further constraints for the slack variables themselves (>= 0).\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n assert mantrap.utility.shaping.check_ego_trajectory(ego_trajectory, pos_and_vel_only=True)\n constraints = self._constraint_core(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n # Update slack variables (if any are defined for this module).\n if self._has_slack and constraints is not None:\n self._slack[tag] = - constraints\n # constraints = constraints + self._slack[tag] # constraint - slack (slacked variables)\n\n return constraints\n\n @abc.abstractmethod\n def _constraint_core(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[torch.Tensor, None]:\n \"\"\"Determine constraint value core method.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n raise NotImplementedError\n\n ###########################################################################\n # Jacobian ################################################################\n ###########################################################################\n def jacobian(self, ego_trajectory: torch.Tensor, grad_wrt: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> np.ndarray:\n \"\"\"Determine jacobian matrix for passed ego trajectory.\n\n Therefore at first check whether an analytical solution is defined, if not determine the constraint values\n by calling the internal `compute()` method and en passant build a computation graph. Then using the PyTorch\n autograd library compute the jacobian matrix based on the constraints computation graph.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param grad_wrt: vector w.r.t. which the gradient should be determined.\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n assert mantrap.utility.shaping.check_ego_trajectory(ego_trajectory, pos_and_vel_only=True)\n\n # Analytical solutions are more exact and (usually more efficient) to compute, when known, compared\n # to the numerical \"graphical\" solution. 
Therefore, first check whether an analytical solution is\n # defined for this module.\n jacobian_analytical = self.compute_jacobian_analytically(ego_trajectory, grad_wrt, ado_ids=ado_ids, tag=tag)\n if jacobian_analytical is not None:\n jacobian = jacobian_analytical\n\n # Otherwise compute the jacobian using torch auto-grad function, for each constraint individually.\n else:\n # Compute the constraint values and check whether a gradient between them and the ego_trajectory\n # input (which has been assured to require a gradient) exists, if the module-conditions for\n # that are met.\n assert ego_trajectory.requires_grad # otherwise constraints cannot have gradient function\n constraints = self._constraint_core(ego_trajectory, ado_ids=ado_ids, tag=tag)\n\n # If constraint vector is None, directly return empty jacobian vector.\n if constraints is None:\n jacobian = np.array([])\n\n # Otherwise check for the existence of a gradient, as explained above.\n # In general the constraints might not be affected by the `ego_trajectory`, then they does not have\n # gradient function and the gradient is not defined. Then the jacobian is assumed to be zero.\n else:\n # If constraints are not None (exist) but the gradient cannot be computed, e.g. since the\n # constraints do not depend on the ego_trajectory, then return a zero jacobian.\n if not self.gradient_condition():\n grad_size = int(grad_wrt.numel())\n constraint_size = int(constraints.numel())\n jacobian = np.zeros(grad_size * constraint_size)\n\n # Otherwise determine the jacobian numerically using the PyTorch autograd package.\n else:\n jacobian = self.compute_gradient_auto_grad(constraints, grad_wrt=grad_wrt)\n\n return jacobian\n\n def jacobian_structure(self, ado_ids: typing.List[str], tag: str) -> typing.Union[np.ndarray, None]:\n \"\"\"Return the sparsity structure of the jacobian, i.e. the indices of non-zero elements.\n\n When not defined otherwise the jacobian structure is determined by determining the jacobian for\n some random input structure (and the current environment), so that the non-zero indices can be\n determined afterwards. However this way of computing the jacobian structure highly depends on\n efficient calculation of the jacobian matrix, and is therefore only available if the the\n `compute_jacobian_analytically()` function is defined.\n\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n :returns: indices of non-zero elements of jacobian.\n \"\"\"\n if self.num_constraints(ado_ids=ado_ids) == 0:\n return None\n\n controls = torch.rand((self.t_horizon, 2)) # assumption: grad_wrt = controls (!)\n ego_trajectory = self._env.ego.unroll_trajectory(controls, dt=self._env.dt)\n jacobian = self.compute_jacobian_analytically(ego_trajectory, grad_wrt=controls, ado_ids=ado_ids, tag=tag)\n\n if jacobian is not None:\n return np.nonzero(jacobian)[0]\n else:\n return None\n\n def compute_jacobian_analytically(\n self, ego_trajectory: torch.Tensor, grad_wrt: torch.Tensor, ado_ids: typing.List[str], tag: str\n ) -> typing.Union[np.ndarray, None]:\n \"\"\"Compute Jacobian matrix analytically.\n\n While the Jacobian matrix of the constraint can be computed automatically using PyTorch's automatic\n differentiation package there might be an analytic solution, which is when known for sure more\n efficient to compute. 
Although it is against the convention to use torch representations whenever\n possible, this function returns numpy arrays, since the main jacobian() function has to return\n a numpy array. Hence, not computing based on numpy arrays would just introduce an un-necessary\n `.detach().numpy()`.\n\n When no analytical solution is defined (or too hard to determine) return None.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param grad_wrt: vector w.r.t. which the gradient should be determined.\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n return None\n\n ###########################################################################\n # Autograd Differentiation ################################################\n ###########################################################################\n def compute_gradient_auto_grad(self, x: torch.Tensor, grad_wrt: torch.Tensor) -> np.ndarray:\n \"\"\"Compute derivative of x with respect to grad_wrt.\n\n Compute the gradient/jacobian/etc. of some vector x with respect to some tensor `grad_wrt`\n using the PyTorch autograd, automatic differentiation package. Here we assume that both are\n connected by some computational graph (PyTorch graph) that can be used for differentiation.\n\n A comment about multiprocessing: Computing the gradients in parallel would be a good match\n for multiple processing, since it is fairly independent from each other, given the shared\n memory of the computation graph.\n\n .. code-block:: python\n\n import torch.multiprocessing as mp\n\n mp.set_start_method('spawn')\n x.share_memory_()\n grad_wrt.share_memory_()\n gradient.share_memory_()\n\n def compute(x_i, grad_wrt_i):\n grad = torch.autograd.grad(element, grad_wrt, retain_graph=True, only_inputs=True)[0]\n return grad.flatten().detach()\n\n processes = []\n for i_process in range(8):\n p = mp.Process(target=compute, args=(x[i_process], grad_wrt, ))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n Here the torch.multiprocessing library is used to compute the gradient over the whole tensor x in\n multiple parallel processes. Therefore the tensors of both x and grad_wrt are shared over all\n processes using the `.share_memory()` method and all processes are launched with a different\n element of the tensor x. However as shown below sharing a computation graph, i.e. tensors that\n require a gradient, being attached to this graph, over multiple processes is not supported in\n PyTorch and therefore not possible.\n\n .. code-block:: python\n\n def reduce_tensor(tensor):\n storage = tensor.storage()\n\n if tensor.requires_grad and not tensor.is_leaf:\n raise RuntimeError(\"Cowardly refusing to serialize non-leaf tensor which requires_grad, \"\n \"since autograd does not support crossing process boundaries. 
\"\n \"If you just want to transfer the data, call detach() on the tensor \"\n \"before serializing (e.g., putting it on the queue).\")\n\n To avoid this issue, the full computation graph would have to be re-built for every single element\n of x, which would create a lot of overhead due to repeated computations (as well as being quite not\n general and unreadable due to nesting instead of batching) and therefore not accelerate the computations.\n\n :param x: gradient input flat vector.\n :param grad_wrt: tensor with respect to gradients should be computed.\n :returns: flattened gradient tensor (x.size * grad_wrt.size)\n \"\"\"\n grad_size = int(grad_wrt.numel())\n x_size = int(x.numel())\n assert grad_wrt.requires_grad\n\n # If x has no gradient, we assume that this is not a bug, but there is actually no impact\n # of `grad_wrt` to the tensor x. Then the gradient is simply zero.\n if not x.requires_grad:\n logging.debug(f\"module {self.name} => un-rooted gradient detected !\")\n return np.zeros(grad_size)\n\n # Compute gradient batched, i.e. per element of x over the full `grad_wrt` tensor. However further\n # batching unfortunately is not possible using the autograd framework.\n if x_size == 1:\n gradient = torch.autograd.grad(x, grad_wrt, retain_graph=True)[0]\n else:\n gradient = torch.zeros(x_size * grad_size)\n for i, element in enumerate(x):\n grad = torch.autograd.grad(element, grad_wrt, retain_graph=True)[0]\n gradient[i * grad_size:(i + 1) * grad_size] = grad.flatten().detach()\n\n gradient = gradient.flatten().detach().numpy()\n\n # When we want to find the gradient of a tensor a with respect to some tensor b, but not all elements\n # of b affect a, although both are connected in the computation graph, the auto-grad function returns\n # a NaN at this place of `a.grad`. One might argue whether it should be 0 or NaN however as the optimizer\n # cannot deal with NaN gradient we use zeros here instead.\n # https://github.com/pytorch/pytorch/issues/15131\n # Additionally torch.clamp() or the L2 norm respectively kills the gradient at the border. So when the\n # computed control actions run into the limit, the gradient becomes NaN which otherwise should be maximal\n # or zero (depending on the side of limit). Choose zero here.\n gradient = np.nan_to_num(gradient, copy=False)\n return gradient\n\n ###########################################################################\n # Constraint Bounds #######################################################\n ###########################################################################\n def constraint_boundaries(self, ado_ids: typing.List[str]\n ) -> typing.Tuple[typing.Union[typing.List[typing.Union[float, None]]],\n typing.Union[typing.List[typing.Union[float, None]]]]:\n \"\"\"Compute module constraint boundaries.\n\n Assuming that the limits of a constraint are constant over the full time horizon, only the (scalar)\n limits are specific to the module, while the boundaries, i.e. the stacked limits over the time-horizon\n are generally stacked (and normalized !) 
for any module.\n \"\"\"\n lower, upper = self._constraint_limits()\n lower = self.normalize(lower) if lower is not None else None\n upper = self.normalize(upper) if upper is not None else None\n num_constraints = self._num_constraints(ado_ids=ado_ids) # number of internal constraints (!)\n lower_bounds = (lower * np.ones(num_constraints)).tolist() if lower is not None else [None] * num_constraints\n upper_bounds = (upper * np.ones(num_constraints)).tolist() if upper is not None else [None] * num_constraints\n\n # Slack variable introduced boundaries. We assume that the number of slack variables\n # is equal to number of constraint, i.e. that each constraint of the module is \"soft\".\n return lower_bounds, upper_bounds\n\n def _constraint_limits(self) -> typing.Tuple[typing.Union[float, None], typing.Union[float, None]]:\n \"\"\"Lower and upper bounds for constraint values.\"\"\"\n raise NotImplementedError\n\n def num_constraints(self, ado_ids: typing.List[str]) -> int:\n return self._num_constraints(ado_ids=ado_ids)\n\n @abc.abstractmethod\n def _num_constraints(self, ado_ids: typing.List[str]) -> int:\n raise NotImplementedError\n\n ###########################################################################\n # Constraint Violation ####################################################\n ###########################################################################\n def compute_violation(self, ego_trajectory: torch.Tensor, ado_ids: typing.List[str], tag: str) -> float:\n \"\"\"Determine constraint violation based on some input ego trajectory and ado ids list.\n\n The violation is the amount how much the solution state is inside the constraint active region.\n When the constraint is not active, then the violation is zero. The calculation is based on the last\n (cached) evaluation of the constraint function.\n\n :param ego_trajectory: planned ego trajectory (t_horizon, 5).\n :param ado_ids: ghost ids which should be taken into account for computation.\n :param tag: name of optimization call (name of the core).\n \"\"\"\n assert mantrap.utility.shaping.check_ego_trajectory(ego_trajectory, pos_and_vel_only=True)\n constraint = self.compute_constraint(ego_trajectory, ado_ids=ado_ids, tag=tag)\n if constraint is None:\n return self._violation(constraints=None)\n else:\n constraint_normalized = self.normalize(constraint.detach().numpy())\n return self._violation(constraints=constraint_normalized)\n\n def compute_violation_internal(self, tag: str) -> float:\n \"\"\"Determine constraint violation, i.e. how much the internal state is inside the constraint active region.\n When the constraint is not active, then the violation is zero. The calculation is based on the last (cached)\n evaluation of the constraint function.\n \"\"\"\n return self._violation(constraints=self._constraint_current[tag]) # already normalized\n\n def _violation(self, constraints: typing.Union[np.ndarray, None]) -> float:\n \"\"\"Compute the constraint violation based on its limits and normalized constraint values.\"\"\"\n if constraints is None or constraints.size == 0:\n return 0.0\n\n # Compute violation by subtracting the constraint values from the lower and upper constraint\n # boundaries. 
The \"over-hanging\" distance is the violation.\n assert self.env is not None\n violation = np.zeros(constraints.size)\n lower_bounds, upper_bounds = self.constraint_boundaries(ado_ids=self.env.ado_ids)\n for ic, constraint in enumerate(constraints):\n lower = lower_bounds[ic] if lower_bounds[ic] is not None else -np.inf\n upper = upper_bounds[ic] if upper_bounds[ic] is not None else np.inf\n violation[ic] = max(lower - constraints[ic], 0.0) + max(constraints[ic] - upper, 0.0)\n violation = violation.sum()\n\n # Due to numerical (precision) errors the violation might be non-zero, although the derived optimization\n # variable is just at the constraint border (as for example in linear programming). Ignore these violations.\n if np.abs(violation) < mantrap.constants.CONSTRAINT_VIOLATION_PRECISION:\n return 0.0\n else:\n return float(violation)\n\n ###########################################################################\n # Utility #################################################################\n ###########################################################################\n @abc.abstractmethod\n def normalize(self, x: typing.Union[np.ndarray, float]) -> typing.Union[np.ndarray, float]:\n \"\"\"Normalize the objective/constraint value for improved optimization performance.\n\n :param x: objective/constraint value in normal value range.\n :returns: normalized objective/constraint value in range [0, 1].\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def gradient_condition(self) -> bool:\n \"\"\"Condition for back-propagating through the objective/constraint in order to obtain the\n objective's gradient vector/jacobian (numerically). If returns True and the ego_trajectory\n itself requires a gradient, the objective/constraint value, stored from the last computation\n (`_current_`-variables) has to require a gradient as well.\"\"\"\n raise NotImplementedError\n\n def _return_constraint(self, constraint_value: np.ndarray, tag: str) -> np.ndarray:\n constraint_value = self.normalize(constraint_value)\n self._constraint_current[tag] = constraint_value\n return self._constraint_current[tag]\n\n def _return_jacobian(self, jacobian: np.ndarray, tag: str) -> np.ndarray:\n jacobian = self.normalize(jacobian)\n self._jacobian_current[tag] = jacobian\n return self._jacobian_current[tag]\n\n def _return_objective(self, obj_value: float, tag: str) -> float:\n obj_value = float(self.normalize(obj_value))\n self._obj_current[tag] = self.weight * obj_value\n return self._obj_current[tag]\n\n def _return_gradient(self, gradient: np.ndarray, tag: str) -> np.ndarray:\n gradient = self.normalize(gradient)\n self._grad_current[tag] = self.weight * gradient\n return self._grad_current[tag]\n\n ###########################################################################\n # Module backlog ##########################################################\n ###########################################################################\n def constraint_current(self, tag: str) -> np.ndarray:\n return self._constraint_current[tag]\n\n def inf_current(self, tag: str) -> float:\n return self.compute_violation_internal(tag=tag)\n\n def obj_current(self, tag: str) -> float:\n return self._obj_current[tag]\n\n def grad_current(self, tag: str) -> float:\n return np.linalg.norm(self._grad_current[tag])\n\n def jacobian_current(self, tag: str) -> float:\n return np.linalg.norm(self._jacobian_current[tag])\n\n def slack_variables(self, tag: str) -> torch.Tensor:\n return self._slack[tag]\n\n 
###########################################################################\n # Module Properties #######################################################\n ###########################################################################\n @property\n def weight(self) -> float:\n return self._weight\n\n @property\n def env(self) -> typing.Union[mantrap.environment.base.GraphBasedEnvironment, None]:\n return self._env\n\n @property\n def t_horizon(self) -> int:\n return self._t_horizon\n\n @property\n def name(self) -> str:\n raise NotImplementedError\n" ]
[ [ "torch.zeros", "torch.rand", "numpy.array", "numpy.linalg.norm", "numpy.nan_to_num", "numpy.zeros", "numpy.ones", "numpy.nonzero", "torch.autograd.grad", "numpy.abs", "torch.zeros_like" ] ]
timgates42/bokeh
[ "fb8b07b838f4d07d520cfe899779a11bc89f3c77" ]
[ "bokeh/core/property/wrappers.py" ]
[ "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide special versions of list and dict, that can automatically notify\nabout changes when used for property values.\n\nMutations to these values are detected, and the properties owning the\ncollection is notified of the changes. Consider the following model\ndefinition:\n\n.. code-block:: python\n\n class SomeModel(Model):\n\n options = List(String)\n\nIf we have an instance of this model, ``m`` then we can set the entire\nvalue of the ``options`` property at once:\n\n.. code-block:: python\n\n m.options = [\"foo\", \"bar\"]\n\nWhen we do this in the context of a Bokeh server application that is being\nviewed in a browser, this change is automatically noticed, and the\ncorresponding BokehJS property in the browser is synchronized, possibly\ncausing some change in the visual state of the application in the browser.\n\nBut it is also desirable that changes *inside* the ``options`` list also\nbe detected. That is, the following kinds of operations should also be\nautomatically synchronized between BokehJS and a Bokeh server:\n\n.. code-block:: python\n\n m.options.append(\"baz\")\n\n m.options[2] = \"quux\"\n\n m.options.insert(0, \"bar\")\n\nThe classes in this module provide this functionality.\n\n.. note::\n These classes form part of the very low-level machinery that implements\n the Bokeh model and property system. It is unlikely that any of these\n classes or their methods will be applicable to any standard usage or to\n anyone who is not directly developing on Bokeh's own infrastructure.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport copy\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom ...util.dependencies import import_optional\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\npd = import_optional('pandas')\n\n__all__ = (\n 'notify_owner',\n 'PropertyValueContainer',\n 'PropertyValueList',\n 'PropertyValueDict',\n 'PropertyValueColumnData',\n)\n\n#----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\ndef notify_owner(func):\n ''' A decorator for mutating methods of property container classes\n that notifies owners of the property container about mutating changes.\n\n Args:\n func (callable) : the container method to wrap in a notification\n\n Returns:\n wrapped method\n\n Examples:\n\n A ``__setitem__`` could be wrapped like this:\n\n .. 
code-block:: python\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super().__setitem__(i, y)\n\n The returned wrapped method will have a docstring indicating what\n original method it is wrapping.\n\n '''\n def wrapper(self, *args, **kwargs):\n old = self._saved_copy()\n result = func(self, *args, **kwargs)\n self._notify_owners(old)\n return result\n wrapper.__doc__ = \"Container method ``%s`` instrumented to notify property owners\" % func.__name__\n return wrapper\n\nclass PropertyValueContainer(object):\n ''' A base class for property container classes that support change\n notifications on mutating operations.\n\n This class maintains an internal list of property owners, and also\n provides a private mechanism for methods wrapped with\n :func:`~bokeh.core.property.wrappers.notify_owners` to update\n those owners when mutating changes occur.\n\n '''\n def __init__(self, *args, **kwargs):\n self._owners = set()\n super().__init__(*args, **kwargs)\n\n def _register_owner(self, owner, descriptor):\n self._owners.add((owner, descriptor))\n\n def _unregister_owner(self, owner, descriptor):\n self._owners.discard((owner, descriptor))\n\n def _notify_owners(self, old, hint=None):\n for (owner, descriptor) in self._owners:\n descriptor._notify_mutated(owner, old, hint=hint)\n\n def _saved_copy(self):\n raise RuntimeError(\"Subtypes must implement this to make a backup copy\")\n\nclass PropertyValueList(PropertyValueContainer, list):\n ''' A list property value container that supports change notifications on\n mutating operations.\n\n When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are\n transparently created to wrap those values. These ``PropertyValueList``\n values are subject to normal property validation. If the property type\n ``foo = List(Str)`` then attempting to set ``x.foo[0] = 10`` will raise\n an error.\n\n Instances of ``PropertyValueList`` can be explicitly created by passing\n any object that the standard list initializer accepts, for example:\n\n .. code-block:: python\n\n >>> PropertyValueList([10, 20])\n [10, 20]\n\n >>> PropertyValueList((10, 20))\n [10, 20]\n\n The following mutating operations on lists automatically trigger\n notifications:\n\n .. 
code-block:: python\n\n del x[y]\n del x[i:j]\n x += y\n x *= y\n x[i] = y\n x[i:j] = y\n x.append\n x.extend\n x.insert\n x.pop\n x.remove\n x.reverse\n x.sort\n\n '''\n\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n def _saved_copy(self):\n return list(self)\n\n # delete x[y]\n @notify_owner\n def __delitem__(self, y):\n return super().__delitem__(y)\n\n # delete x[i:j]\n @notify_owner\n def __delslice__(self, i, j):\n # Note: this is different py2 vs py3, py3 calls __delitem__ with a\n # slice index, and does not have this method at all\n return super().__delslice__(i, j)\n\n # x += y\n @notify_owner\n def __iadd__(self, y):\n return super().__iadd__(y)\n\n # x *= y\n @notify_owner\n def __imul__(self, y):\n return super().__imul__(y)\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super().__setitem__(i, y)\n\n # x[i:j] = y\n @notify_owner\n def __setslice__(self, i, j, y):\n # Note: this is different py2 vs py3, py3 calls __setitem__ with a\n # slice index, and does not have this method at all\n return super().__setslice__(i, j, y)\n\n @notify_owner\n def append(self, obj):\n return super().append(obj)\n\n @notify_owner\n def extend(self, iterable):\n return super().extend(iterable)\n\n @notify_owner\n def insert(self, index, obj):\n return super().insert(index, obj)\n\n @notify_owner\n def pop(self, index=-1):\n return super().pop(index)\n\n @notify_owner\n def remove(self, obj):\n return super().remove(obj)\n\n @notify_owner\n def reverse(self):\n return super().reverse()\n\n @notify_owner\n def sort(self, **kwargs):\n return super().sort(**kwargs)\n\nclass PropertyValueDict(PropertyValueContainer, dict):\n ''' A dict property value container that supports change notifications on\n mutating operations.\n\n When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are\n transparently created to wrap those values. These ``PropertyValueList``\n values are subject to normal property validation. If the property type\n ``foo = Dict(Str, Str)`` then attempting to set ``x.foo['bar'] = 10`` will\n raise an error.\n\n Instances of ``PropertyValueDict`` can be eplicitly created by passing\n any object that the standard dict initializer accepts, for example:\n\n .. code-block:: python\n\n >>> PropertyValueDict(dict(a=10, b=20))\n {'a': 10, 'b': 20}\n\n >>> PropertyValueDict(a=10, b=20)\n {'a': 10, 'b': 20}\n\n >>> PropertyValueDict([('a', 10), ['b', 20]])\n {'a': 10, 'b': 20}\n\n The following mutating operations on dicts automatically trigger\n notifications:\n\n .. 
code-block:: python\n\n del x[y]\n x[i] = y\n x.clear\n x.pop\n x.popitem\n x.setdefault\n x.update\n\n '''\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n def _saved_copy(self):\n return dict(self)\n\n # delete x[y]\n @notify_owner\n def __delitem__(self, y):\n return super().__delitem__(y)\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super().__setitem__(i, y)\n\n @notify_owner\n def clear(self):\n return super().clear()\n\n @notify_owner\n def pop(self, *args):\n return super().pop(*args)\n\n @notify_owner\n def popitem(self):\n return super().popitem()\n\n @notify_owner\n def setdefault(self, *args):\n return super().setdefault(*args)\n\n @notify_owner\n def update(self, *args, **kwargs):\n return super().update(*args, **kwargs)\n\nclass PropertyValueColumnData(PropertyValueDict):\n ''' A property value container for ColumnData that supports change\n notifications on mutating operations.\n\n This property value container affords specialized code paths for\n updating the .data dictionary for ColumnDataSource. When possible,\n more efficient ColumnDataChangedEvent hints are generated to perform\n the updates:\n\n .. code-block:: python\n\n x[i] = y\n x.update\n\n '''\n\n # x[i] = y\n # don't wrap with notify_owner --- notifies owners explicitly\n def __setitem__(self, i, y):\n return self.update([(i, y)])\n\n def __copy__(self):\n return PropertyValueColumnData(dict(self))\n\n def __deepcopy__(self, memodict={}):\n return PropertyValueColumnData(copy.deepcopy(dict(self), memodict))\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def update(self, *args, **kwargs):\n old = self._saved_copy()\n\n result = super(PropertyValueDict, self).update(*args, **kwargs) # note super special case\n\n from ...document.events import ColumnDataChangedEvent\n\n # Grab keys to update according to Python docstring for update([E, ]**F)\n #\n # If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]\n # If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v\n # In either case, this is followed by: for k in F: D[k] = F[k]\n cols = set(kwargs.keys())\n if len(args) == 1:\n E = args[0]\n if hasattr(E, 'keys'):\n cols |= set(E.keys())\n else:\n cols |= { x[0] for x in E }\n\n # we must loop ourselves here instead of calling _notify_owners\n # because the hint is customized for each owner separately\n for (owner, descriptor) in self._owners:\n hint = ColumnDataChangedEvent(owner.document, owner, cols=list(cols))\n descriptor._notify_mutated(owner, old, hint=hint)\n\n return result\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def _stream(self, doc, source, new_data, rollover=None, setter=None):\n ''' Internal implementation to handle special-casing stream events\n on ``ColumnDataSource`` columns.\n\n Normally any changes to the ``.data`` dict attribute on a\n ``ColumnDataSource`` triggers a notification, causing all of the data\n to be synchronized between server and clients.\n\n The ``.stream`` method on column data sources exists to provide a\n more efficient way to perform streaming (i.e. append-only) updates\n to a data source, without having to perform a full synchronization,\n which would needlessly re-send all the data.\n\n To accomplish this, this function bypasses the wrapped methods on\n ``PropertyValueDict`` and uses the unwrapped versions on the dict\n superclass directly. 
It then explicitly makes a notification, adding\n a special ``ColumnsStreamedEvent`` hint to the message containing\n only the small streamed data that BokehJS needs in order to\n efficiently synchronize.\n\n .. warning::\n This function assumes the integrity of ``new_data`` has already\n been verified.\n\n '''\n old = self._saved_copy()\n\n # TODO (bev) Currently this reports old differently for array vs list\n # For arrays is reports the actual old value. For lists, the old value\n # is actually the already updated value. This is because the method\n # self._saved_copy() makes a shallow copy.\n for k, v in new_data.items():\n if isinstance(self[k], np.ndarray) or isinstance(new_data[k], np.ndarray):\n data = np.append(self[k], new_data[k])\n if rollover and len(data) > rollover:\n data = data[-rollover:]\n super(PropertyValueDict, self).__setitem__(k, data) # note super special case\n else:\n L = self[k]\n L.extend(new_data[k])\n if rollover is not None:\n del L[:-rollover]\n\n from ...document.events import ColumnsStreamedEvent\n\n self._notify_owners(old,\n hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def _patch(self, doc, source, patches, setter=None):\n ''' Internal implementation to handle special-casing patch events\n on ``ColumnDataSource`` columns.\n\n Normally any changes to the ``.data`` dict attribute on a\n ``ColumnDataSource`` triggers a notification, causing all of the data\n to be synchronized between server and clients.\n\n The ``.patch`` method on column data sources exists to provide a\n more efficient way to perform patching (i.e. random access) updates\n to a data source, without having to perform a full synchronization,\n which would needlessly re-send all the data.\n\n To accomplish this, this function bypasses the wrapped methods on\n ``PropertyValueDict`` and uses the unwrapped versions on the dict\n superclass directly. It then explicitly makes a notification, adding\n a special ``ColumnsPatchedEvent`` hint to the message containing\n only the small patched data that BokehJS needs in order to efficiently\n synchronize.\n\n .. warning::\n This function assumes the integrity of ``patches`` has already\n been verified.\n\n '''\n old = self._saved_copy()\n\n for name, patch in patches.items():\n for ind, value in patch:\n if isinstance(ind, (int, slice)):\n self[name][ind] = value\n else:\n shape = self[name][ind[0]][tuple(ind[1:])].shape\n self[name][ind[0]][tuple(ind[1:])] = np.array(value, copy=False).reshape(shape)\n\n from ...document.events import ColumnsPatchedEvent\n\n self._notify_owners(old,\n hint=ColumnsPatchedEvent(doc, source, patches, setter))\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n" ]
[ [ "numpy.array", "numpy.append" ] ]
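The `_stream` method in the record above appends streamed values with `np.append` (for array columns) or `list.extend` (for list columns) and then trims to the `rollover` window. Below is a minimal standalone sketch of that append-and-trim pattern; the helper name `stream_column` is hypothetical and not part of Bokeh.

# Illustrative sketch (not Bokeh code): the append-and-trim pattern used by
# PropertyValueColumnData._stream in the record above.
import numpy as np

def stream_column(column, new_values, rollover=None):
    """Append new_values to column, keeping at most `rollover` trailing items."""
    if isinstance(column, np.ndarray):
        data = np.append(column, new_values)      # concatenates into a new array
        if rollover and len(data) > rollover:
            data = data[-rollover:]               # keep only the newest entries
        return data
    column = list(column)
    column.extend(new_values)
    if rollover is not None:
        del column[:-rollover]                    # in-place trim, mirrors the list branch
    return column

# Example: streaming 3 points into a column capped at 5 entries.
print(stream_column(np.array([1, 2, 3, 4]), [5, 6, 7], rollover=5))  # [3 4 5 6 7]
print(stream_column([1, 2, 3, 4], [5, 6, 7], rollover=5))            # [3, 4, 5, 6, 7]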
nikhil3198/stock_market_analysis
[ "c4861eca606d129b702353f1c36aaf9b44a81863" ]
[ "scripts/update_companies.py" ]
[ "import os\nimport csv\nimport pandas as pd\n\npath=\"../datasets/Companies/\"\ncompanies=[]\nfor file in os.listdir(path):\n if file[0]!='.':\n companies.append(file.split('.')[0].upper())\n\ncolumnTitleRow = ['Symbol','Name','LastSale','MarketCap','IPOyear','Sector','industry','Summary Quote']\nwith open(\"stocks.csv\", 'a',newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=columnTitleRow)\n writer.writeheader()\n\npath_comp=\"../Stocks/\"\nwith open(\"../datasets/filtered_companies.csv\", 'a',newline='') as csvfile:\n writer = csv.writer(csvfile)\n for file in os.listdir(path_comp):\n if file[0]!='.':\n data=pd.read_csv(path_comp+file)\n for index, line in data.iterrows():\n if line[\"Symbol\"] in companies:\n writer.writerow(line[:-1])\n\n" ]
[ [ "pandas.read_csv" ] ]
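The `update_companies.py` script above checks each row's `Symbol` against the list of company symbols inside an `iterrows()` loop and writes matches row by row. The sketch below shows the same membership filter done with a vectorized `isin()` mask instead; the inline frame, symbol list, and output file name are stand-ins, not the repository's actual data.

# Illustrative sketch (toy data): vectorized version of the Symbol filter above.
import pandas as pd

companies = ["AAPL", "MSFT", "GOOG"]           # stand-in for the symbols built from file names
stocks = pd.DataFrame({
    "Symbol": ["AAPL", "TSLA", "MSFT"],
    "Name": ["Apple", "Tesla", "Microsoft"],
    "Summary Quote": ["q1", "q2", "q3"],
})

filtered = stocks[stocks["Symbol"].isin(companies)]      # boolean mask, no Python-level loop
filtered.iloc[:, :-1].to_csv("filtered_companies.csv",   # drop the last column, as the loop above does
                             mode="a", header=False, index=False)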
HirataYurina/yoloV3-keras-sibyl
[ "15f6c5f021dfc80d753df6bcdd579ae1139edfb9" ]
[ "gaussion_yolo3/yolo.py" ]
[ "import os\nimport numpy as np\nimport copy\nimport colorsys\nfrom timeit import default_timer as timer\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\nfrom gaussion_yolo3.gaussian_yolo3 import yolo_body, yolo_eval\nfrom utils.utils import letterbox_image\nfrom gaussion_yolo3.configs import CONFIG\nimport tensorflow as tf\n\n# remember add this code to avoid some bugs\nconfig = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\nsess = tf.Session(config=config)\n\n\nclass YOLO(object):\n\n _defaults = {\n \"model_path\": CONFIG.PREDICT.WEIGHTS,\n \"anchors_path\": CONFIG.PREDICT.ANCHOR_PATH,\n \"classes_path\": CONFIG.PREDICT.CLASS_PATH,\n \"score\": CONFIG.PREDICT.SCORE,\n \"iou\": CONFIG.PREDICT.IOU,\n \"model_image_size\": CONFIG.PREDICT.RESOLUTION,\n \"max_boxes\": CONFIG.PREDICT.MAX_BOXES\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n # update dict of YOLO class\n self.__dict__.update(self._defaults)\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)\n self.yolo_model.load_weights(self.model_path)\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors / len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # draw rectangles\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n\n np.random.seed(10101)\n np.random.shuffle(self.colors)\n np.random.seed(None)\n\n self.input_image_shape = K.placeholder(shape=(2,))\n\n boxes, scores, classes = yolo_eval(self.yolo_model.output,\n self.anchors,\n num_classes,\n self.input_image_shape,\n max_boxes=self.max_boxes,\n score_threshold=self.score,\n iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n\n start = timer()\n\n # convert img_size to input_size\n new_image_size = (self.model_image_size[0], self.model_image_size[1])\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0)\n\n # sess.run\n out_boxes, out_scores, 
out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n # print(out_scores)\n # print(out_boxes)\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n # starting draw bounding boxes\n font = ImageFont.truetype(font='font/simhei.ttf',\n size=np.floor(2e-2 * image.size[1] + 0.5).astype('int32'))\n # thickness of bounding box and this thickness is changing according to img_size\n thickness = (image.size[0] + image.size[1]) // 500\n\n for i, c in list(enumerate(out_classes)):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n top, left, bottom, right = box\n top = top - 5\n left = left - 5\n bottom = bottom + 5\n right = right + 5\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n print(label)\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n draw.rectangle([left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print('detect time:', end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\n\nif __name__ == '__main__':\n\n yolo = YOLO()\n\n image = Image.open('../img/test1.jpg')\n img = yolo.detect_image(image)\n img.show()\n" ]
[ [ "numpy.array", "numpy.random.seed", "tensorflow.Session", "numpy.random.shuffle", "tensorflow.GPUOptions", "numpy.expand_dims", "numpy.floor" ] ]
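`detect_image()` above letterboxes the input to the configured resolution, scales pixel values to [0, 1], and adds a batch axis before running the session. A simplified sketch of that input preparation follows; the 416x416 resolution is assumed, and a plain `resize()` stands in for the repository's `letterbox_image()` helper (which also pads to preserve aspect ratio).

# Illustrative sketch of the input preparation in detect_image() above.
import numpy as np
from PIL import Image

model_image_size = (416, 416)                                  # assumed YOLOv3 input resolution
image = Image.new("RGB", (1280, 720), color=(128, 128, 128))   # dummy input image

boxed = image.resize(model_image_size)                         # stand-in for letterbox_image()
image_data = np.array(boxed, dtype="float32") / 255.0          # HWC, values in [0, 1]
image_data = np.expand_dims(image_data, 0)                     # NHWC batch of one

print(image_data.shape)                                        # (1, 416, 416, 3)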
earlsuke/sklearn-porter
[ "8658c6567e28c570d96ab2e858c510f84b1d94dc" ]
[ "examples/estimator/classifier/DecisionTreeClassifier/java/basics_embedded.pct.py" ]
[ "# %% [markdown]\n# # sklearn-porter\n#\n# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)\n#\n# ## DecisionTreeClassifier\n#\n# Documentation: [sklearn.tree.DecisionTreeClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html)\n\n# %%\nimport sys\nsys.path.append('../../../../..')\n\n# %% [markdown]\n# ### Load data\n\n# %%\nfrom sklearn.datasets import load_iris\n\niris_data = load_iris()\n\nX = iris_data.data\ny = iris_data.target\n\nprint(X.shape, y.shape)\n\n# %% [markdown]\n# ### Train classifier\n\n# %%\nfrom sklearn.tree import tree\n\nclf = tree.DecisionTreeClassifier()\nclf.fit(X, y)\n\n# %% [markdown]\n# ### Transpile classifier\n\n# %%\nfrom sklearn_porter import Porter\n\nporter = Porter(clf, language='java')\noutput = porter.export(embed_data=True)\n\nprint(output)\n\n# class DecisionTreeClassifier {\n#\n# private static int findMax(int[] nums) {\n# int index = 0;\n# for (int i = 0; i < nums.length; i++) {\n# index = nums[i] > nums[index] ? i : index;\n# }\n# return index;\n# }\n#\n# public static int predict(double[] features) {\n# int[] classes = new int[3];\n#\n# if (features[3] <= 0.800000011920929) {\n# classes[0] = 50;\n# classes[1] = 0;\n# classes[2] = 0;\n# } else {\n# if (features[3] <= 1.75) {\n# if (features[2] <= 4.950000047683716) {\n# if (features[3] <= 1.6500000357627869) {\n# classes[0] = 0;\n# classes[1] = 47;\n# classes[2] = 0;\n# } else {\n# classes[0] = 0;\n# classes[1] = 0;\n# classes[2] = 1;\n# }\n# } else {\n# if (features[3] <= 1.550000011920929) {\n# classes[0] = 0;\n# classes[1] = 0;\n# classes[2] = 3;\n# } else {\n# if (features[2] <= 5.450000047683716) {\n# classes[0] = 0;\n# classes[1] = 2;\n# classes[2] = 0;\n# } else {\n# classes[0] = 0;\n# classes[1] = 0;\n# classes[2] = 1;\n# }\n# }\n# }\n# } else {\n# if (features[2] <= 4.8500001430511475) {\n# if (features[0] <= 5.950000047683716) {\n# classes[0] = 0;\n# classes[1] = 1;\n# classes[2] = 0;\n# } else {\n# classes[0] = 0;\n# classes[1] = 0;\n# classes[2] = 2;\n# }\n# } else {\n# classes[0] = 0;\n# classes[1] = 0;\n# classes[2] = 43;\n# }\n# }\n# }\n#\n# return findMax(classes);\n# }\n#\n# public static void main(String[] args) {\n# if (args.length == 4) {\n#\n# // Features:\n# double[] features = new double[args.length];\n# for (int i = 0, l = args.length; i < l; i++) {\n# features[i] = Double.parseDouble(args[i]);\n# }\n#\n# // Prediction:\n# int prediction = DecisionTreeClassifier.predict(features);\n# System.out.println(prediction);\n#\n# }\n# }\n# }\n\n# %% [markdown]\n# ### Run classification in Java\n\n# %%\n# Save classifier:\n# with open('DecisionTreeClassifier.java', 'w') as f:\n# f.write(output)\n\n# Compile model:\n# $ javac -cp . DecisionTreeClassifier.java\n\n# Run classification:\n# $ java DecisionTreeClassifier 1 2 3 4\n" ]
[ [ "sklearn.tree.tree.DecisionTreeClassifier", "sklearn.datasets.load_iris" ] ]
osljw/keras_tf
[ "400f7e8438216ff15e91509472dc028605ed97aa", "400f7e8438216ff15e91509472dc028605ed97aa" ]
[ "deepctr/layers/utils.py", "offline.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\n\nAuthor:\n Weichen Shen,wcshen1994@163.com\n\n\"\"\"\n\nfrom tensorflow.python.keras.layers import Layer, Concatenate\n\n\nclass NoMask(Layer):\n def __init__(self, **kwargs):\n super(NoMask, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # Be sure to call this somewhere!\n super(NoMask, self).build(input_shape)\n\n def call(self, x, mask=None, **kwargs):\n return x\n\n def compute_mask(self, inputs, mask):\n return None\n\n\ndef concat_fun(inputs, axis=-1):\n if len(inputs) == 1:\n return inputs[0]\n else:\n return Concatenate(axis=axis)(inputs)\n", "import pandas as pd\nfrom deepctr import SingleFeat\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\nfrom model import xDeepFM_MTL\n\nONLINE_FLAG = False\nloss_weights = [1, 1, ] # [0.7,0.3]任务权重可以调下试试\nVALIDATION_FRAC = 0.2 # 用做线下验证数据比例\n\nif __name__ == \"__main__\":\n #data = pd.read_csv('./input/final_track2_train.txt', sep='\\t', names=[\n column_names = ['uid', 'user_city', 'item_id', 'author_id', 'item_city', 'channel', 'finish', 'like', 'music_id', 'did', 'creat_time', 'video_duration']\n data = pd.read_csv('./input/xa', sep='\\t', names=column_names)\n\n if ONLINE_FLAG:\n test_data = pd.read_csv('./input/final_track2_test_no_answer.txt', sep='\\t', names=[\n 'uid', 'user_city', 'item_id', 'author_id', 'item_city', 'channel', 'finish', 'like', 'music_id', 'did', 'creat_time', 'video_duration'])\n train_size = data.shape[0]\n data = data.append(test_data)\n else:\n #train_size = int(data.shape[0]*(1-VALIDATION_FRAC))\n test_data = pd.read_csv('./input/xa', sep='\\t', names=column_names)\n train_size = data.shape[0]\n data = data.append(test_data)\n\n sparse_features = ['uid', 'user_city', 'item_id', 'author_id', 'item_city', 'channel',\n 'music_id', 'did', ]\n dense_features = ['video_duration'] # 'creat_time',\n\n data[sparse_features] = data[sparse_features].fillna('-1', )\n data[dense_features] = data[dense_features].fillna(0,)\n\n target = ['finish', 'like']\n\n for feat in sparse_features:\n lbe = LabelEncoder()\n data[feat] = lbe.fit_transform(data[feat])\n mms = MinMaxScaler(feature_range=(0, 1))\n data[dense_features] = mms.fit_transform(data[dense_features])\n\n sparse_feature_list = [SingleFeat(feat, data[feat].nunique())\n for feat in sparse_features]\n dense_feature_list = [SingleFeat(feat, 0)\n for feat in dense_features]\n\n train = data.iloc[:train_size]\n test = data.iloc[train_size:]\n\n train_model_input = [train[feat.name].values for feat in sparse_feature_list] + \\\n [train[feat.name].values for feat in dense_feature_list]\n test_model_input = [test[feat.name].values for feat in sparse_feature_list] + \\\n [test[feat.name].values for feat in dense_feature_list]\n\n train_labels = [train[target[0]].values, train[target[1]].values]\n test_labels = [test[target[0]].values, test[target[1]].values]\n\n model = xDeepFM_MTL({\"sparse\": sparse_feature_list,\n \"dense\": dense_feature_list})\n model.compile(\"adagrad\", \"binary_crossentropy\", loss_weights=loss_weights,)\n\n if ONLINE_FLAG:\n history = model.fit(train_model_input, train_labels,\n batch_size=4096, epochs=1, verbose=1)\n pred_ans = model.predict(test_model_input, batch_size=2**14)\n\n else:\n history = model.fit(train_model_input, train_labels,\n batch_size=4096, epochs=1, verbose=1, validation_data=(test_model_input, test_labels))\n\n if ONLINE_FLAG:\n result = test_data[['uid', 'item_id', 'finish', 'like']].copy()\n result.rename(columns={'finish': 'finish_probability',\n 'like': 
'like_probability'}, inplace=True)\n result['finish_probability'] = pred_ans[0]\n result['like_probability'] = pred_ans[1]\n result[['uid', 'item_id', 'finish_probability', 'like_probability']].to_csv(\n 'result.csv', index=None, float_format='%.6f')\n" ]
[ [ "tensorflow.python.keras.layers.Concatenate" ], [ "sklearn.preprocessing.LabelEncoder", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler" ] ]
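`offline.py` above integer-encodes every sparse id column with `LabelEncoder` and rescales the dense columns with `MinMaxScaler` before building the model inputs. A toy-data sketch of that preprocessing step is below; the inline frame replaces the track2 input files.

# Illustrative sketch (toy data): the sparse/dense preprocessing used in offline.py above.
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

data = pd.DataFrame({
    "uid": ["u1", "u2", "u1", "u3"],
    "music_id": ["m9", "m9", "m2", "-1"],
    "video_duration": [10.0, 35.0, 7.5, 60.0],
})

sparse_features = ["uid", "music_id"]
dense_features = ["video_duration"]

for feat in sparse_features:
    data[feat] = LabelEncoder().fit_transform(data[feat])   # strings -> contiguous ints

data[dense_features] = MinMaxScaler(feature_range=(0, 1)).fit_transform(data[dense_features])

print(data)   # uid and music_id are now small integers; video_duration is scaled to [0, 1]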
benlevyx/opinion-vs-fact
[ "5063adc16e37b0b47cb6b55494866c31133281d4" ]
[ "src/0_test_corpus/01_match_media_ids.py" ]
[ "import pandas as pd\n\nfrom opinion import config, utils\n\n\ndef transform_url(df):\n df['media_source'] = df['media_source'].apply(\n lambda x: x.replace('https', 'http')\n )\n return df\n\n\ndef process_url(url):\n try:\n domain, path = utils.parse_url(url)\n return domain\n except ValueError as e:\n print(e, url)\n return 'na'\n\n\ndef main():\n df_top50 = pd.read_csv(config.data / 'media_sources.csv')\n df_all = pd.read_csv(config.data / 'all_media.csv', index_col=0)\n\n df_all['url'] = df_all['url'].apply(process_url)\n df_top50['media_source'] = df_top50['media_source'].apply(process_url)\n\n df_merged = pd.merge(df_top50, df_all,\n how='left',\n left_on='media_source', right_on='url')\n df_merged = df_merged[['media_source', 'has_opinion', 'media_id', 'name']]\n df_merged.to_csv(config.data / 'media_with_ids.csv', index=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
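`01_match_media_ids.py` above normalizes URLs to bare domains and left-joins the curated top-50 list against the full media table to pick up `media_id` and `name`. The small sketch below uses inline frames in place of the project's CSVs, mainly to show that unmatched sources keep NaN ids after the left join.

# Illustrative sketch (inline frames, not the project's files): the left join above.
import pandas as pd

df_top50 = pd.DataFrame({
    "media_source": ["nytimes.com", "foxnews.com"],
    "has_opinion": [True, True],
})
df_all = pd.DataFrame({
    "url": ["nytimes.com", "washingtonpost.com"],
    "media_id": [1, 2],
    "name": ["New York Times", "Washington Post"],
})

df_merged = pd.merge(df_top50, df_all, how="left",
                     left_on="media_source", right_on="url")
print(df_merged[["media_source", "has_opinion", "media_id", "name"]])
# foxnews.com keeps NaN media_id/name because the left join found no match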
hunterhector/DDSemantics
[ "883ef1015bd21d9b8575d8000faf3b506a09f21c" ]
[ "event/util.py" ]
[ "import argparse\nimport gc\nimport hashlib\nimport logging\nimport os\nimport sys\nimport unicodedata\nfrom collections import Counter\nfrom time import strftime, localtime\nfrom datetime import datetime\nimport psutil\nfrom hurry.filesize import size\n\nimport numpy as np\nimport torch\n\nfrom traitlets.config.loader import KeyValueConfigLoader\nfrom traitlets.config.loader import PyFileConfigLoader\n\n\nclass OptionPerLineParser(argparse.ArgumentParser):\n def convert_arg_line_to_args(self, arg_line):\n if arg_line.startswith(\"#\"):\n return []\n return arg_line.split()\n\n\ndef ensure_dir(p):\n parent = os.path.dirname(p)\n if not os.path.exists(parent):\n os.makedirs(parent)\n\n\ndef tokens_to_sent(tokens, sent_start):\n sent = \"\"\n\n for token, span in tokens:\n if span[0] > len(sent) + sent_start:\n padding = \" \" * (span[0] - len(sent) - sent_start)\n sent += padding\n sent += token\n return sent\n\n\ndef find_by_id(folder, docid):\n for filename in os.listdir(folder):\n if filename.startswith(docid):\n return os.path.join(folder, filename)\n\n\ndef rm_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix) :]\n return text\n\n\ndef get_date_stamp():\n return datetime.today().strftime(\"%Y%m%d\")\n\n\ndef set_basic_log(log_level=logging.INFO):\n log_format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n logging.basicConfig(level=log_level, format=log_format)\n\n\ndef set_file_log(log_file, log_level=logging.INFO):\n log_format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n logging.basicConfig(level=log_level, format=log_format, filename=log_file)\n\n\ndef basic_console_log(log_level=logging.INFO):\n root = logging.getLogger()\n root.setLevel(log_level)\n ch = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n ch.setFormatter(formatter)\n\n\ndef load_command_line_config(args):\n cl_loader = KeyValueConfigLoader()\n return cl_loader.load_config(args)\n\n\ndef load_file_config(config_path):\n loader = PyFileConfigLoader(config_path)\n conf = loader.load_config()\n return conf\n\n\ndef load_config_with_cmd(args):\n file_conf = load_file_config(args[1])\n\n if len(args) > 1:\n cl_conf = load_command_line_config(args[2:])\n file_conf.merge(cl_conf)\n\n return file_conf\n\n\ndef load_mixed_configs():\n file_confs = [a for a in sys.argv[1:] if a.endswith(\".py\")]\n arg_confs = [a for a in sys.argv[1:] if a.startswith(\"--\")]\n return load_multi_configs(file_confs, arg_confs)\n\n\ndef load_multi_configs(file_args, cmd_args):\n \"\"\"This method try to mimics the behavior of the sub_config. 
It currently\n only take one base and one main.\n\n Args:\n file_args:\n cmd_args:\n\n Returns:\n\n \"\"\"\n cl_conf = load_command_line_config(cmd_args)\n\n if len(file_args) > 0:\n base_conf = file_args[0]\n\n loader = PyFileConfigLoader(base_conf)\n loader.load_config()\n\n for conf in file_args[1:]:\n # Since subconfig will be merged to and override the base.\n loader.load_subconfig(conf)\n\n all_conf = loader.config\n all_conf.merge(cl_conf)\n return all_conf\n else:\n return cl_conf\n\n\ndef file_md5(file):\n hashlib.md5(open(file, \"rb\").read()).hexdigest()\n\n\ntbl = dict.fromkeys(\n i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith(\"P\")\n)\n\n\ndef remove_punctuation(text):\n return text.translate(tbl)\n\n\ndef get_env(var_name):\n if var_name not in os.environ:\n raise KeyError(\n \"Please supply the directory as environment \"\n \"variable: {}\".format(var_name)\n )\n else:\n return os.environ[var_name]\n\n\ndef append_num_to_path(file_path, suffix=0):\n if os.path.exists(file_path):\n new_path = f\"{file_path}_{suffix}\"\n if os.path.exists(new_path):\n append_num_to_path(file_path, suffix + 1)\n else:\n os.rename(file_path, new_path)\n\n\ndef batch_combine(l_data):\n data = torch.cat([torch.unsqueeze(d, 0) for d in l_data], dim=0)\n return data\n\n\ndef to_torch(data, data_type):\n return torch.from_numpy(np.asarray(data, data_type))\n\n\ndef remove_neg(raw_predicate):\n # Frames of verb with or without negation should be the same.\n\n neg = \"not_\"\n if raw_predicate.startswith(neg):\n return raw_predicate[len(neg) :]\n\n return raw_predicate\n\n\ndef get_time():\n return strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n\n\ndef show_tensors():\n num_allocated = 0\n cell_sum = Counter()\n\n for obj in gc.get_objects():\n if torch.is_tensor(obj):\n # print(type(obj), obj.size(), obj.type())\n num_allocated += 1\n\n cell_count = 1\n for e in obj.size():\n cell_count *= e\n cell_sum[obj.type()] += cell_count\n\n print(\"Number of tensors: [%d].\" % num_allocated)\n print(\"Cell by type\")\n for key, num in cell_sum.items():\n print(\"\\t\", key, num)\n\n\ndef gpu_mem_report():\n print(\"Allocated memory \", size(torch.cuda.memory_allocated()))\n\n\ndef cpu_stats():\n print(sys.version)\n print(psutil.cpu_percent())\n print(psutil.virtual_memory()) # physical memory usage\n pid = os.getpid()\n py = psutil.Process(pid)\n memoryUse = py.memory_info()[0] / 2.0 ** 30 # memory use in GB...I think\n print(\"memory GB:\", memoryUse)\n\n\ndef make_2d_one_hot(batched_indices, max_length, device):\n b, l = batched_indices.shape\n data = batched_indices.unsqueeze(-1)\n one_hot = torch.zeros([b, l, max_length], dtype=torch.float32).to(device)\n one_hot.scatter_(2, data, 1)\n return one_hot\n\n\ndef make_one_hot(labels, C=2):\n \"\"\"Converts an integer label torch.autograd.Variable to a one-hot Variable.\n\n Args:\n labels(torch.autograd.Variable of torch.cuda.LongTensor): N x 1 x H x W,\n where N is batch size. Each value is an integer representing correct\n classification.\n C(integer., optional): number of classes in labels. 
(Default value = 2)\n\n Returns:\n\n\n \"\"\"\n one_hot = torch.FloatTensor(\n labels.size(0), C, labels.size(2), labels.size(3)\n ).zero_()\n target = one_hot.scatter_(1, labels.data, 1)\n\n return target\n\n\ndef topk_with_fill(data, k, dimension, largest, dtype=torch.int32, filler=0):\n if data.shape[dimension] >= k:\n res, _ = data.topk(k, dimension, largest=largest)\n else:\n pad_len = k - data.shape[dimension]\n l_pad_shape = []\n\n for index, s in data.shape:\n if index == dimension:\n l_pad_shape.append(pad_len)\n else:\n l_pad_shape.append(s)\n\n pad_shape = tuple(l_pad_shape)\n\n if filler == 1:\n padding = torch.ones(pad_shape, dtype=dtype)\n else:\n padding = torch.zeros(pad_shape, dtype=dtype)\n if not filler == 0:\n padding.fill_(filler)\n\n res = torch.cat((data, padding), -1)\n\n return res\n" ]
[ [ "torch.zeros", "torch.cat", "numpy.asarray", "torch.is_tensor", "torch.cuda.memory_allocated", "torch.unsqueeze", "torch.ones" ] ]
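`make_2d_one_hot()` in `event/util.py` above builds a one-hot tensor with `Tensor.scatter_`. Below is a minimal sketch of what that call computes for a toy (batch, length) index tensor, minus the device placement.

# Illustrative sketch: scatter_ writes a 1 at each token's index position,
# turning a (batch, length) index tensor into a (batch, length, max_length) one-hot tensor.
import torch

batched_indices = torch.tensor([[1, 3], [0, 2]])        # shape (b=2, l=2)
max_length = 4

one_hot = torch.zeros(2, 2, max_length, dtype=torch.float32)
one_hot.scatter_(2, batched_indices.unsqueeze(-1), 1)   # dim=2, index shape (2, 2, 1)

print(one_hot)
# tensor([[[0., 1., 0., 0.],
#          [0., 0., 0., 1.]],
#         [[1., 0., 0., 0.],
#          [0., 0., 1., 0.]]])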
abhinavsp0730/federated
[ "7c5821f85cb2d0379f33bf2b5e02f97d51a16427" ]
[ "tensorflow_federated/python/core/impl/compiler/building_block_factory_test.py" ]
[ "# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import anonymous_tuple\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.impl import type_utils\nfrom tensorflow_federated.python.core.impl.compiler import building_block_factory\nfrom tensorflow_federated.python.core.impl.compiler import building_blocks\nfrom tensorflow_federated.python.core.impl.compiler import intrinsic_defs\nfrom tensorflow_federated.python.core.impl.compiler import placement_literals\nfrom tensorflow_federated.python.core.impl.compiler import test_utils\nfrom tensorflow_federated.python.core.impl.compiler import type_factory\n\n\nclass UniqueNameGeneratorTest(absltest.TestCase):\n\n def test_does_not_raise_type_error_with_none_comp(self):\n try:\n building_block_factory.unique_name_generator(None)\n except TypeError:\n self.fail('Raised TypeError unexpectedly.')\n\n def test_returns_unique_names_with_none_comp_and_none_prefix(self):\n name_generator = building_block_factory.unique_name_generator(\n None, prefix=None)\n names = set(next(name_generator) for _ in range(10))\n first_name = list(names)[0]\n prefix = first_name[:3]\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith(prefix) for n in names))\n\n def test_returns_unique_names_with_none_comp_and_unset_prefix(self):\n name_generator = building_block_factory.unique_name_generator(None)\n names = set(next(name_generator) for _ in range(10))\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith('_var') for n in names))\n\n def test_returns_unique_names_with_none_comp_and_prefix(self):\n name_generator = building_block_factory.unique_name_generator(\n None, prefix='_test')\n names = set(next(name_generator) for _ in range(10))\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith('_test') for n in names))\n\n def test_returns_unique_names_with_comp_and_none_prefix(self):\n ref = building_blocks.Reference('a', tf.int32)\n comp = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n name_generator = building_block_factory.unique_name_generator(\n comp, prefix=None)\n names = set(next(name_generator) for _ in range(10))\n first_name = list(names)[0]\n prefix = first_name[:3]\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith(prefix) for n in names))\n\n def test_returns_unique_names_with_comp_and_unset_prefix(self):\n ref = building_blocks.Reference('a', tf.int32)\n comp = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n name_generator = building_block_factory.unique_name_generator(comp)\n names = set(next(name_generator) for _ in range(10))\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith('_var') for n in names))\n\n def 
test_returns_unique_names_with_comp_and_prefix(self):\n ref = building_blocks.Reference('a', tf.int32)\n comp = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n name_generator = building_block_factory.unique_name_generator(\n comp, prefix='_test')\n names = set(next(name_generator) for _ in range(10))\n self.assertLen(names, 10)\n self.assertTrue(all(n.startswith('_test') for n in names))\n\n def test_returns_unique_names_with_conflicting_prefix(self):\n ref = building_blocks.Reference('_test', tf.int32)\n comp = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n name_generator = building_block_factory.unique_name_generator(\n comp, prefix='_test')\n names = set(next(name_generator) for _ in range(10))\n first_name = list(names)[0]\n prefix = first_name[:3]\n self.assertNotEqual(prefix, '_test')\n self.assertTrue(all(n.startswith(prefix) for n in names))\n\n\nclass CreateCompiledEmptyTupleTest(absltest.TestCase):\n\n def test_constructs_correct_type(self):\n empty_tuple = building_block_factory.create_compiled_empty_tuple()\n self.assertEqual(empty_tuple.type_signature,\n building_blocks.Tuple([]).type_signature)\n\n def test_constructs_called_graph(self):\n empty_tuple = building_block_factory.create_compiled_empty_tuple()\n self.assertIsInstance(empty_tuple, building_blocks.Call)\n self.assertIsNone(empty_tuple.argument)\n self.assertIsInstance(empty_tuple.function,\n building_blocks.CompiledComputation)\n\n\nclass CreateCompiledIdentityTest(absltest.TestCase):\n\n def test_raises_on_none(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_identity(None)\n\n def test_raises_on_federated_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_identity(\n computation_types.FederatedType(tf.int32, placement_literals.SERVER))\n\n def test_raises_on_federated_type_under_tuple(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_identity([\n computation_types.FederatedType(tf.int32, placement_literals.SERVER)\n ])\n\n def test_integer_identity_type_signature(self):\n int_identity = building_block_factory.create_compiled_identity(tf.int32)\n self.assertIsInstance(int_identity, building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(tf.int32, tf.int32)\n self.assertEqual(int_identity.type_signature, expected_type_signature)\n\n def test_integer_identity_acts_as_identity(self):\n int_identity = building_block_factory.create_compiled_identity(tf.int32)\n for k in range(10):\n result = test_utils.run_tensorflow(int_identity.proto, k)\n self.assertEqual(result, k)\n\n def test_unnamed_tuple_identity_type_signature(self):\n tuple_type = [tf.int32, tf.float32]\n tuple_identity = building_block_factory.create_compiled_identity(tuple_type)\n self.assertIsInstance(tuple_identity, building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tuple_type, tuple_type)\n self.assertEqual(tuple_identity.type_signature, expected_type_signature)\n\n def test_unnamed_tuple_identity_acts_as_identity(self):\n tuple_type = [tf.int32, tf.float32]\n tuple_identity = building_block_factory.create_compiled_identity(tuple_type)\n for k in range(10):\n result = test_utils.run_tensorflow(tuple_identity.proto, [k, 10. - k])\n self.assertLen(result, 2)\n self.assertEqual(result[0], k)\n self.assertEqual(result[1], 10. 
- k)\n\n def test_named_tuple_identity_type_signature(self):\n tuple_type = [('a', tf.int32), ('b', tf.float32)]\n tuple_identity = building_block_factory.create_compiled_identity(tuple_type)\n self.assertIsInstance(tuple_identity, building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tuple_type, tuple_type)\n self.assertEqual(tuple_identity.type_signature, expected_type_signature)\n\n def test_named_tuple_identity_acts_as_identity(self):\n tuple_type = [('a', tf.int32), ('b', tf.float32)]\n tuple_identity = building_block_factory.create_compiled_identity(tuple_type)\n for k in range(10):\n result = test_utils.run_tensorflow(tuple_identity.proto, {\n 'a': k,\n 'b': 10. - k\n })\n self.assertLen(result, 2)\n self.assertEqual(result.a, k)\n self.assertEqual(result.b, 10. - k)\n\n def test_sequence_identity_type_signature(self):\n sequence_type = computation_types.SequenceType(tf.int32)\n sequence_identity = building_block_factory.create_compiled_identity(\n sequence_type)\n self.assertIsInstance(sequence_identity,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n sequence_type, sequence_type)\n self.assertEqual(sequence_identity.type_signature, expected_type_signature)\n\n def test_sequence_identity_acts_as_identity(self):\n sequence_type = computation_types.SequenceType(tf.int32)\n sequence_identity = building_block_factory.create_compiled_identity(\n sequence_type)\n seq = list(range(10))\n result = test_utils.run_tensorflow(sequence_identity.proto, seq)\n self.assertEqual(result, seq)\n\n\nclass CreateCompiledInputReplicationTest(absltest.TestCase):\n\n def test_raises_on_none_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_input_replication(None, 2)\n\n def test_raises_on_none_int(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_input_replication(tf.int32, None)\n\n def test_raises_on_federated_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_input_replication(\n computation_types.FederatedType(tf.int32, placement_literals.SERVER),\n 2)\n\n def test_raises_on_federated_type_under_tuple(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_compiled_input_replication([\n computation_types.FederatedType(tf.int32, placement_literals.SERVER)\n ])\n\n def test_integer_input_duplicate_type_signature(self):\n int_duplicate_input = building_block_factory.create_compiled_input_replication(\n tf.int32, 2)\n self.assertIsInstance(int_duplicate_input,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tf.int32, [tf.int32, tf.int32])\n self.assertEqual(int_duplicate_input.type_signature,\n expected_type_signature)\n\n def test_integer_input_duplicate_duplicates_input(self):\n int_duplicate_input = building_block_factory.create_compiled_input_replication(\n tf.int32, 2)\n for k in range(10):\n result = test_utils.run_tensorflow(int_duplicate_input.proto, k)\n self.assertLen(result, 2)\n self.assertEqual(result[0], k)\n self.assertEqual(result[1], k)\n\n def test_integer_input_triplicate_type_signature(self):\n int_duplicate_input = building_block_factory.create_compiled_input_replication(\n tf.int32, 3)\n self.assertIsInstance(int_duplicate_input,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tf.int32, [tf.int32, tf.int32, tf.int32])\n 
self.assertEqual(int_duplicate_input.type_signature,\n expected_type_signature)\n\n def test_integer_input_triplicate_triplicates_input(self):\n int_duplicate_input = building_block_factory.create_compiled_input_replication(\n tf.int32, 3)\n for k in range(10):\n result = test_utils.run_tensorflow(int_duplicate_input.proto, k)\n self.assertLen(result, 3)\n self.assertEqual(result[0], k)\n self.assertEqual(result[1], k)\n self.assertEqual(result[2], k)\n\n def test_unnamed_tuple_input_duplicate_type_signature(self):\n tuple_type = [tf.int32, tf.float32]\n tuple_duplicate_input = building_block_factory.create_compiled_input_replication(\n tuple_type, 2)\n self.assertIsInstance(tuple_duplicate_input,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tuple_type, [tuple_type, tuple_type])\n self.assertEqual(tuple_duplicate_input.type_signature,\n expected_type_signature)\n\n def test_unnamed_tuple_input_duplicate_duplicates_input(self):\n tuple_type = [tf.int32, tf.float32]\n tuple_duplicate_input = building_block_factory.create_compiled_input_replication(\n tuple_type, 2)\n for k in range(10):\n result = test_utils.run_tensorflow(tuple_duplicate_input.proto,\n [k, 10. - k])\n self.assertLen(result, 2)\n self.assertEqual(result[0][0], k)\n self.assertEqual(result[1][0], k)\n self.assertEqual(result[0][1], 10. - k)\n self.assertEqual(result[1][1], 10. - k)\n\n def test_named_tuple_input_duplicate_type_signature(self):\n tuple_type = [('a', tf.int32), ('b', tf.float32)]\n tuple_duplicate_input = building_block_factory.create_compiled_input_replication(\n tuple_type, 2)\n self.assertIsInstance(tuple_duplicate_input,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n tuple_type, [tuple_type, tuple_type])\n self.assertEqual(tuple_duplicate_input.type_signature,\n expected_type_signature)\n\n def test_named_tuple_input_duplicate_duplicates_input(self):\n tuple_type = [('a', tf.int32), ('b', tf.float32)]\n tuple_duplicate_input = building_block_factory.create_compiled_input_replication(\n tuple_type, 2)\n for k in range(10):\n result = test_utils.run_tensorflow(tuple_duplicate_input.proto,\n [k, 10. - k])\n self.assertLen(result, 2)\n self.assertEqual(result[0].a, k)\n self.assertEqual(result[1].a, k)\n self.assertEqual(result[0].b, 10. - k)\n self.assertEqual(result[1].b, 10. 
- k)\n\n def test_sequence_input_duplicate_type_signature(self):\n sequence_type = computation_types.SequenceType(tf.int32)\n sequence_duplicate_input = building_block_factory.create_compiled_input_replication(\n sequence_type, 2)\n self.assertIsInstance(sequence_duplicate_input,\n building_blocks.CompiledComputation)\n expected_type_signature = computation_types.FunctionType(\n sequence_type, [sequence_type, sequence_type])\n self.assertEqual(sequence_duplicate_input.type_signature,\n expected_type_signature)\n\n def test_sequence_input_duplicate_duplicates_input(self):\n sequence_type = computation_types.SequenceType(tf.int32)\n sequence_duplicate_input = building_block_factory.create_compiled_input_replication(\n sequence_type, 2)\n seq = list(range(10))\n result = test_utils.run_tensorflow(sequence_duplicate_input.proto, seq)\n self.assertLen(result, 2)\n self.assertEqual(result[0], seq)\n self.assertEqual(result[1], seq)\n\n\nclass CreateFederatedGetitemCompTest(parameterized.TestCase):\n\n def test_raises_type_error_on_none(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_getitem_comp(None, 0)\n\n @parameterized.named_parameters(('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER))\n def test_returns_comp(self, placement):\n federated_value = building_blocks.Reference(\n 'test',\n computation_types.FederatedType([('a', tf.int32), ('b', tf.bool)],\n placement))\n get_0_comp = building_block_factory.create_federated_getitem_comp(\n federated_value, 0)\n self.assertEqual(str(get_0_comp), '(x -> x[0])')\n get_slice_comp = building_block_factory.create_federated_getitem_comp(\n federated_value, slice(None, None, -1))\n self.assertEqual(str(get_slice_comp), '(x -> <b=x[1],a=x[0]>)')\n\n\nclass CreateFederatedGetattrCompTest(parameterized.TestCase):\n\n def test_raises_type_error_on_none(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_getattr_comp(None, 'x')\n\n @parameterized.named_parameters(('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER))\n def test_returns_comp(self, placement):\n federated_value = building_blocks.Reference(\n 'test',\n computation_types.FederatedType([('a', tf.int32), ('b', tf.bool)],\n placement))\n get_a_comp = building_block_factory.create_federated_getattr_comp(\n federated_value, 'a')\n self.assertEqual(str(get_a_comp), '(x -> x.a)')\n get_b_comp = building_block_factory.create_federated_getattr_comp(\n federated_value, 'b')\n self.assertEqual(str(get_b_comp), '(x -> x.b)')\n non_federated_arg = building_blocks.Reference(\n 'test',\n computation_types.NamedTupleType([('a', tf.int32), ('b', tf.bool)]))\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_federated_getattr_comp(\n non_federated_arg, 'a')\n with self.assertRaisesRegex(ValueError, 'has no element of name `c`'):\n _ = building_block_factory.create_federated_getattr_comp(\n federated_value, 'c')\n\n\nclass CreateFederatedGetattrCallTest(parameterized.TestCase):\n\n def test_raises_type_error_on_none(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_getattr_call(None, 'x')\n\n @parameterized.named_parameters(\n ('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER),\n )\n def test_returns_named(self, placement):\n federated_comp_named = building_blocks.Reference(\n 'test',\n computation_types.FederatedType([('a', tf.int32),\n ('b', tf.bool), tf.int32], placement))\n self.assertEqual(\n 
str(federated_comp_named.type_signature.member),\n '<a=int32,b=bool,int32>')\n name_a = building_block_factory.create_federated_getattr_call(\n federated_comp_named, 'a')\n name_b = building_block_factory.create_federated_getattr_call(\n federated_comp_named, 'b')\n self.assertIsInstance(name_a.type_signature,\n computation_types.FederatedType)\n self.assertIsInstance(name_b.type_signature,\n computation_types.FederatedType)\n self.assertEqual(str(name_a.type_signature.member), 'int32')\n self.assertEqual(str(name_b.type_signature.member), 'bool')\n try:\n type_utils.check_federated_type(\n name_a.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n try:\n type_utils.check_federated_type(\n name_b.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n with self.assertRaisesRegex(ValueError, 'has no element of name `c`'):\n _ = building_block_factory.create_federated_getattr_call(\n federated_comp_named, 'c')\n\n\nclass CreateFederatedGetitemCallTest(parameterized.TestCase):\n\n def test_fails_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_getitem_call(None, 0)\n\n @parameterized.named_parameters(\n ('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER),\n )\n def test_returns_named(self, placement):\n federated_comp_named = building_blocks.Reference(\n 'test',\n computation_types.FederatedType([('a', tf.int32), ('b', tf.bool)],\n placement))\n self.assertEqual(\n str(federated_comp_named.type_signature.member), '<a=int32,b=bool>')\n idx_0 = building_block_factory.create_federated_getitem_call(\n federated_comp_named, 0)\n idx_1 = building_block_factory.create_federated_getitem_call(\n federated_comp_named, 1)\n self.assertIsInstance(idx_0.type_signature, computation_types.FederatedType)\n self.assertIsInstance(idx_1.type_signature, computation_types.FederatedType)\n self.assertEqual(str(idx_0.type_signature.member), 'int32')\n self.assertEqual(str(idx_1.type_signature.member), 'bool')\n try:\n type_utils.check_federated_type(idx_0.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n try:\n type_utils.check_federated_type(idx_1.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n flipped = building_block_factory.create_federated_getitem_call(\n federated_comp_named, slice(None, None, -1))\n self.assertIsInstance(flipped.type_signature,\n computation_types.FederatedType)\n self.assertEqual(str(flipped.type_signature.member), '<b=bool,a=int32>')\n try:\n type_utils.check_federated_type(\n flipped.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n\n @parameterized.named_parameters(\n ('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER),\n )\n def test_returns_unnamed(self, placement):\n federated_comp_unnamed = building_blocks.Reference(\n 'test', computation_types.FederatedType([tf.int32, tf.bool], placement))\n self.assertEqual(\n str(federated_comp_unnamed.type_signature.member), '<int32,bool>')\n unnamed_idx_0 = building_block_factory.create_federated_getitem_call(\n federated_comp_unnamed, 0)\n unnamed_idx_1 = 
building_block_factory.create_federated_getitem_call(\n federated_comp_unnamed, 1)\n self.assertIsInstance(unnamed_idx_0.type_signature,\n computation_types.FederatedType)\n self.assertIsInstance(unnamed_idx_1.type_signature,\n computation_types.FederatedType)\n self.assertEqual(str(unnamed_idx_0.type_signature.member), 'int32')\n self.assertEqual(str(unnamed_idx_1.type_signature.member), 'bool')\n try:\n type_utils.check_federated_type(\n unnamed_idx_0.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n try:\n type_utils.check_federated_type(\n unnamed_idx_1.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n unnamed_flipped = building_block_factory.create_federated_getitem_call(\n federated_comp_unnamed, slice(None, None, -1))\n self.assertIsInstance(unnamed_flipped.type_signature,\n computation_types.FederatedType)\n self.assertEqual(str(unnamed_flipped.type_signature.member), '<bool,int32>')\n try:\n type_utils.check_federated_type(\n unnamed_flipped.type_signature, placement=placement)\n except TypeError:\n self.fail(\n 'Function \\'check_federated_type\\' raised TypeError unexpectedly.')\n\n\nclass CreateFederatedSetitemLambdaTest(parameterized.TestCase):\n\n def test_fails_on_bad_type(self):\n bad_type = computation_types.FederatedType([('a', tf.int32)],\n placement_literals.CLIENTS)\n value_comp = building_blocks.Data('x', tf.int32)\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_named_tuple_setattr_lambda(\n bad_type, 'a', value_comp)\n\n def test_fails_on_none_name(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32)])\n value_comp = building_blocks.Data('x', tf.int32)\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, None, value_comp)\n\n def test_fails_on_none_value(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32)])\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'a', None)\n\n def test_fails_implicit_type_conversion(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32),\n ('b', tf.bool)])\n value_comp = building_blocks.Data('x', tf.int32)\n with self.assertRaisesRegex(TypeError, 'incompatible type'):\n _ = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'b', value_comp)\n\n def test_fails_unknown_name(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32),\n ('b', tf.bool)])\n value_comp = building_blocks.Data('x', tf.int32)\n with self.assertRaises(AttributeError):\n _ = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'c', value_comp)\n\n def test_replaces_single_element(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32),\n ('b', tf.bool)])\n value_comp = building_blocks.Data('x', tf.int32)\n lam = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'a', value_comp)\n # pyformat: disable\n self.assertEqual(\n lam.formatted_representation(),\n '(let\\n'\n ' value_comp_placeholder=x\\n'\n ' in (lambda_arg -> <\\n'\n ' a=value_comp_placeholder,\\n'\n ' b=lambda_arg[1]\\n'\n '>))'\n )\n # pyformat: enable\n\n def test_skips_unnamed_element(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n value_comp = building_blocks.Data('x', 
tf.int32)\n lam = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'a', value_comp)\n # pyformat: disable\n self.assertEqual(\n lam.formatted_representation(),\n '(let\\n'\n ' value_comp_placeholder=x\\n'\n ' in (lambda_arg -> <\\n'\n ' a=value_comp_placeholder,\\n'\n ' lambda_arg[1],\\n'\n ' b=lambda_arg[2]\\n'\n '>))'\n )\n # pyformat: enable\n\n def test_leaves_type_signature_unchanged(self):\n good_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n value_comp = building_blocks.Data('x', tf.int32)\n lam = building_block_factory.create_named_tuple_setattr_lambda(\n good_type, 'a', value_comp)\n self.assertTrue(\n type_utils.are_equivalent_types(lam.type_signature.parameter,\n lam.type_signature.result))\n\n\nclass CreateFederatedSetatterCallTest(parameterized.TestCase):\n\n def test_fails_on_none_federated_comp(self):\n value_comp = building_blocks.Data('x', tf.int32)\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_federated_setattr_call(\n None, 'a', value_comp)\n\n def test_fails_non_federated_type(self):\n bad_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n bad_comp = building_blocks.Data('data', bad_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_federated_setattr_call(\n bad_comp, 'a', value_comp)\n\n def test_fails_on_none_name(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.CLIENTS)\n acceptable_comp = building_blocks.Data('data', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_federated_setattr_call(\n acceptable_comp, None, value_comp)\n\n def test_fails_on_none_value(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.CLIENTS)\n acceptable_comp = building_blocks.Data('data', good_type)\n\n with self.assertRaises(TypeError):\n _ = building_block_factory.create_federated_setattr_call(\n acceptable_comp, 'a', None)\n\n def test_constructs_correct_intrinsic_clients(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.CLIENTS)\n federated_comp = building_blocks.Data('federated_comp', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n federated_setattr = building_block_factory.create_federated_setattr_call(\n federated_comp, 'a', value_comp)\n self.assertEqual(federated_setattr.function.uri,\n intrinsic_defs.FEDERATED_MAP.uri)\n\n def test_constructs_correct_intrinsic_server(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.SERVER)\n federated_comp = building_blocks.Data('federated_comp', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n federated_setattr = building_block_factory.create_federated_setattr_call(\n federated_comp, 'a', value_comp)\n self.assertEqual(federated_setattr.function.uri,\n 
intrinsic_defs.FEDERATED_APPLY.uri)\n\n @parameterized.named_parameters(('clients', placement_literals.CLIENTS),\n ('server', placement_literals.SERVER))\n def test_leaves_type_signatures_alone(self, placement):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type, placement)\n federated_comp = building_blocks.Data('federated_comp', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n federated_setattr = building_block_factory.create_federated_setattr_call(\n federated_comp, 'a', value_comp)\n self.assertTrue(\n type_utils.are_equivalent_types(federated_setattr.type_signature,\n federated_comp.type_signature))\n\n def test_constructs_correct_computation_clients(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.CLIENTS)\n federated_comp = building_blocks.Data('federated_comp', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n federated_setattr = building_block_factory.create_federated_setattr_call(\n federated_comp, 'a', value_comp)\n # pyformat: disable\n self.assertEqual(\n federated_setattr.formatted_representation(),\n 'federated_map(<\\n'\n ' (let\\n'\n ' value_comp_placeholder=x\\n'\n ' in (lambda_arg -> <\\n'\n ' a=value_comp_placeholder,\\n'\n ' lambda_arg[1],\\n'\n ' b=lambda_arg[2]\\n'\n ' >)),\\n'\n ' federated_comp\\n'\n '>)'\n )\n # pyformat: enable\n\n def test_constructs_correct_computation_server(self):\n named_tuple_type = computation_types.NamedTupleType([('a', tf.int32),\n (None, tf.float32),\n ('b', tf.bool)])\n good_type = computation_types.FederatedType(named_tuple_type,\n placement_literals.SERVER)\n federated_comp = building_blocks.Data('federated_comp', good_type)\n value_comp = building_blocks.Data('x', tf.int32)\n\n federated_setattr = building_block_factory.create_federated_setattr_call(\n federated_comp, 'a', value_comp)\n # pyformat: disable\n self.assertEqual(\n federated_setattr.formatted_representation(),\n 'federated_apply(<\\n'\n ' (let\\n'\n ' value_comp_placeholder=x\\n'\n ' in (lambda_arg -> <\\n'\n ' a=value_comp_placeholder,\\n'\n ' lambda_arg[1],\\n'\n ' b=lambda_arg[2]\\n'\n ' >)),\\n'\n ' federated_comp\\n'\n '>)'\n )\n # pyformat: enable\n\n\nclass CreateComputationAppendingTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_comp1(self):\n comp2 = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_computation_appending(None, comp2)\n\n def test_raises_type_error_with_none_comp2(self):\n value = building_blocks.Data('x', tf.int32)\n comp1 = building_blocks.Tuple([value, value])\n with self.assertRaises(TypeError):\n building_block_factory.create_computation_appending(comp1, None)\n\n def test_raises_type_error_with_comp1_bad_type(self):\n comp1 = building_blocks.Data('x', tf.int32)\n comp2 = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_computation_appending(comp1, comp2)\n\n def test_returns_comp_unnamed(self):\n value = building_blocks.Data('x', tf.int32)\n comp1 = building_blocks.Tuple([value, value])\n comp2 = building_blocks.Data('y', tf.int32)\n comp = building_block_factory.create_computation_appending(comp1, comp2)\n self.assertEqual(\n comp.compact_representation(),\n '(let comps=<<x,x>,y> in 
<comps[0][0],comps[0][1],comps[1]>)')\n self.assertEqual(str(comp.type_signature), '<int32,int32,int32>')\n\n def test_returns_comp_named(self):\n value = building_blocks.Data('x', tf.int32)\n comp1 = building_blocks.Tuple((\n ('a', value),\n ('b', value),\n ))\n comp2 = building_blocks.Data('y', tf.int32)\n comp = building_block_factory.create_computation_appending(\n comp1, ('c', comp2))\n self.assertEqual(\n comp.compact_representation(),\n '(let comps=<<a=x,b=x>,y> in <a=comps[0][0],b=comps[0][1],c=comps[1]>)')\n self.assertEqual(str(comp.type_signature), '<a=int32,b=int32,c=int32>')\n\n\nclass CreateFederatedAggregateTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n zero = building_blocks.Data('z', tf.int32)\n accumulate_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n accumulate_result = building_blocks.Data('a', tf.int32)\n accumulate = building_blocks.Lambda('x', accumulate_type, accumulate_result)\n merge_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n merge_result = building_blocks.Data('m', tf.int32)\n merge = building_blocks.Lambda('x', merge_type, merge_result)\n report_ref = building_blocks.Reference('r', tf.int32)\n report = building_blocks.Lambda(report_ref.name, report_ref.type_signature,\n report_ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_aggregate(None, zero, accumulate,\n merge, report)\n\n def test_raises_type_error_with_none_zero(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n accumulate_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n accumulate_result = building_blocks.Data('a', tf.int32)\n accumulate = building_blocks.Lambda('x', accumulate_type, accumulate_result)\n merge_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n merge_result = building_blocks.Data('m', tf.int32)\n merge = building_blocks.Lambda('x', merge_type, merge_result)\n report_ref = building_blocks.Reference('r', tf.int32)\n report = building_blocks.Lambda(report_ref.name, report_ref.type_signature,\n report_ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_aggregate(value, None, accumulate,\n merge, report)\n\n def test_raises_type_error_with_none_accumulate(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n merge_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n merge_result = building_blocks.Data('m', tf.int32)\n merge = building_blocks.Lambda('x', merge_type, merge_result)\n report_ref = building_blocks.Reference('r', tf.int32)\n report = building_blocks.Lambda(report_ref.name, report_ref.type_signature,\n report_ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_aggregate(value, zero, None,\n merge, report)\n\n def test_raises_type_error_with_none_merge(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n accumulate_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n accumulate_result = building_blocks.Data('a', tf.int32)\n accumulate = building_blocks.Lambda('x', accumulate_type, accumulate_result)\n report_ref = building_blocks.Reference('r', tf.int32)\n report = building_blocks.Lambda(report_ref.name, report_ref.type_signature,\n 
report_ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_aggregate(value, zero, accumulate,\n None, report)\n\n def test_raises_type_error_with_none_report(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n accumulate_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n accumulate_result = building_blocks.Data('a', tf.int32)\n accumulate = building_blocks.Lambda('x', accumulate_type, accumulate_result)\n merge_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n merge_result = building_blocks.Data('m', tf.int32)\n merge = building_blocks.Lambda('x', merge_type, merge_result)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_aggregate(value, zero, accumulate,\n merge, None)\n\n def test_returns_federated_aggregate(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n accumulate_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n accumulate_result = building_blocks.Data('a', tf.int32)\n accumulate = building_blocks.Lambda('x', accumulate_type, accumulate_result)\n merge_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n merge_result = building_blocks.Data('m', tf.int32)\n merge = building_blocks.Lambda('x', merge_type, merge_result)\n report_ref = building_blocks.Reference('r', tf.int32)\n report = building_blocks.Lambda(report_ref.name, report_ref.type_signature,\n report_ref)\n comp = building_block_factory.create_federated_aggregate(\n value, zero, accumulate, merge, report)\n self.assertEqual(comp.compact_representation(),\n 'federated_aggregate(<v,z,(x -> a),(x -> m),(r -> r)>)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedApplyTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_fn(self):\n arg = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_apply(None, arg)\n\n def test_raises_type_error_with_none_arg(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_apply(fn, None)\n\n def test_returns_federated_apply(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.FederatedType(tf.int32, placements.SERVER)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_federated_apply(fn, arg)\n self.assertEqual(comp.compact_representation(),\n 'federated_apply(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedBroadcastTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_broadcast(None)\n\n def test_returns_federated_broadcast(self):\n value_type = computation_types.FederatedType(tf.int32, placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_broadcast(value)\n self.assertEqual(comp.compact_representation(), 'federated_broadcast(v)')\n self.assertEqual(str(comp.type_signature), 'int32@CLIENTS')\n\n\nclass 
CreateFederatedCollectTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_collect(None)\n\n def test_returns_federated_collect(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_collect(value)\n self.assertEqual(comp.compact_representation(), 'federated_collect(v)')\n self.assertEqual(str(comp.type_signature), 'int32*@SERVER')\n\n def test_constructs_federated_collect_with_all_equal_argument(self):\n value_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_collect(value)\n self.assertEqual(comp.compact_representation(), 'federated_collect(v)')\n self.assertEqual(str(comp.type_signature), 'int32*@SERVER')\n\n\nclass CreateFederatedEvalTest(absltest.TestCase):\n\n def assert_type_error(self, fn, placement):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_eval(fn, placement)\n\n def test_raises_type_error_with_none_fn(self):\n self.assert_type_error(None, placement_literals.CLIENTS)\n\n def test_raises_type_error_with_nonfunctional_fn(self):\n fn = building_blocks.Data('y', tf.int32)\n self.assert_type_error(fn, placement_literals.CLIENTS)\n\n def test_returns_federated_eval(self):\n fn = building_blocks.Data('y',\n computation_types.FunctionType(None, tf.int32))\n comp = building_block_factory.create_federated_eval(\n fn, placement_literals.CLIENTS)\n self.assertEqual(comp.compact_representation(),\n 'federated_eval_at_clients(y)')\n self.assertEqual(str(comp.type_signature), '{int32}@CLIENTS')\n\n\nclass CreateFederatedMapTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_fn(self):\n arg = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map(None, arg)\n\n def test_raises_type_error_with_none_arg(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map(fn, None)\n\n def test_returns_federated_map(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_federated_map(fn, arg)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), '{int32}@CLIENTS')\n\n\nclass CreateFederatedMapAllEqualTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_fn(self):\n arg = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map_all_equal(None, arg)\n\n def test_raises_type_error_with_none_arg(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map_all_equal(fn, None)\n\n def test_returns_federated_map_all_equal(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.FederatedType(\n 
tf.int32, placements.CLIENTS, all_equal=True)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_federated_map_all_equal(fn, arg)\n self.assertEqual(comp.compact_representation(),\n 'federated_map_all_equal(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), 'int32@CLIENTS')\n\n\nclass CreateFederatedMapOrApplyTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_fn(self):\n arg = building_blocks.Data('y', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map_or_apply(None, arg)\n\n def test_raises_type_error_with_none_arg(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_map_or_apply(fn, None)\n\n def test_returns_federated_apply(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.FederatedType(tf.int32, placements.SERVER)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_federated_map_or_apply(fn, arg)\n self.assertEqual(comp.compact_representation(),\n 'federated_apply(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n def test_returns_federated_map(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_federated_map_or_apply(fn, arg)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), '{int32}@CLIENTS')\n\n\nclass CreateFederatedMeanTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_mean(None, None)\n\n def test_returns_federated_mean(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_mean(value, None)\n self.assertEqual(comp.compact_representation(), 'federated_mean(v)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n def test_returns_federated_weighted_mean(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n weight_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n weight = building_blocks.Data('w', weight_type)\n comp = building_block_factory.create_federated_mean(value, weight)\n self.assertEqual(comp.compact_representation(),\n 'federated_weighted_mean(<v,w>)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedReduceTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n zero = building_blocks.Data('z', tf.int32)\n op_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_reduce(None, zero, op)\n\n def test_raises_type_error_with_none_zero(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n op_type = 
computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_reduce(value, None, op)\n\n def test_raises_type_error_with_none_op(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_reduce(value, zero, None)\n\n def test_returns_federated_reduce(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n op_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n comp = building_block_factory.create_federated_reduce(value, zero, op)\n self.assertEqual(comp.compact_representation(),\n 'federated_reduce(<v,z,(x -> o)>)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedSecureSumTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n bitwidth = building_block_factory.create_compiled_identity(\n tf.int32, name='b')\n\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_secure_sum(None, bitwidth)\n\n def test_raises_type_error_with_none_bitwidth(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_secure_sum(value, None)\n\n def test_returns_federated_sum(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n bitwidth = building_block_factory.create_tensorflow_constant(\n tf.int32, 8, 'b')\n comp = building_block_factory.create_federated_secure_sum(value, bitwidth)\n self.assertEqual(comp.compact_representation(),\n 'federated_secure_sum(<v,comp#b()>)')\n self.assertEqual(comp.type_signature.compact_representation(),\n 'int32@SERVER')\n\n\nclass CreateFederatedSumTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_sum(None)\n\n def test_returns_federated_sum(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_sum(value)\n self.assertEqual(comp.compact_representation(), 'federated_sum(v)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedUnzipTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_unzip(None)\n\n def test_returns_tuple_federated_map_with_empty_value(self):\n value_type = computation_types.FederatedType([], placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n with self.assertRaises(ValueError):\n building_block_factory.create_federated_unzip(value)\n\n def test_returns_tuple_federated_map_with_one_value_unnamed(self):\n value_type = computation_types.FederatedType((tf.int32,),\n placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = 
building_block_factory.create_federated_unzip(value)\n self.assertEqual(\n comp.compact_representation(),\n '(let value=v in <federated_map(<(arg -> arg[0]),value>)>)')\n self.assertEqual(str(comp.type_signature), '<{int32}@CLIENTS>')\n\n def test_returns_tuple_federated_map_with_one_value_named(self):\n type_signature = computation_types.NamedTupleType((('a', tf.int32),))\n value_type = computation_types.FederatedType(type_signature,\n placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n self.assertEqual(\n comp.compact_representation(),\n '(let value=v in <a=federated_map(<(arg -> arg[0]),value>)>)')\n self.assertEqual(str(comp.type_signature), '<a={int32}@CLIENTS>')\n\n def test_returns_tuple_federated_map_with_two_values_unnamed(self):\n value_type = computation_types.FederatedType((tf.int32, tf.int32),\n placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' federated_map(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<{int32}@CLIENTS,{int32}@CLIENTS>')\n\n def test_returns_tuple_federated_map_with_two_values_named(self):\n type_signature = computation_types.NamedTupleType(\n (('a', tf.int32), ('b', tf.int32)))\n value_type = computation_types.FederatedType(type_signature,\n placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' a=federated_map(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' b=federated_map(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<a={int32}@CLIENTS,b={int32}@CLIENTS>')\n\n def test_returns_tuple_federated_map_with_two_values_different_typed(self):\n value_type = computation_types.FederatedType((tf.int32, tf.bool),\n placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' federated_map(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<{int32}@CLIENTS,{bool}@CLIENTS>')\n\n def test_returns_tuple_federated_apply_with_one_value_unnamed(self):\n value_type = computation_types.FederatedType((tf.int32,), placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n self.assertEqual(\n comp.compact_representation(),\n '(let value=v in <federated_apply(<(arg -> arg[0]),value>)>)')\n self.assertEqual(str(comp.type_signature), '<int32@SERVER>')\n\n def test_returns_tuple_federated_apply_with_one_value_named(self):\n type_signature = computation_types.NamedTupleType((('a', tf.int32),))\n value_type = computation_types.FederatedType(type_signature,\n placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = 
building_block_factory.create_federated_unzip(value)\n self.assertEqual(\n comp.compact_representation(),\n '(let value=v in <a=federated_apply(<(arg -> arg[0]),value>)>)')\n self.assertEqual(str(comp.type_signature), '<a=int32@SERVER>')\n\n def test_returns_tuple_federated_apply_with_two_values_unnamed(self):\n value_type = computation_types.FederatedType((tf.int32, tf.int32),\n placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<int32@SERVER,int32@SERVER>')\n\n def test_returns_tuple_federated_apply_with_two_values_named(self):\n type_signature = computation_types.NamedTupleType(\n (('a', tf.int32), ('b', tf.int32)))\n value_type = computation_types.FederatedType(type_signature,\n placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' a=federated_apply(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' b=federated_apply(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<a=int32@SERVER,b=int32@SERVER>')\n\n def test_returns_tuple_federated_apply_with_two_values_different_typed(self):\n value_type = computation_types.FederatedType((tf.int32, tf.bool),\n placements.SERVER)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_unzip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n '(let\\n'\n ' value=v\\n'\n ' in <\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg[0]),\\n'\n ' value\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg[1]),\\n'\n ' value\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<int32@SERVER,bool@SERVER>')\n\n\nclass CreateFederatedValueTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_value(None,\n placement_literals.CLIENTS)\n\n def test_raises_type_error_with_none_placement(self):\n value = building_blocks.Data('v', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_value(value, None)\n\n def test_raises_type_error_with_unknown_placement(self):\n value = building_blocks.Data('v', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_value(value, 'unknown')\n\n def test_returns_federated_value_at_clients(self):\n value = building_blocks.Data('v', tf.int32)\n comp = building_block_factory.create_federated_value(\n value, placement_literals.CLIENTS)\n self.assertEqual(comp.compact_representation(),\n 'federated_value_at_clients(v)')\n self.assertEqual(str(comp.type_signature), 'int32@CLIENTS')\n\n def test_returns_federated_value_at_server(self):\n value = building_blocks.Data('v', tf.int32)\n comp = building_block_factory.create_federated_value(\n value, placement_literals.SERVER)\n self.assertEqual(comp.compact_representation(),\n 
'federated_value_at_server(v)')\n self.assertEqual(str(comp.type_signature), 'int32@SERVER')\n\n\nclass CreateFederatedZipTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_zip(None)\n\n def test_raises_value_error_with_empty_value(self):\n value_type = computation_types.NamedTupleType([])\n value = building_blocks.Data('v', value_type)\n with self.assertRaises(ValueError):\n building_block_factory.create_federated_zip(value)\n\n def test_returns_federated_map_with_one_value_unnamed(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType((type_signature,))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(arg -> <arg>),v[0]>)')\n self.assertEqual(str(comp.type_signature), '{<int32>}@CLIENTS')\n\n def test_returns_federated_map_with_one_value_unnamed_tuple(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((value,))\n comp = building_block_factory.create_federated_zip(tup)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(arg -> <arg>),<v>[0]>)')\n self.assertEqual(str(comp.type_signature), '{<int32>}@CLIENTS')\n\n def test_returns_federated_map_with_one_value_named(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType((('a', type_signature),))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(arg -> <a=arg>),v[0]>)')\n self.assertEqual(str(comp.type_signature), '{<a=int32>}@CLIENTS')\n\n def test_returns_federated_map_with_one_value_named_tuple(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((('a', value),))\n comp = building_block_factory.create_federated_zip(tup)\n self.assertEqual(comp.compact_representation(),\n 'federated_map(<(arg -> <a=arg>),<a=v>[0]>)')\n self.assertEqual(str(comp.type_signature), '{<a=int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_two_values_unnamed(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType(\n (type_signature, type_signature))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_two_values_unnamed_tuple(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((value, value))\n comp = building_block_factory.create_federated_zip(tup)\n # pyformat: disable\n 
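# Note: zipping a two-element tuple of CLIENTS-placed values is expected to lower to a\n    # federated_zip_at_clients call wrapped in federated_map calls that rebuild the tuple,\n    # matching the expected AST asserted below.\n    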
self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' v,\\n'\n ' v\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_two_values_named(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType(\n (('a', type_signature), ('b', type_signature)))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<a=int32,b=int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_two_values_named_tuple(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((('a', value), ('b', value)))\n comp = building_block_factory.create_federated_zip(tup)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' a=v,\\n'\n ' b=v\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<a=int32,b=int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_unnamed(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType(\n (type_signature, type_signature, type_signature))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,int32,int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_unnamed_tuple(\n self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((value, value, value))\n comp = building_block_factory.create_federated_zip(tup)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' 
x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' v,\\n'\n ' v,\\n'\n ' v\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,int32,int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_named(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType((\n ('a', type_signature),\n ('b', type_signature),\n ('c', type_signature),\n ))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1],\\n'\n ' c=x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '{<a=int32,b=int32,c=int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_named_tuple(self):\n value_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((\n ('a', value),\n ('b', value),\n ('c', value),\n ))\n comp = building_block_factory.create_federated_zip(tup)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1],\\n'\n ' c=x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' a=v,\\n'\n ' b=v,\\n'\n ' c=v\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '{<a=int32,b=int32,c=int32>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_different_typed(\n self):\n type_signature1 = computation_types.FederatedType(tf.int32,\n placements.CLIENTS)\n type_signature2 = computation_types.FederatedType(tf.float32,\n placements.CLIENTS)\n type_signature3 = computation_types.FederatedType(tf.bool,\n placements.CLIENTS)\n value_type = computation_types.NamedTupleType(\n (type_signature1, type_signature2, type_signature3))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' 
arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,float32,bool>}@CLIENTS')\n\n def test_returns_federated_zip_at_clients_with_three_values_different_typed_tuple(\n self):\n value_type1 = computation_types.FederatedType(tf.int32, placements.CLIENTS)\n value1 = building_blocks.Data('v1', value_type1)\n value_type2 = computation_types.FederatedType(tf.float32,\n placements.CLIENTS)\n value2 = building_blocks.Data('v2', value_type2)\n value_type3 = computation_types.FederatedType(tf.bool, placements.CLIENTS)\n value3 = building_blocks.Data('v3', value_type3)\n tup = building_blocks.Tuple((value1, value2, value3))\n comp = building_block_factory.create_federated_zip(tup)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' v1,\\n'\n ' v2,\\n'\n ' v3\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '{<int32,float32,bool>}@CLIENTS')\n\n def test_returns_federated_apply_with_one_value_unnamed(self):\n value_type = computation_types.FederatedType(tf.int32, placements.SERVER)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((value,))\n comp = building_block_factory.create_federated_zip(tup)\n self.assertEqual(comp.compact_representation(),\n 'federated_apply(<(arg -> <arg>),<v>[0]>)')\n self.assertEqual(str(comp.type_signature), '<int32>@SERVER')\n\n def test_returns_federated_apply_with_one_value_named(self):\n value_type = computation_types.FederatedType(tf.int32, placements.SERVER)\n value = building_blocks.Data('v', value_type)\n tup = building_blocks.Tuple((('a', value),))\n comp = building_block_factory.create_federated_zip(tup)\n self.assertEqual(comp.compact_representation(),\n 'federated_apply(<(arg -> <a=arg>),<a=v>[0]>)')\n self.assertEqual(str(comp.type_signature), '<a=int32>@SERVER')\n\n def test_returns_federated_zip_at_server_with_two_values_unnamed(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.SERVER)\n value_type = computation_types.NamedTupleType(\n (type_signature, type_signature))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1]\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_server(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<int32,int32>@SERVER')\n\n def test_returns_federated_zip_at_server_with_two_values_named(self):\n type_signature = computation_types.FederatedType(tf.int32,\n 
placements.SERVER)\n value_type = computation_types.NamedTupleType(\n (('a', type_signature), ('b', type_signature)))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1]\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> arg),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_server(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<a=int32,b=int32>@SERVER')\n\n def test_returns_federated_zip_at_server_with_three_values_unnamed(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.SERVER)\n value_type = computation_types.NamedTupleType(\n (type_signature, type_signature, type_signature))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_server(<\\n'\n ' federated_zip_at_server(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<int32,int32,int32>@SERVER')\n\n def test_returns_federated_zip_at_server_with_three_values_named(self):\n type_signature = computation_types.FederatedType(tf.int32,\n placements.SERVER)\n value_type = computation_types.NamedTupleType((\n ('a', type_signature),\n ('b', type_signature),\n ('c', type_signature),\n ))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=x[1],\\n'\n ' c=x[2]\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_server(<\\n'\n ' federated_zip_at_server(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<a=int32,b=int32,c=int32>@SERVER')\n\n def test_returns_federated_zip_at_server_with_three_values_different_typed(\n self):\n type_signature1 = computation_types.FederatedType(tf.int32,\n placements.SERVER)\n type_signature2 = computation_types.FederatedType(tf.float32,\n placements.SERVER)\n type_signature3 = computation_types.FederatedType(tf.bool,\n placements.SERVER)\n value_type = computation_types.NamedTupleType(\n (type_signature1, type_signature2, type_signature3))\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_federated_zip(value)\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> (let\\n'\n ' 
comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=v\\n'\n ' in federated_zip_at_server(<\\n'\n ' federated_zip_at_server(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(str(comp.type_signature), '<int32,float32,bool>@SERVER')\n\n def test_flat_raises_type_error_with_inconsistent_placement(self):\n client_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n server_type = computation_types.FederatedType(\n tf.int32, placements.SERVER, all_equal=True)\n value_type = computation_types.NamedTupleType([('a', client_type),\n ('b', server_type)])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=int32@CLIENTS,b=int32@SERVER>')\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_zip(value)\n\n def test_nested_raises_type_error_with_inconsistent_placement(self):\n client_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n server_type = computation_types.FederatedType(\n tf.int32, placements.SERVER, all_equal=True)\n tuple_type = computation_types.NamedTupleType([('c', server_type),\n ('d', server_type)])\n value_type = computation_types.NamedTupleType([('a', client_type),\n ('b', tuple_type)])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=int32@CLIENTS,b=<c=int32@SERVER,d=int32@SERVER>>')\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_zip(value)\n\n def test_flat_raises_type_error_with_unplaced(self):\n client_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n value_type = computation_types.NamedTupleType([('a', client_type),\n ('b', tf.int32)])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=int32@CLIENTS,b=int32>')\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_zip(value)\n\n def test_nested_raises_type_error_with_unplaced(self):\n client_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n tuple_type = computation_types.NamedTupleType([('c', tf.int32),\n ('d', tf.int32)])\n value_type = computation_types.NamedTupleType([('a', client_type),\n ('b', tuple_type)])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=int32@CLIENTS,b=<c=int32,d=int32>>')\n with self.assertRaises(TypeError):\n building_block_factory.create_federated_zip(value)\n\n def test_nested_returns_federated_zip_at_clients(self):\n int_type = computation_types.FederatedType(\n tf.int32, placements.CLIENTS, all_equal=True)\n tuple_type = computation_types.NamedTupleType([('c', int_type),\n ('d', int_type)])\n value_type = computation_types.NamedTupleType([('a', int_type),\n ('b', tuple_type)])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=int32@CLIENTS,b=<c=int32@CLIENTS,d=int32@CLIENTS>>')\n\n comp = building_block_factory.create_federated_zip(value)\n\n self.assertEqual(\n str(comp.type_signature), '{<a=int32,b=<c=int32,d=int32>>}@CLIENTS')\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 
'federated_map(<\\n'\n ' (x -> <\\n'\n ' a=x[0],\\n'\n ' b=<\\n'\n ' c=x[1],\\n'\n ' d=x[2]\\n'\n ' >\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (x -> <\\n'\n ' x[0],\\n'\n ' x[1],\\n'\n ' x[2]\\n'\n ' >),\\n'\n ' federated_map(<\\n'\n ' (arg -> (let\\n'\n ' comps=<\\n'\n ' (arg -> arg)(arg[0]),\\n'\n ' arg[1]\\n'\n ' >\\n'\n ' in <\\n'\n ' comps[0][0],\\n'\n ' comps[0][1],\\n'\n ' comps[1]\\n'\n ' >)),\\n'\n ' (let\\n'\n ' value=<\\n'\n ' v[0],\\n'\n ' v[1][0],\\n'\n ' v[1][1]\\n'\n ' >\\n'\n ' in federated_zip_at_clients(<\\n'\n ' federated_zip_at_clients(<\\n'\n ' value[0],\\n'\n ' value[1]\\n'\n ' >),\\n'\n ' value[2]\\n'\n ' >))\\n'\n ' >)\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n\n def test_nested_returns_federated_zip_at_server(self):\n value_type = computation_types.NamedTupleType([\n ('a',\n computation_types.NamedTupleType([\n ('b',\n computation_types.FederatedType(\n computation_types.NamedTupleType([('c', tf.int32)]),\n placements.SERVER,\n all_equal=True))\n ]))\n ])\n value = building_blocks.Data('v', value_type)\n self.assertEqual(value.type_signature.compact_representation(),\n '<a=<b=<c=int32>@SERVER>>')\n\n comp = building_block_factory.create_federated_zip(value)\n\n self.assertEqual(str(comp.type_signature), '<a=<b=<c=int32>>>@SERVER')\n # pyformat: disable\n self.assertEqual(\n comp.formatted_representation(),\n 'federated_apply(<\\n'\n ' (x -> <\\n'\n ' a=<\\n'\n ' b=x[0]\\n'\n ' >\\n'\n ' >),\\n'\n ' federated_apply(<\\n'\n ' (arg -> <\\n'\n ' arg\\n'\n ' >),\\n'\n ' <\\n'\n ' v[0][0]\\n'\n ' >[0]\\n'\n ' >)\\n'\n '>)'\n )\n # pyformat: enable\n\n\nclass CreateGenericConstantTest(absltest.TestCase):\n\n def test_raises_on_none_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_generic_constant(None, 0)\n\n def test_raises_non_scalar(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_generic_constant([tf.int32], [0])\n\n def test_constructs_tensor_zero(self):\n tensor_type = computation_types.TensorType(tf.float32, [2, 2])\n tensor_zero = building_block_factory.create_generic_constant(tensor_type, 0)\n self.assertEqual(tensor_zero.type_signature, tensor_type)\n self.assertIsInstance(tensor_zero, building_blocks.Call)\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(tensor_zero.function.proto),\n np.zeros([2, 2])))\n\n def test_create_unnamed_tuple_zero(self):\n tuple_type = [computation_types.TensorType(tf.float32, [2, 2])] * 2\n tuple_zero = building_block_factory.create_generic_constant(tuple_type, 0)\n self.assertEqual(tuple_zero.type_signature,\n computation_types.to_type(tuple_type))\n self.assertIsInstance(tuple_zero, building_blocks.Call)\n result = test_utils.run_tensorflow(tuple_zero.function.proto)\n self.assertLen(result, 2)\n self.assertTrue(np.array_equal(result[0], np.zeros([2, 2])))\n self.assertTrue(np.array_equal(result[1], np.zeros([2, 2])))\n\n def test_create_named_tuple_one(self):\n tuple_type = [('a', computation_types.TensorType(tf.float32, [2, 2])),\n ('b', computation_types.TensorType(tf.float32, [2, 2]))]\n tuple_zero = building_block_factory.create_generic_constant(tuple_type, 1)\n self.assertEqual(tuple_zero.type_signature,\n computation_types.to_type(tuple_type))\n self.assertIsInstance(tuple_zero, building_blocks.Call)\n result = test_utils.run_tensorflow(tuple_zero.function.proto)\n self.assertLen(result, 2)\n self.assertTrue(np.array_equal(result.a, np.ones([2, 2])))\n self.assertTrue(np.array_equal(result.b, np.ones([2, 2])))\n\n def 
test_create_federated_tensor_one(self):\n fed_type = computation_types.FederatedType(\n computation_types.TensorType(tf.float32, [2, 2]),\n placement_literals.CLIENTS)\n fed_zero = building_block_factory.create_generic_constant(fed_type, 1)\n self.assertEqual(fed_zero.type_signature.member, fed_type.member)\n self.assertEqual(fed_zero.type_signature.placement, fed_type.placement)\n self.assertTrue(fed_zero.type_signature.all_equal)\n self.assertIsInstance(fed_zero, building_blocks.Call)\n self.assertIsInstance(fed_zero.function, building_blocks.Intrinsic)\n self.assertEqual(fed_zero.function.uri,\n intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri)\n self.assertIsInstance(fed_zero.argument, building_blocks.Call)\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(fed_zero.argument.function.proto),\n np.ones([2, 2])))\n\n def test_create_federated_named_tuple_one(self):\n tuple_type = [('a', computation_types.TensorType(tf.float32, [2, 2])),\n ('b', computation_types.TensorType(tf.float32, [2, 2]))]\n fed_type = computation_types.FederatedType(tuple_type,\n placement_literals.SERVER)\n fed_zero = building_block_factory.create_generic_constant(fed_type, 1)\n self.assertEqual(fed_zero.type_signature.member, fed_type.member)\n self.assertEqual(fed_zero.type_signature.placement, fed_type.placement)\n self.assertTrue(fed_zero.type_signature.all_equal)\n self.assertIsInstance(fed_zero, building_blocks.Call)\n self.assertIsInstance(fed_zero.function, building_blocks.Intrinsic)\n self.assertEqual(fed_zero.function.uri,\n intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri)\n self.assertIsInstance(fed_zero.argument, building_blocks.Call)\n result = test_utils.run_tensorflow(fed_zero.argument.function.proto)\n self.assertLen(result, 2)\n self.assertTrue(np.array_equal(result.a, np.ones([2, 2])))\n self.assertTrue(np.array_equal(result.b, np.ones([2, 2])))\n\n def test_create_named_tuple_of_federated_tensors_zero(self):\n fed_type = computation_types.FederatedType(\n computation_types.TensorType(tf.float32, [2, 2]),\n placement_literals.CLIENTS,\n all_equal=True)\n tuple_type = [('a', fed_type), ('b', fed_type)]\n zero = building_block_factory.create_generic_constant(tuple_type, 0)\n fed_zero = zero.argument[0]\n self.assertEqual(zero.type_signature, computation_types.to_type(tuple_type))\n self.assertIsInstance(fed_zero.function, building_blocks.Intrinsic)\n self.assertEqual(fed_zero.function.uri,\n intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri)\n self.assertIsInstance(fed_zero.argument, building_blocks.Call)\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(fed_zero.argument.function.proto),\n np.zeros([2, 2])))\n\n\nclass CreateSequenceMapTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_fn(self):\n arg_type = computation_types.SequenceType(tf.int32)\n arg = building_blocks.Data('y', arg_type)\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_map(None, arg)\n\n def test_raises_type_error_with_none_arg(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_map(fn, None)\n\n def test_returns_sequence_map(self):\n ref = building_blocks.Reference('x', tf.int32)\n fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)\n arg_type = computation_types.SequenceType(tf.int32)\n arg = building_blocks.Data('y', arg_type)\n comp = building_block_factory.create_sequence_map(fn, arg)\n 
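# Note: the factory is expected to wrap the (x -> x) lambda and the int32* sequence in a\n    # single sequence_map intrinsic call, leaving the element type of the sequence unchanged.\n    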
self.assertEqual(comp.compact_representation(),\n 'sequence_map(<(x -> x),y>)')\n self.assertEqual(str(comp.type_signature), 'int32*')\n\n\nclass CreateSequenceReduceTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n zero = building_blocks.Data('z', tf.int32)\n op_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_reduce(None, zero, op)\n\n def test_raises_type_error_with_none_zero(self):\n value_type = computation_types.SequenceType(tf.int32)\n value = building_blocks.Data('v', value_type)\n op_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_reduce(value, None, op)\n\n def test_raises_type_error_with_none_op(self):\n value_type = computation_types.SequenceType(tf.int32)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_reduce(value, zero, None)\n\n def test_returns_sequence_reduce(self):\n value_type = computation_types.SequenceType(tf.int32)\n value = building_blocks.Data('v', value_type)\n zero = building_blocks.Data('z', tf.int32)\n op_type = computation_types.NamedTupleType((tf.int32, tf.int32))\n op_result = building_blocks.Data('o', tf.int32)\n op = building_blocks.Lambda('x', op_type, op_result)\n comp = building_block_factory.create_sequence_reduce(value, zero, op)\n self.assertEqual(comp.compact_representation(),\n 'sequence_reduce(<v,z,(x -> o)>)')\n self.assertEqual(str(comp.type_signature), 'int32')\n\n\nclass CreateSequenceSumTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_value(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_sequence_sum(None)\n\n def test_returns_federated_sum(self):\n value_type = computation_types.SequenceType(tf.int32)\n value = building_blocks.Data('v', value_type)\n comp = building_block_factory.create_sequence_sum(value)\n self.assertEqual(comp.compact_representation(), 'sequence_sum(v)')\n self.assertEqual(str(comp.type_signature), 'int32')\n\n\nclass CreateNamedFederatedTupleTest(parameterized.TestCase):\n\n def test_raises_on_none(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_named_federated_tuple(None, ['a'])\n\n def test_raises_non_federated_type(self):\n bad_comp = building_blocks.Data('x', computation_types.to_type(tf.int32))\n with self.assertRaises(TypeError):\n building_block_factory.create_named_federated_tuple(bad_comp, ['a'])\n\n def test_raises_federated_non_tuple(self):\n bad_comp = building_blocks.Data(\n 'x',\n computation_types.FederatedType(tf.int32, placement_literals.CLIENTS))\n with self.assertRaises(TypeError):\n building_block_factory.create_named_federated_tuple(bad_comp, ['a'])\n\n def test_raises_on_naked_string(self):\n data_tuple = building_blocks.Data(\n 'x',\n computation_types.FederatedType([tf.int32], placement_literals.CLIENTS))\n with self.assertRaises(TypeError):\n building_block_factory.create_named_federated_tuple(data_tuple, 'a')\n\n def test_raises_list_of_ints(self):\n data_tuple = building_blocks.Data(\n 'x',\n computation_types.FederatedType([tf.int32], placement_literals.SERVER))\n with 
self.assertRaises(TypeError):\n building_block_factory.create_named_federated_tuple(data_tuple, [1])\n\n def test_raises_wrong_list_length(self):\n data_tuple = building_blocks.Data(\n 'x',\n computation_types.FederatedType([tf.int32], placement_literals.SERVER))\n with self.assertRaises(ValueError):\n building_block_factory.create_named_federated_tuple(\n data_tuple, ['a', 'b'])\n\n @parameterized.named_parameters(\n ('server', placement_literals.SERVER),\n ('clients', placement_literals.CLIENTS),\n )\n def test_constructs_correct_type_from_unnamed_tuple(self, placement):\n fed_type = computation_types.FederatedType([tf.int32, tf.float32],\n placement)\n data_tuple = building_blocks.Data('x', fed_type)\n named_tuple = building_block_factory.create_named_federated_tuple(\n data_tuple, ['a', 'b'])\n expected_result_type = computation_types.FederatedType([('a', tf.int32),\n ('b', tf.float32)],\n placement)\n self.assertEqual(expected_result_type, named_tuple.type_signature)\n\n @parameterized.named_parameters(\n ('server', placement_literals.SERVER),\n ('clients', placement_literals.CLIENTS),\n )\n def test_constructs_correct_type_from_named_tuple(self, placement):\n fed_type = computation_types.FederatedType([('c', tf.int32),\n ('d', tf.float32)], placement)\n data_tuple = building_blocks.Data('x', fed_type)\n named_tuple = building_block_factory.create_named_federated_tuple(\n data_tuple, ['a', 'b'])\n expected_result_type = computation_types.FederatedType([('a', tf.int32),\n ('b', tf.float32)],\n placement)\n self.assertEqual(expected_result_type, named_tuple.type_signature)\n\n @parameterized.named_parameters(\n ('server', placement_literals.SERVER),\n ('clients', placement_literals.CLIENTS),\n )\n def test_only_names_unnamed_tuple(self, placement):\n ntt = computation_types.FederatedType([tf.int32, tf.float32], placement)\n data_tuple = building_blocks.Data('data', ntt)\n named_tuple = building_block_factory.create_named_federated_tuple(\n data_tuple, ['a', 'b'])\n self.assertRegexMatch(\n named_tuple.compact_representation(),\n [r'federated_(map|apply)\\(<\\(x -> <a=x\\[0\\],b=x\\[1\\]>\\),data>\\)'])\n\n @parameterized.named_parameters(\n ('server', placement_literals.SERVER),\n ('clients', placement_literals.CLIENTS),\n )\n def test_only_overwrites_existing_names_in_tuple(self, placement):\n fed_type = computation_types.FederatedType([('c', tf.int32),\n ('d', tf.float32)], placement)\n data_tuple = building_blocks.Data('data', fed_type)\n named_tuple = building_block_factory.create_named_federated_tuple(\n data_tuple, ['a', 'b'])\n self.assertRegexMatch(\n named_tuple.compact_representation(),\n [r'federated_(map|apply)\\(<\\(x -> <a=x\\[0\\],b=x\\[1\\]>\\),data>\\)'])\n\n\nclass CreateNamedTupleTest(absltest.TestCase):\n\n def test_raises_type_error_with_none_comp(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_named_tuple(None, ('a',))\n\n def test_raises_type_error_with_wrong_comp_type(self):\n comp = building_blocks.Data('data', tf.int32)\n with self.assertRaises(TypeError):\n building_block_factory.create_named_tuple(comp, ('a',))\n\n def test_raises_type_error_with_wrong_names_type_string(self):\n type_signature = computation_types.NamedTupleType((tf.int32, tf.int32))\n comp = building_blocks.Data('data', type_signature)\n with self.assertRaises(TypeError):\n building_block_factory.create_named_tuple(comp, 'a')\n\n def test_raises_type_error_with_wrong_names_type_ints(self):\n type_signature = computation_types.NamedTupleType((tf.int32, tf.int32))\n 
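# Note: the names argument is expected to be a list or tuple of strings, so anything\n    # else (a bare string here, or integer entries) should be rejected with TypeError.\n    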
comp = building_blocks.Data('data', type_signature)\n with self.assertRaises(TypeError):\n building_block_factory.create_named_tuple(comp, 'a')\n\n def test_raises_value_error_with_wrong_lengths(self):\n type_signature = computation_types.NamedTupleType((tf.int32, tf.int32))\n comp = building_blocks.Data('data', type_signature)\n with self.assertRaises(ValueError):\n building_block_factory.create_named_tuple(comp, ('a',))\n\n def test_creates_named_tuple_from_unamed_tuple(self):\n type_signature = computation_types.NamedTupleType((tf.int32, tf.int32))\n comp = building_blocks.Data('data', type_signature)\n named_comp = building_block_factory.create_named_tuple(comp, ('a', 'b'))\n expected_type_signature = computation_types.NamedTupleType(\n (('a', tf.int32), ('b', tf.int32)))\n self.assertEqual(named_comp.type_signature, expected_type_signature)\n\n def test_creates_named_tuple_from_named_tuple(self):\n type_signature = computation_types.NamedTupleType(\n (('a', tf.int32), ('b', tf.int32)))\n comp = building_blocks.Data('data', type_signature)\n named_comp = building_block_factory.create_named_tuple(comp, ('c', 'd'))\n expected_type_signature = computation_types.NamedTupleType(\n (('c', tf.int32), ('d', tf.int32)))\n self.assertEqual(named_comp.type_signature, expected_type_signature)\n\n\nclass CreateZipTest(absltest.TestCase):\n\n def test_raises_type_error(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_zip(None)\n\n def test_zips_tuple_unnamed(self):\n data_1 = building_blocks.Data('a', tf.int32)\n data_2 = building_blocks.Data('b', tf.float32)\n data_3 = building_blocks.Data('c', tf.bool)\n tup_1 = building_blocks.Tuple((data_1, data_2, data_3))\n tup_2 = building_blocks.Tuple((tup_1, tup_1))\n comp = tup_2\n new_comp = building_block_factory.create_zip(comp)\n self.assertEqual(comp.compact_representation(), '<<a,b,c>,<a,b,c>>')\n # pyformat: disable\n self.assertEqual(\n new_comp.formatted_representation(),\n '(let\\n'\n ' _var1=<\\n'\n ' <\\n'\n ' a,\\n'\n ' b,\\n'\n ' c\\n'\n ' >,\\n'\n ' <\\n'\n ' a,\\n'\n ' b,\\n'\n ' c\\n'\n ' >\\n'\n ' >\\n'\n ' in <\\n'\n ' <\\n'\n ' _var1[0][0],\\n'\n ' _var1[1][0]\\n'\n ' >,\\n'\n ' <\\n'\n ' _var1[0][1],\\n'\n ' _var1[1][1]\\n'\n ' >,\\n'\n ' <\\n'\n ' _var1[0][2],\\n'\n ' _var1[1][2]\\n'\n ' >\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<<int32,float32,bool>,<int32,float32,bool>>')\n self.assertEqual(\n str(new_comp.type_signature),\n '<<int32,int32>,<float32,float32>,<bool,bool>>')\n\n def test_zips_tuple_named(self):\n data_1 = building_blocks.Data('a', tf.int32)\n data_2 = building_blocks.Data('b', tf.float32)\n data_3 = building_blocks.Data('c', tf.bool)\n tup_1 = building_blocks.Tuple((('d', data_1), ('e', data_2), ('f', data_3)))\n tup_2 = building_blocks.Tuple((('g', tup_1), ('h', tup_1)))\n comp = tup_2\n new_comp = building_block_factory.create_zip(comp)\n self.assertEqual(comp.compact_representation(),\n '<g=<d=a,e=b,f=c>,h=<d=a,e=b,f=c>>')\n # pyformat: disable\n self.assertEqual(\n new_comp.formatted_representation(),\n '(let\\n'\n ' _var1=<\\n'\n ' g=<\\n'\n ' d=a,\\n'\n ' e=b,\\n'\n ' f=c\\n'\n ' >,\\n'\n ' h=<\\n'\n ' d=a,\\n'\n ' e=b,\\n'\n ' f=c\\n'\n ' >\\n'\n ' >\\n'\n ' in <\\n'\n ' <\\n'\n ' _var1[0][0],\\n'\n ' _var1[1][0]\\n'\n ' >,\\n'\n ' <\\n'\n ' _var1[0][1],\\n'\n ' _var1[1][1]\\n'\n ' >,\\n'\n ' <\\n'\n ' _var1[0][2],\\n'\n ' _var1[1][2]\\n'\n ' >\\n'\n '>)'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature),\n 
'<g=<d=int32,e=float32,f=bool>,h=<d=int32,e=float32,f=bool>>')\n self.assertEqual(\n str(new_comp.type_signature),\n '<<int32,int32>,<float32,float32>,<bool,bool>>')\n\n def test_zips_reference(self):\n type_signature_1 = computation_types.NamedTupleType(\n [tf.int32, tf.float32, tf.bool])\n type_signature_2 = computation_types.NamedTupleType(\n [type_signature_1, type_signature_1])\n ref = building_blocks.Reference('a', type_signature_2)\n comp = ref\n new_comp = building_block_factory.create_zip(comp)\n self.assertEqual(comp.compact_representation(), 'a')\n # pyformat: disable\n self.assertEqual(\n new_comp.formatted_representation(),\n '<\\n'\n ' <\\n'\n ' a[0][0],\\n'\n ' a[1][0]\\n'\n ' >,\\n'\n ' <\\n'\n ' a[0][1],\\n'\n ' a[1][1]\\n'\n ' >,\\n'\n ' <\\n'\n ' a[0][2],\\n'\n ' a[1][2]\\n'\n ' >\\n'\n '>'\n )\n # pyformat: enable\n self.assertEqual(\n str(comp.type_signature), '<<int32,float32,bool>,<int32,float32,bool>>')\n self.assertEqual(\n str(new_comp.type_signature),\n '<<int32,int32>,<float32,float32>,<bool,bool>>')\n\n\nclass CreateTensorFlowBroadcastFunctionTest(absltest.TestCase):\n\n def test_raises_python_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_to_broadcast_scalar(\n int, tf.TensorShape([]))\n\n def test_raises_list_for_shape(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_to_broadcast_scalar(\n tf.int32, [1, 1])\n\n def test_raises_partially_defined(self):\n with self.assertRaises(ValueError):\n building_block_factory.create_tensorflow_to_broadcast_scalar(\n tf.int32, tf.TensorShape([None, 1]))\n\n def test_constructs_identity_scalar_function(self):\n int_identity = building_block_factory.create_tensorflow_to_broadcast_scalar(\n tf.int32, tf.TensorShape([]))\n for k in range(5):\n result = test_utils.run_tensorflow(int_identity.proto, k)\n self.assertEqual(result, k)\n\n def test_broadcasts_ints_to_nonempty_shape(self):\n int_broadcast = building_block_factory.create_tensorflow_to_broadcast_scalar(\n tf.int32, tf.TensorShape([2, 2]))\n for k in range(5):\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(int_broadcast.proto, k),\n np.array([[k, k], [k, k]])))\n\n def test_broadcasts_bools_to_nonempty_shape(self):\n int_broadcast = building_block_factory.create_tensorflow_to_broadcast_scalar(\n tf.bool, tf.TensorShape([2, 2]))\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(int_broadcast.proto, True),\n np.array([[True, True], [True, True]])))\n\n\nclass CreateTensorFlowBinaryOpTest(absltest.TestCase):\n\n def test_raises_on_none_type(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_binary_operator(None, tf.add)\n\n def test_raises_non_callable_op(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_binary_operator(tf.int32, 1)\n\n def test_raises_on_federated_type(self):\n fed_type = computation_types.FederatedType(tf.int32,\n placement_literals.SERVER)\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_binary_operator(fed_type, tf.add)\n\n def test_raises_on_nested_sequence_type(self):\n hiding_sequence_type = computation_types.NamedTupleType(\n [computation_types.SequenceType(tf.int32)])\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_binary_operator(\n hiding_sequence_type, tf.add)\n\n def test_divide_integers(self):\n integer_division_func = building_block_factory.create_tensorflow_binary_operator(\n tf.int32, 
tf.divide)\n self.assertEqual(\n integer_division_func.type_signature,\n computation_types.FunctionType([tf.int32, tf.int32], tf.float64))\n result_1 = test_utils.run_tensorflow(integer_division_func.proto, [1, 1])\n self.assertEqual(result_1, 1)\n result_2 = test_utils.run_tensorflow(integer_division_func.proto, [1, 2])\n self.assertEqual(result_2, 0.5)\n result_3 = test_utils.run_tensorflow(integer_division_func.proto, [2, 1])\n self.assertEqual(result_3, 2)\n result_4 = test_utils.run_tensorflow(integer_division_func.proto, [1, 0])\n self.assertEqual(result_4, np.inf)\n\n def test_divide_unnamed_tuple(self):\n division_func = building_block_factory.create_tensorflow_binary_operator(\n [tf.int32, tf.float32], tf.divide)\n self.assertEqual(\n division_func.type_signature,\n computation_types.FunctionType(\n [[tf.int32, tf.float32], [tf.int32, tf.float32]],\n [tf.float64, tf.float32]))\n self.assertEqual(\n test_utils.run_tensorflow(division_func.proto, [[1, 0.], [1, 1.]])[0],\n 1)\n self.assertEqual(\n test_utils.run_tensorflow(division_func.proto, [[1, 0.], [1, 1.]])[1],\n 0.)\n\n def test_divide_named_tuple(self):\n integer_division_func = building_block_factory.create_tensorflow_binary_operator(\n [('a', tf.int32), ('b', tf.float32)], tf.divide)\n self.assertDictEqual(\n anonymous_tuple.to_odict(\n test_utils.run_tensorflow(integer_division_func.proto,\n [[1, 0.], [1, 1.]])), {\n 'a': 1,\n 'b': 0.\n })\n\n def test_multiply_integers(self):\n integer_multiplication_func = building_block_factory.create_tensorflow_binary_operator(\n tf.int32, tf.multiply)\n self.assertEqual(\n test_utils.run_tensorflow(integer_multiplication_func.proto, [1, 1]), 1)\n self.assertEqual(\n test_utils.run_tensorflow(integer_multiplication_func.proto, [1, 2]), 2)\n self.assertEqual(\n test_utils.run_tensorflow(integer_multiplication_func.proto, [2, 1]), 2)\n\n def test_multiply_named_tuple(self):\n integer_multiplication_func = building_block_factory.create_tensorflow_binary_operator(\n [('a', tf.int32), ('b', tf.float32)], tf.multiply)\n self.assertDictEqual(\n anonymous_tuple.to_odict(\n test_utils.run_tensorflow(integer_multiplication_func.proto,\n [[1, 0.], [1, 1.]])), {\n 'a': 1,\n 'b': 0.\n })\n self.assertDictEqual(\n anonymous_tuple.to_odict(\n test_utils.run_tensorflow(integer_multiplication_func.proto,\n [[2, 2.], [1, 1.]])), {\n 'a': 2,\n 'b': 2.\n })\n\n def test_add_integers(self):\n integer_add = building_block_factory.create_tensorflow_binary_operator(\n tf.int32, tf.add)\n result_1 = test_utils.run_tensorflow(integer_add.proto, [0, 0])\n self.assertEqual(result_1, 0)\n result_2 = test_utils.run_tensorflow(integer_add.proto, [1, 0])\n self.assertEqual(result_2, 1)\n result_3 = test_utils.run_tensorflow(integer_add.proto, [0, 1])\n self.assertEqual(result_3, 1)\n result_4 = test_utils.run_tensorflow(integer_add.proto, [1, 1])\n self.assertEqual(result_4, 2)\n\n\nclass CreateTensorFlowConstantTest(absltest.TestCase):\n\n def test_raises_on_none_type_spec(self):\n with self.assertRaises(TypeError):\n building_block_factory.create_tensorflow_constant(None, 0)\n\n def test_raises_type_spec_federated_int(self):\n federated_int = computation_types.FederatedType(tf.int32,\n placement_literals.SERVER)\n with self.assertRaisesRegex(TypeError, 'only nested tuples and tensors'):\n building_block_factory.create_tensorflow_constant(federated_int, 0)\n\n def test_raises_non_scalar_value(self):\n non_scalar_value = np.zeros([1])\n with self.assertRaisesRegex(TypeError, 'Must pass a scalar'):\n 
building_block_factory.create_tensorflow_constant(tf.int32,\n non_scalar_value)\n\n def test_raises_float_passed_for_int(self):\n with self.assertRaisesRegex(TypeError, 'Only integers'):\n building_block_factory.create_tensorflow_constant(tf.int32, 1.)\n\n def test_constructs_integer_tensor_zero(self):\n tensor_zero = building_block_factory.create_tensorflow_constant(\n computation_types.TensorType(tf.int32, [2, 2]), 0)\n self.assertIsInstance(tensor_zero, building_blocks.Call)\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(tensor_zero.function.proto),\n np.zeros([2, 2], dtype=np.int32)))\n\n def test_constructs_float_tensor_one(self):\n tensor_one = building_block_factory.create_tensorflow_constant(\n computation_types.TensorType(tf.float32, [2, 2]), 1.)\n self.assertIsInstance(tensor_one, building_blocks.Call)\n self.assertTrue(\n np.array_equal(\n test_utils.run_tensorflow(tensor_one.function.proto),\n np.ones([2, 2], dtype=np.float32)))\n\n def test_constructs_unnamed_tuple_of_float_tensor_ones(self):\n tuple_type = computation_types.NamedTupleType(\n [computation_types.TensorType(tf.float32, [2, 2])] * 2)\n tuple_of_ones = building_block_factory.create_tensorflow_constant(\n tuple_type, 1.)\n self.assertEqual(tuple_of_ones.type_signature, tuple_type)\n self.assertIsInstance(tuple_of_ones, building_blocks.Call)\n result = test_utils.run_tensorflow(tuple_of_ones.function.proto)\n self.assertLen(result, 2)\n self.assertTrue(\n np.array_equal(result[0], np.ones([2, 2], dtype=np.float32)))\n self.assertTrue(\n np.array_equal(result[1], np.ones([2, 2], dtype=np.float32)))\n\n def test_constructs_named_tuple_of_float_tensor_ones(self):\n tuple_type = computation_types.NamedTupleType([\n ('a', computation_types.TensorType(tf.float32, [2, 2])),\n ('b', computation_types.TensorType(tf.float32, [2, 2]))\n ])\n tuple_of_ones = building_block_factory.create_tensorflow_constant(\n tuple_type, 1.)\n self.assertEqual(tuple_of_ones.type_signature, tuple_type)\n self.assertIsInstance(tuple_of_ones, building_blocks.Call)\n result = test_utils.run_tensorflow(tuple_of_ones.function.proto)\n self.assertLen(result, 2)\n self.assertTrue(np.array_equal(result.a, np.ones([2, 2], dtype=np.float32)))\n self.assertTrue(np.array_equal(result.b, np.ones([2, 2], dtype=np.float32)))\n\n def test_constructs_nested_named_tuple_of_float_tensor_ones(self):\n tuple_type = computation_types.NamedTupleType([[\n ('a', computation_types.TensorType(tf.float32, [2, 2])),\n ('b', computation_types.TensorType(tf.float32, [2, 2]))\n ]])\n tuple_of_ones = building_block_factory.create_tensorflow_constant(\n tuple_type, 1.)\n self.assertEqual(tuple_of_ones.type_signature, tuple_type)\n self.assertIsInstance(tuple_of_ones, building_blocks.Call)\n result = test_utils.run_tensorflow(tuple_of_ones.function.proto)\n self.assertLen(result, 1)\n self.assertTrue(\n np.array_equal(result[0].a, np.ones([2, 2], dtype=np.float32)))\n self.assertTrue(\n np.array_equal(result[0].b, np.ones([2, 2], dtype=np.float32)))\n\n def test_constructs_nested_named_tuple_of_int_and_float_tensor_ones(self):\n tuple_type = computation_types.NamedTupleType([[\n ('a', computation_types.TensorType(tf.int32, [2, 2])),\n ('b', computation_types.TensorType(tf.float32, [2, 2]))\n ]])\n tuple_of_ones = building_block_factory.create_tensorflow_constant(\n tuple_type, 1)\n self.assertEqual(tuple_of_ones.type_signature, tuple_type)\n self.assertIsInstance(tuple_of_ones, building_blocks.Call)\n result = 
test_utils.run_tensorflow(tuple_of_ones.function.proto)\n self.assertLen(result, 1)\n self.assertTrue(\n np.array_equal(result[0].a, np.ones([2, 2], dtype=np.int32)))\n self.assertTrue(\n np.array_equal(result[0].b, np.ones([2, 2], dtype=np.float32)))\n\n\nclass BinaryOperatorTest(absltest.TestCase):\n\n def test_apply_op_raises_on_none(self):\n with self.assertRaisesRegex(TypeError, 'ComputationBuildingBlock'):\n building_block_factory.apply_binary_operator_with_upcast(\n None, tf.multiply)\n\n def test_construct_op_raises_on_none_operator(self):\n with self.assertRaisesRegex(TypeError, 'found non-callable'):\n building_block_factory.create_binary_operator_with_upcast(tf.int32, None)\n\n def test_raises_incompatible_tuple_and_tensor(self):\n bad_type_ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType([[tf.int32, tf.int32], tf.float32],\n placement_literals.CLIENTS))\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.apply_binary_operator_with_upcast(\n bad_type_ref, tf.multiply)\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.create_binary_operator_with_upcast(\n bad_type_ref.type_signature.member, tf.multiply)\n\n def test_raises_non_callable_op(self):\n bad_type_ref = building_blocks.Reference('x', [tf.float32, tf.float32])\n with self.assertRaisesRegex(TypeError, 'non-callable'):\n building_block_factory.apply_binary_operator_with_upcast(\n bad_type_ref, tf.constant(0))\n with self.assertRaisesRegex(TypeError, 'non-callable'):\n building_block_factory.create_binary_operator_with_upcast(\n bad_type_ref, tf.constant(0))\n\n def test_raises_tuple_and_nonscalar_tensor(self):\n bad_type_ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType(\n [[tf.int32, tf.int32],\n computation_types.TensorType(tf.float32, [2])],\n placement_literals.CLIENTS))\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.apply_binary_operator_with_upcast(\n bad_type_ref, tf.multiply)\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.create_binary_operator_with_upcast(\n bad_type_ref.type_signature.member, tf.multiply)\n\n def test_raises_tuple_scalar_multiplied_by_nonscalar(self):\n bad_type_ref = building_blocks.Reference(\n 'x', [tf.int32, computation_types.TensorType(tf.float32, [2])])\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.apply_binary_operator_with_upcast(\n bad_type_ref, tf.multiply)\n with self.assertRaisesRegex(TypeError, 'incompatible with upcasted'):\n building_block_factory.create_binary_operator_with_upcast(\n bad_type_ref.type_signature, tf.multiply)\n\n def test_construct_generic_raises_federated_type(self):\n bad_type = computation_types.FederatedType(\n [[tf.int32, tf.int32],\n computation_types.TensorType(tf.float32, [2])],\n placement_literals.CLIENTS)\n with self.assertRaisesRegex(TypeError, 'argument that is not a two-tuple'):\n building_block_factory.create_binary_operator_with_upcast(\n bad_type, tf.multiply)\n\n def test_apply_integer_type_signature(self):\n ref = building_blocks.Reference('x', [tf.int32, tf.int32])\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(multiplied.type_signature,\n computation_types.to_type(tf.int32))\n\n def test_construct_integer_type_signature(self):\n ref = building_blocks.Reference('x', [tf.int32, tf.int32])\n 
multiplier = building_block_factory.create_binary_operator_with_upcast(\n ref.type_signature, tf.multiply)\n self.assertEqual(\n multiplier.type_signature,\n type_factory.binary_op(computation_types.to_type(tf.int32)))\n\n def test_multiply_federated_integer_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType([tf.int32, tf.int32],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType(tf.int32, placement_literals.CLIENTS))\n\n def test_divide_federated_float_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType([tf.float32, tf.float32],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType(tf.float32, placement_literals.CLIENTS))\n\n def test_multiply_federated_unnamed_tuple_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType(\n [[tf.int32, tf.float32], [tf.int32, tf.float32]],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType([tf.int32, tf.float32],\n placement_literals.CLIENTS))\n\n def test_multiply_federated_named_tuple_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType(\n [[('a', tf.int32),\n ('b', tf.float32)], [('a', tf.int32), ('b', tf.float32)]],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType([('a', tf.int32), ('b', tf.float32)],\n placement_literals.CLIENTS))\n\n def test_divide_federated_named_tuple_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType(\n [[('a', tf.int32),\n ('b', tf.float32)], [('a', tf.int32), ('b', tf.float32)]],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.divide)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType([('a', tf.float64), ('b', tf.float32)],\n placement_literals.CLIENTS))\n\n def test_multiply_federated_named_tuple_with_scalar_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType([[('a', tf.float32),\n ('b', tf.float32)], tf.float32],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType([('a', tf.float32), ('b', tf.float32)],\n placement_literals.CLIENTS))\n\n def test_multiply_named_tuple_with_scalar_type_signature(self):\n ref = building_blocks.Reference('x', [[('a', tf.float32),\n ('b', tf.float32)], tf.float32])\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.multiply)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.NamedTupleType([('a', tf.float32),\n ('b', tf.float32)]))\n\n def test_construct_multiply_op_named_tuple_with_scalar_type_signature(self):\n type_spec = computation_types.to_type([[('a', tf.float32),\n ('b', tf.float32)], tf.float32])\n multiplier = 
building_block_factory.create_binary_operator_with_upcast(\n type_spec, tf.multiply)\n expected_function_type = computation_types.FunctionType(\n type_spec, type_spec[0])\n self.assertEqual(multiplier.type_signature, expected_function_type)\n\n def test_construct_divide_op_named_tuple_with_scalar_type_signature(self):\n type_spec = computation_types.to_type([[('a', tf.float32),\n ('b', tf.float32)], tf.float32])\n multiplier = building_block_factory.create_binary_operator_with_upcast(\n type_spec, tf.divide)\n expected_function_type = computation_types.FunctionType(\n type_spec, type_spec[0])\n self.assertEqual(multiplier.type_signature, expected_function_type)\n\n def test_divide_federated_named_tuple_with_scalar_type_signature(self):\n ref = building_blocks.Reference(\n 'x',\n computation_types.FederatedType([[('a', tf.float32),\n ('b', tf.float32)], tf.float32],\n placement_literals.CLIENTS))\n multiplied = building_block_factory.apply_binary_operator_with_upcast(\n ref, tf.divide)\n self.assertEqual(\n multiplied.type_signature,\n computation_types.FederatedType([('a', tf.float32), ('b', tf.float32)],\n placement_literals.CLIENTS))\n\n\nclass ConstructTensorFlowSelectingOutputsTest(absltest.TestCase):\n\n def test_raises_non_named_tuple_type(self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[0])\n with self.assertRaises(TypeError):\n building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n tf.int32, anonymous_tuple.AnonymousTuple([(None, selection_spec)]))\n\n def test_raises_non_anonymous_tuple(self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[0])\n with self.assertRaises(TypeError):\n building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32], [selection_spec])\n\n def test_raises_nested_non_anonymous_tuple(self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[0])\n with self.assertRaises(TypeError):\n building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32],\n anonymous_tuple.from_container([[selection_spec]], recursive=False))\n\n def test_construct_selection_from_tuple_with_empty_list_type_signature(self):\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], anonymous_tuple.from_container([]))\n self.assertIsInstance(constructed_tf, building_blocks.CompiledComputation)\n self.assertEqual(constructed_tf.type_signature,\n computation_types.FunctionType([tf.int32, tf.float32], []))\n\n def test_construct_selection_from_two_tuple_correct_type_signature(self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n output_structure = anonymous_tuple.from_container(\n [selection_spec_1, selection_spec_2])\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=output_structure)\n self.assertIsInstance(constructed_tf, building_blocks.CompiledComputation)\n self.assertEqual(\n constructed_tf.type_signature,\n computation_types.FunctionType([tf.int32, tf.float32],\n [tf.int32, tf.int32]))\n\n def test_construct_selection_from_two_tuple_correct_singleton_type_signature(\n self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n 
output_structure = anonymous_tuple.from_container([selection_spec])\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=output_structure)\n self.assertIsInstance(constructed_tf, building_blocks.CompiledComputation)\n self.assertEqual(\n constructed_tf.type_signature,\n computation_types.FunctionType([tf.int32, tf.float32], [tf.int32]))\n\n def test_construct_selection_from_two_tuple_executes_correctly(self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n output_structure = anonymous_tuple.from_container(\n [selection_spec_1, selection_spec_2])\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=output_structure)\n result = test_utils.run_tensorflow(constructed_tf.proto, [0, 1.])\n self.assertLen(result, 2)\n self.assertEqual(result[0], 0)\n self.assertEqual(result[1], 0)\n result = test_utils.run_tensorflow(constructed_tf.proto, [1, 0.])\n self.assertLen(result, 2)\n self.assertEqual(result[0], 1)\n self.assertEqual(result[1], 1)\n\n def test_construct_selection_with_names(self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=1, selection_sequence=[])\n output_structure = anonymous_tuple.AnonymousTuple([('a', selection_spec_1),\n ('b', selection_spec_2)])\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [('a', tf.int32), ('b', tf.float32)], output_structure=output_structure)\n self.assertEqual(\n constructed_tf.type_signature,\n computation_types.FunctionType([('a', tf.int32), ('b', tf.float32)],\n [('a', tf.int32), ('b', tf.float32)]))\n\n def test_construct_tuple_packed_selection_with_name(self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=1, selection_sequence=[])\n output_structure = anonymous_tuple.AnonymousTuple([\n ('c',\n anonymous_tuple.from_container([selection_spec_1, selection_spec_2],\n recursive=True))\n ])\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [('a', tf.int32), ('b', tf.float32)], output_structure=output_structure)\n self.assertEqual(\n constructed_tf.type_signature,\n computation_types.FunctionType([('a', tf.int32), ('b', tf.float32)],\n [('c', [tf.int32, tf.float32])]))\n\n def test_construct_selection_from_nested_tuple_executes_correctly(self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[0, 0])\n output_structure = anonymous_tuple.from_container([selection_spec],\n recursive=True)\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [[[tf.int32]], tf.float32], output_structure=output_structure)\n result = test_utils.run_tensorflow(constructed_tf.proto, [[[0]], 1.])\n self.assertEqual(result[0], 0)\n\n def test_construct_selection_from_nested_tuple_repack_into_tuple_executes_correctly(\n self):\n selection_spec = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[0, 0])\n output_structure = anonymous_tuple.from_container([[[selection_spec]]],\n recursive=True)\n constructed_tf = 
building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [[[tf.int32]], tf.float32], output_structure=output_structure)\n result = test_utils.run_tensorflow(constructed_tf.proto, [[[0]], 1.])\n self.assertEqual(result[0][0][0], 0)\n\n def test_construct_selection_from_two_tuple_repack_named_lower_level_type_signature(\n self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n output_structure = anonymous_tuple.from_container([\n anonymous_tuple.AnonymousTuple([('a', selection_spec_1)]),\n selection_spec_2\n ],\n recursive=True)\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=output_structure)\n self.assertEqual(\n constructed_tf.type_signature,\n computation_types.FunctionType([tf.int32, tf.float32],\n [[('a', tf.int32)], tf.int32]))\n\n def test_construct_selection_from_two_tuple_repack_lower_level_output_executes_correctly(\n self):\n selection_spec_1 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n selection_spec_2 = building_block_factory.SelectionSpec(\n tuple_index=0, selection_sequence=[])\n output_structure = anonymous_tuple.from_container(\n [[selection_spec_1], selection_spec_2], recursive=True)\n constructed_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=output_structure)\n result = test_utils.run_tensorflow(constructed_tf.proto, [0, 1.])\n self.assertLen(result, 2)\n self.assertLen(result[0], 1)\n self.assertEqual(result[0][0], 0)\n self.assertEqual(result[1], 0)\n result = test_utils.run_tensorflow(constructed_tf.proto, [1, 0.])\n self.assertLen(result, 2)\n self.assertLen(result[0], 1)\n self.assertEqual(result[0][0], 1)\n self.assertEqual(result[1], 1)\n flipped_output_structure = anonymous_tuple.from_container(\n [selection_spec_1, [selection_spec_2]], recursive=True)\n flipped_packing_tf = building_block_factory.construct_tensorflow_selecting_and_packing_outputs(\n [tf.int32, tf.float32], output_structure=flipped_output_structure)\n result = test_utils.run_tensorflow(flipped_packing_tf.proto, [0, 1.])\n self.assertLen(result, 2)\n self.assertEqual(result[0], 0)\n self.assertEqual(result[1][0], 0)\n result = test_utils.run_tensorflow(flipped_packing_tf.proto, [1, 0.])\n self.assertLen(result, 2)\n self.assertEqual(result[0], 1)\n self.assertEqual(result[1][0], 1)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.array", "numpy.zeros", "tensorflow.TensorShape", "numpy.ones", "tensorflow.constant" ] ]
bharlow058/Imbalance-Library
[ "3e875838cb602865d8b9786fbe940d0704771fca" ]
[ "imblearn/datasets/zenodo.py" ]
[ "\"\"\"Collection of imbalanced datasets.\n\nThis collection of datasets has been proposed in [1]_. The\ncharacteristics of the available datasets are presented in the table\nbelow.\n\n ID Name Repository & Target Ratio #S #F\n 1 ecoli UCI, target: imU 8.6:1 336 7\n 2 optical_digits UCI, target: 8 9.1:1 5,620 64\n 3 satimage UCI, target: 4 9.3:1 6,435 36\n 4 pen_digits UCI, target: 5 9.4:1 10,992 16\n 5 abalone UCI, target: 7 9.7:1 4,177 10\n 6 sick_euthyroid UCI, target: sick euthyroid 9.8:1 3,163 42\n 7 spectrometer UCI, target: >=44 11:1 531 93\n 8 car_eval_34 UCI, target: good, v good 12:1 1,728 21\n 9 isolet UCI, target: A, B 12:1 7,797 617\n 10 us_crime UCI, target: >0.65 12:1 1,994 100\n 11 yeast_ml8 LIBSVM, target: 8 13:1 2,417 103\n 12 scene LIBSVM, target: >one label 13:1 2,407 294\n 13 libras_move UCI, target: 1 14:1 360 90\n 14 thyroid_sick UCI, target: sick 15:1 3,772 52\n 15 coil_2000 KDD, CoIL, target: minority 16:1 9,822 85\n 16 arrhythmia UCI, target: 06 17:1 452 278\n 17 solar_flare_m0 UCI, target: M->0 19:1 1,389 32\n 18 oil UCI, target: minority 22:1 937 49\n 19 car_eval_4 UCI, target: vgood 26:1 1,728 21\n 20 wine_quality UCI, wine, target: <=4 26:1 4,898 11\n 21 letter_img UCI, target: Z 26:1 20,000 16\n 22 yeast_me2 UCI, target: ME2 28:1 1,484 8\n 23 webpage LIBSVM, w7a, target: minority 33:1 34,780 300\n 24 ozone_level UCI, ozone, data 34:1 2,536 72\n 25 mammography UCI, target: minority 42:1 11,183 6\n 26 protein_homo KDD CUP 2004, minority 111:1 145,751 74\n 27 abalone_19 UCI, target: 19 130:1 4,177 10\n\nReferences\n----------\n.. [1] Ding, Zejin, \"Diversified Ensemble Classifiers for Highly\n Imbalanced Data Learning and their Application in Bioinformatics.\"\n Dissertation, Georgia State University, (2011).\n\n\"\"\"\n\n# Author: Guillaume Lemaitre\n# License: BSD 3 clause\n\nfrom collections import OrderedDict\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom os.path import join, isfile\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\n\nimport numpy as np\n\nfrom sklearn.datasets import get_data_home\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.utils.fixes import makedirs\nfrom sklearn.externals import six\nfrom sklearn.utils import check_random_state\n\nURL = ('https://zenodo.org/record/61452/files/'\n 'benchmark-imbalanced-learn.tar.gz')\nPRE_FILENAME = 'x'\nPOST_FILENAME = 'data.npz'\n\nMAP_NAME_ID_KEYS = ['ecoli',\n 'optical_digits',\n 'satimage',\n 'pen_digits',\n 'abalone',\n 'sick_euthyroid',\n 'spectrometer',\n 'car_eval_34',\n 'isolet',\n 'us_crime',\n 'yeast_ml8',\n 'scene',\n 'libras_move',\n 'thyroid_sick',\n 'coil_2000',\n 'arrhythmia',\n 'solar_flare_m0',\n 'oil',\n 'car_eval_4',\n 'wine_quality',\n 'letter_img',\n 'yeast_me2',\n 'webpage',\n 'ozone_level',\n 'mammography',\n 'protein_homo',\n 'abalone_19']\n\nMAP_NAME_ID = OrderedDict()\nMAP_ID_NAME = OrderedDict()\nfor v, k in enumerate(MAP_NAME_ID_KEYS):\n MAP_NAME_ID[k] = v + 1\n MAP_ID_NAME[v + 1] = k\n\nlogger = logging.getLogger()\n\n\ndef fetch_datasets(data_home=None,\n filter_data=None,\n download_if_missing=True,\n random_state=None,\n shuffle=False):\n \"\"\"Load the benchmark datasets from Zenodo, downloading it if necessary.\n\n Parameters\n ----------\n data_home : string, optional (default=None)\n Specify another download and cache folder for the datasets. 
By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n filter_data : tuple of str/int or None, optional (default=None)\n A tuple containing the ID or the name of the datasets to be returned.\n Refer to the above table to get the ID and name of the datasets.\n\n download_if_missing : boolean, optional (default=True)\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n random_state : int, RandomState instance or None, optional (default=None)\n Random state for shuffling the dataset.\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n shuffle : bool, optional (default=False)\n Whether to shuffle dataset.\n\n Returns\n -------\n datasets : OrderedDict of Bunch object,\n The ordered is defined by ``filter_data``. Each Bunch object ---\n refered as dataset --- have the following attributes:\n\n dataset.data : ndarray, shape (n_samples, n_features)\n\n dataset.target : ndarray, shape (n_samples, )\n\n dataset.DESCR : string\n Description of the each dataset.\n\n Notes\n -----\n This collection of datasets have been proposed in [1]_. The\n characteristics of the available datasets are presented in the table\n below.\n\n +--+--------------+-------------------------------+-------+---------+-----+\n |ID|Name | Repository & Target | Ratio | #S | #F |\n +==+==============+===============================+=======+=========+=====+\n |1 |ecoli | UCI, target: imU | 8.6:1 | 336 | 7 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |2 |optical_digits| UCI, target: 8 | 9.1:1 | 5,620 | 64 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |3 |satimage | UCI, target: 4 | 9.3:1 | 6,435 | 36 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |4 |pen_digits | UCI, target: 5 | 9.4:1 | 10,992 | 16 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |5 |abalone | UCI, target: 7 | 9.7:1 | 4,177 | 10 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |6 |sick_euthyroid| UCI, target: sick euthyroid | 9.8:1 | 3,163 | 42 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |7 |spectrometer | UCI, target: >=44 | 11:1 | 531 | 93 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |8 |car_eval_34 | UCI, target: good, v good | 12:1 | 1,728 | 21 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |9 |isolet | UCI, target: A, B | 12:1 | 7,797 | 617 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |10|us_crime | UCI, target: >0.65 | 12:1 | 1,994 | 100 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |11|yeast_ml8 | LIBSVM, target: 8 | 13:1 | 2,417 | 103 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |12|scene | LIBSVM, target: >one label | 13:1 | 2,407 | 294 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |13|libras_move | UCI, target: 1 | 14:1 | 360 | 90 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |14|thyroid_sick | UCI, target: sick | 15:1 | 3,772 | 52 |\n 
+--+--------------+-------------------------------+-------+---------+-----+\n |15|coil_2000 | KDD, CoIL, target: minority | 16:1 | 9,822 | 85 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |16|arrhythmia | UCI, target: 06 | 17:1 | 452 | 278 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |17|solar_flare_m0| UCI, target: M->0 | 19:1 | 1,389 | 32 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |18|oil | UCI, target: minority | 22:1 | 937 | 49 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |19|car_eval_4 | UCI, target: vgood | 26:1 | 1,728 | 21 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |20|wine_quality | UCI, wine, target: <=4 | 26:1 | 4,898 | 11 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |21|letter_img | UCI, target: Z | 26:1 | 20,000 | 16 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |22|yeast_me2 | UCI, target: ME2 | 28:1 | 1,484 | 8 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |23|webpage | LIBSVM, w7a, target: minority | 33:1 | 34,780 | 300 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |24|ozone_level | UCI, ozone, data | 34:1 | 2,536 | 72 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |25|mammography | UCI, target: minority | 42:1 | 11,183 | 6 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |26|protein_homo | KDD CUP 2004, minority | 11:1 | 145,751 | 74 |\n +--+--------------+-------------------------------+-------+---------+-----+\n |27|abalone_19 | UCI, target: 19 | 130:1 | 4,177 | 10 |\n +--+--------------+-------------------------------+-------+---------+-----+\n\n References\n ----------\n .. [1] Ding, Zejin, \"Diversified Ensemble Classifiers for Highly\n Imbalanced Data Learning and their Application in Bioinformatics.\"\n Dissertation, Georgia State University, (2011).\n \"\"\"\n\n data_home = get_data_home(data_home=data_home)\n zenodo_dir = join(data_home, \"zenodo\")\n datasets = OrderedDict()\n\n if filter_data is None:\n filter_data_ = MAP_NAME_ID.keys()\n else:\n list_data = MAP_NAME_ID.keys()\n filter_data_ = []\n for it in filter_data:\n if isinstance(it, six.string_types):\n if it not in list_data:\n raise ValueError('{} is not a dataset available. '\n 'The available datasets are {}'.format(\n it, list_data))\n else:\n filter_data_.append(it)\n elif isinstance(it, int):\n if it < 1 or it > 27:\n raise ValueError('The dataset with the ID={} is not an '\n 'available dataset. 
The IDs are '\n '{}'.format(it, range(1, 28)))\n else:\n # The index start at one, then we need to remove one\n # to not have issue with the indexing.\n filter_data_.append(MAP_ID_NAME[it])\n else:\n raise ValueError('The value in the tuple should be str or int.'\n ' Got {} instead.'.format(type(it)))\n\n # go through the list and check if the data are available\n for it in filter_data_:\n filename = PRE_FILENAME + str(MAP_NAME_ID[it]) + POST_FILENAME\n filename = join(zenodo_dir, filename)\n available = isfile(filename)\n\n if download_if_missing and not available:\n makedirs(zenodo_dir, exist_ok=True)\n logger.warning(\"Downloading %s\" % URL)\n f = BytesIO(urlopen(URL).read())\n tar = tarfile.open(fileobj=f)\n tar.extractall(path=zenodo_dir)\n elif not download_if_missing and not available:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n\n data = np.load(filename)\n X, y = data['data'], data['label']\n\n if shuffle:\n ind = np.arange(X.shape[0])\n rng = check_random_state(random_state)\n rng.shuffle(ind)\n X = X[ind]\n y = y[ind]\n\n datasets[it] = Bunch(data=X, target=y, DESCR=it)\n\n return datasets\n" ]
[ [ "sklearn.datasets.get_data_home", "sklearn.datasets.base.Bunch", "numpy.load", "sklearn.utils.check_random_state", "numpy.arange", "sklearn.utils.fixes.makedirs" ] ]
RicoFio/disentangle_mlp
[ "1fb3b6070b5846051b8b9e9333e8ee61418f4893" ]
[ "dataloader/dataset.py" ]
[ "import numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport random\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.data import DataLoader, Dataset\nimport torch\n\nnormalize_birds = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\nnormalize_mnist = transforms.Normalize(mean=[0.1307], std=[0.3081])\nnormalize_fiw = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n\ndef get_data_loader(opt):\n if opt.dataset == \"birds\":\n my_transform = transforms.Compose([\n transforms.Resize((opt.img_size, opt.img_size)),\n transforms.ToTensor(),\n normalize_birds\n ])\n train_dataset = datasets.ImageFolder(root=opt.image_root, transform=my_transform)\n train_loader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)\n test_loader = None\n val_loader = None\n\n elif opt.dataset == \"mnist\":\n my_transform = transforms.Compose([\n transforms.Resize((opt.img_size, opt.img_size)),\n transforms.ToTensor(),\n normalize_mnist\n ])\n train_loader = DataLoader(datasets.MNIST(opt.image_root, train=True, download=True, transform=my_transform),\n batch_size=opt.batch_size, shuffle=True)\n test_loader = None\n val_loader = None\n\n elif opt.dataset == \"celebA\" or opt.dataset == \"celebA_reduced\":\n my_transform = transforms.Compose([\n transforms.Resize((opt.img_size, opt.img_size)),\n transforms.CenterCrop(opt.img_size),\n transforms.ToTensor(),\n normalize_fiw\n ])\n train_dataset = datasets.ImageFolder(root=opt.image_root_train, transform=my_transform)\n val_dataset = datasets.ImageFolder(root=opt.image_root_val, transform=my_transform)\n test_dataset = datasets.ImageFolder(root=opt.image_root_test, transform=my_transform)\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size_train, shuffle=True, num_workers=opt.num_workers)\n val_loader = DataLoader(dataset=val_dataset, batch_size=opt.batch_size_val, shuffle=False, num_workers=opt.num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=opt.batch_size_test, shuffle=False, num_workers=opt.num_workers)\n\n return train_loader, val_loader, test_loader" ]
[ [ "torch.utils.data.DataLoader" ] ]
rainwangphy/AutoDL-Projects
[ "1a40948255ac3c16ee529d94144a39bf26e89bfa", "1a40948255ac3c16ee529d94144a39bf26e89bfa", "1a40948255ac3c16ee529d94144a39bf26e89bfa", "1a40948255ac3c16ee529d94144a39bf26e89bfa", "1a40948255ac3c16ee529d94144a39bf26e89bfa" ]
[ "exps/NATS-Bench/draw-fig2_5.py", "exps/algos/DARTS-V2.py", "exps/NAS-Bench-201/statistics-v2.py", "exps/NATS-Bench/sss-collect.py", "exps/algos/SETN.py" ]
[ "###############################################################\n# NATS-Bench (arxiv.org/pdf/2009.00437.pdf), IEEE TPAMI 2021 #\n# The code to draw Figure 2 / 3 / 4 / 5 in our paper. #\n###############################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.06 #\n###############################################################\n# Usage: python exps/NATS-Bench/draw-fig2_5.py #\n###############################################################\nimport os, sys, time, torch, argparse\nimport scipy\nimport numpy as np\nfrom typing import List, Text, Dict, Any\nfrom shutil import copyfile\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom pathlib import Path\nimport matplotlib\nimport seaborn as sns\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import dict2config, load_config\nfrom log_utils import time_string\nfrom models import get_cell_based_tiny_net\nfrom nats_bench import create\n\n\ndef visualize_relative_info(api, vis_save_dir, indicator):\n vis_save_dir = vis_save_dir.resolve()\n # print ('{:} start to visualize {:} information'.format(time_string(), api))\n vis_save_dir.mkdir(parents=True, exist_ok=True)\n\n cifar010_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar10', indicator)\n cifar100_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar100', indicator)\n imagenet_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('ImageNet16-120', indicator)\n cifar010_info = torch.load(cifar010_cache_path)\n cifar100_info = torch.load(cifar100_cache_path)\n imagenet_info = torch.load(imagenet_cache_path)\n indexes = list(range(len(cifar010_info['params'])))\n\n print ('{:} start to visualize relative ranking'.format(time_string()))\n\n cifar010_ord_indexes = sorted(indexes, key=lambda i: cifar010_info['test_accs'][i])\n cifar100_ord_indexes = sorted(indexes, key=lambda i: cifar100_info['test_accs'][i])\n imagenet_ord_indexes = sorted(indexes, key=lambda i: imagenet_info['test_accs'][i])\n\n cifar100_labels, imagenet_labels = [], []\n for idx in cifar010_ord_indexes:\n cifar100_labels.append( cifar100_ord_indexes.index(idx) )\n imagenet_labels.append( imagenet_ord_indexes.index(idx) )\n print ('{:} prepare data done.'.format(time_string()))\n\n dpi, width, height = 200, 1400, 800\n figsize = width / float(dpi), height / float(dpi)\n LabelSize, LegendFontsize = 18, 12\n resnet_scale, resnet_alpha = 120, 0.5\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n plt.xlim(min(indexes), max(indexes))\n plt.ylim(min(indexes), max(indexes))\n # plt.ylabel('y').set_rotation(30)\n plt.yticks(np.arange(min(indexes), max(indexes), max(indexes)//3), fontsize=LegendFontsize, rotation='vertical')\n plt.xticks(np.arange(min(indexes), max(indexes), max(indexes)//5), fontsize=LegendFontsize)\n ax.scatter(indexes, cifar100_labels, marker='^', s=0.5, c='tab:green', alpha=0.8)\n ax.scatter(indexes, imagenet_labels, marker='*', s=0.5, c='tab:red' , alpha=0.8)\n ax.scatter(indexes, indexes , marker='o', s=0.5, c='tab:blue' , alpha=0.8)\n ax.scatter([-1], [-1], marker='o', s=100, c='tab:blue' , label='CIFAR-10')\n ax.scatter([-1], [-1], marker='^', s=100, c='tab:green', label='CIFAR-100')\n ax.scatter([-1], [-1], marker='*', s=100, c='tab:red' , label='ImageNet-16-120')\n plt.grid(zorder=0)\n 
ax.set_axisbelow(True)\n plt.legend(loc=0, fontsize=LegendFontsize)\n ax.set_xlabel('architecture ranking in CIFAR-10', fontsize=LabelSize)\n ax.set_ylabel('architecture ranking', fontsize=LabelSize)\n save_path = (vis_save_dir / '{:}-relative-rank.pdf'.format(indicator)).resolve()\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='pdf')\n save_path = (vis_save_dir / '{:}-relative-rank.png'.format(indicator)).resolve()\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')\n print ('{:} save into {:}'.format(time_string(), save_path))\n\n\ndef visualize_sss_info(api, dataset, vis_save_dir):\n vis_save_dir = vis_save_dir.resolve()\n print ('{:} start to visualize {:} information'.format(time_string(), dataset))\n vis_save_dir.mkdir(parents=True, exist_ok=True)\n cache_file_path = vis_save_dir / '{:}-cache-sss-info.pth'.format(dataset)\n if not cache_file_path.exists():\n print ('Do not find cache file : {:}'.format(cache_file_path))\n params, flops, train_accs, valid_accs, test_accs = [], [], [], [], []\n for index in range(len(api)):\n cost_info = api.get_cost_info(index, dataset, hp='90')\n params.append(cost_info['params'])\n flops.append(cost_info['flops'])\n # accuracy\n info = api.get_more_info(index, dataset, hp='90', is_random=False)\n train_accs.append(info['train-accuracy'])\n test_accs.append(info['test-accuracy'])\n if dataset == 'cifar10':\n info = api.get_more_info(index, 'cifar10-valid', hp='90', is_random=False)\n valid_accs.append(info['valid-accuracy'])\n else:\n valid_accs.append(info['valid-accuracy'])\n info = {'params': params, 'flops': flops, 'train_accs': train_accs, 'valid_accs': valid_accs, 'test_accs': test_accs}\n torch.save(info, cache_file_path)\n else:\n print ('Find cache file : {:}'.format(cache_file_path))\n info = torch.load(cache_file_path)\n params, flops, train_accs, valid_accs, test_accs = info['params'], info['flops'], info['train_accs'], info['valid_accs'], info['test_accs']\n print ('{:} collect data done.'.format(time_string()))\n\n # pyramid = ['8:16:32:48:64', '8:8:16:32:48', '8:8:16:16:32', '8:8:16:16:48', '8:8:16:16:64', '16:16:32:32:64', '32:32:64:64:64']\n pyramid = ['8:16:24:32:40', '8:16:32:48:64', '32:40:48:56:64']\n pyramid_indexes = [api.query_index_by_arch(x) for x in pyramid]\n largest_indexes = [api.query_index_by_arch('64:64:64:64:64')]\n\n indexes = list(range(len(params)))\n dpi, width, height = 250, 8500, 1300\n figsize = width / float(dpi), height / float(dpi)\n LabelSize, LegendFontsize = 24, 24\n # resnet_scale, resnet_alpha = 120, 0.5\n xscale, xalpha = 120, 0.8\n\n fig, axs = plt.subplots(1, 4, figsize=figsize)\n # ax1, ax2, ax3, ax4, ax5 = axs\n for ax in axs:\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.0f'))\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n ax1, ax2, ax3, ax4 = axs\n\n ax1.scatter(params, train_accs, marker='o', s=0.5, c='tab:blue')\n ax1.scatter([params[x] for x in pyramid_indexes], [train_accs[x] for x in pyramid_indexes], marker='*', s=xscale, c='tab:orange', label='Pyramid Structure', alpha=xalpha)\n ax1.scatter([params[x] for x in largest_indexes], [train_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax1.set_xlabel('#parameters (MB)', fontsize=LabelSize)\n ax1.set_ylabel('train accuracy (%)', fontsize=LabelSize)\n ax1.legend(loc=4, fontsize=LegendFontsize)\n\n ax2.scatter(flops, 
train_accs, marker='o', s=0.5, c='tab:blue')\n ax2.scatter([flops[x] for x in pyramid_indexes], [train_accs[x] for x in pyramid_indexes], marker='*', s=xscale, c='tab:orange', label='Pyramid Structure', alpha=xalpha)\n ax2.scatter([flops[x] for x in largest_indexes], [train_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax2.set_xlabel('#FLOPs (M)', fontsize=LabelSize)\n # ax2.set_ylabel('train accuracy (%)', fontsize=LabelSize)\n ax2.legend(loc=4, fontsize=LegendFontsize)\n\n ax3.scatter(params, test_accs, marker='o', s=0.5, c='tab:blue')\n ax3.scatter([params[x] for x in pyramid_indexes], [test_accs[x] for x in pyramid_indexes], marker='*', s=xscale, c='tab:orange', label='Pyramid Structure', alpha=xalpha)\n ax3.scatter([params[x] for x in largest_indexes], [test_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax3.set_xlabel('#parameters (MB)', fontsize=LabelSize)\n ax3.set_ylabel('test accuracy (%)', fontsize=LabelSize)\n ax3.legend(loc=4, fontsize=LegendFontsize)\n\n ax4.scatter(flops, test_accs, marker='o', s=0.5, c='tab:blue')\n ax4.scatter([flops[x] for x in pyramid_indexes], [test_accs[x] for x in pyramid_indexes], marker='*', s=xscale, c='tab:orange', label='Pyramid Structure', alpha=xalpha)\n ax4.scatter([flops[x] for x in largest_indexes], [test_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax4.set_xlabel('#FLOPs (M)', fontsize=LabelSize)\n # ax4.set_ylabel('test accuracy (%)', fontsize=LabelSize)\n ax4.legend(loc=4, fontsize=LegendFontsize)\n\n save_path = vis_save_dir / 'sss-{:}.png'.format(dataset.lower())\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')\n print ('{:} save into {:}'.format(time_string(), save_path))\n plt.close('all')\n\n\ndef visualize_tss_info(api, dataset, vis_save_dir):\n vis_save_dir = vis_save_dir.resolve()\n print ('{:} start to visualize {:} information'.format(time_string(), dataset))\n vis_save_dir.mkdir(parents=True, exist_ok=True)\n cache_file_path = vis_save_dir / '{:}-cache-tss-info.pth'.format(dataset)\n if not cache_file_path.exists():\n print ('Do not find cache file : {:}'.format(cache_file_path))\n params, flops, train_accs, valid_accs, test_accs = [], [], [], [], []\n for index in range(len(api)):\n cost_info = api.get_cost_info(index, dataset, hp='12')\n params.append(cost_info['params'])\n flops.append(cost_info['flops'])\n # accuracy\n info = api.get_more_info(index, dataset, hp='200', is_random=False)\n train_accs.append(info['train-accuracy'])\n test_accs.append(info['test-accuracy'])\n if dataset == 'cifar10':\n info = api.get_more_info(index, 'cifar10-valid', hp='200', is_random=False)\n valid_accs.append(info['valid-accuracy'])\n else:\n valid_accs.append(info['valid-accuracy'])\n print('')\n info = {'params': params, 'flops': flops, 'train_accs': train_accs, 'valid_accs': valid_accs, 'test_accs': test_accs}\n torch.save(info, cache_file_path)\n else:\n print ('Find cache file : {:}'.format(cache_file_path))\n info = torch.load(cache_file_path)\n params, flops, train_accs, valid_accs, test_accs = info['params'], info['flops'], info['train_accs'], info['valid_accs'], info['test_accs']\n print ('{:} collect data done.'.format(time_string()))\n\n resnet = ['|nor_conv_3x3~0|+|none~0|nor_conv_3x3~1|+|skip_connect~0|none~1|skip_connect~2|']\n resnet_indexes = [api.query_index_by_arch(x) for x in resnet]\n 
largest_indexes = [api.query_index_by_arch('|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|nor_conv_3x3~0|nor_conv_3x3~1|nor_conv_3x3~2|')]\n\n indexes = list(range(len(params)))\n dpi, width, height = 250, 8500, 1300\n figsize = width / float(dpi), height / float(dpi)\n LabelSize, LegendFontsize = 24, 24\n # resnet_scale, resnet_alpha = 120, 0.5\n xscale, xalpha = 120, 0.8\n\n fig, axs = plt.subplots(1, 4, figsize=figsize)\n for ax in axs:\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.0f'))\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n ax1, ax2, ax3, ax4 = axs\n\n ax1.scatter(params, train_accs, marker='o', s=0.5, c='tab:blue')\n ax1.scatter([params[x] for x in resnet_indexes] , [train_accs[x] for x in resnet_indexes], marker='*', s=xscale, c='tab:orange', label='ResNet', alpha=xalpha)\n ax1.scatter([params[x] for x in largest_indexes], [train_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax1.set_xlabel('#parameters (MB)', fontsize=LabelSize)\n ax1.set_ylabel('train accuracy (%)', fontsize=LabelSize)\n ax1.legend(loc=4, fontsize=LegendFontsize)\n\n ax2.scatter(flops, train_accs, marker='o', s=0.5, c='tab:blue')\n ax2.scatter([flops[x] for x in resnet_indexes], [train_accs[x] for x in resnet_indexes], marker='*', s=xscale, c='tab:orange', label='ResNet', alpha=xalpha)\n ax2.scatter([flops[x] for x in largest_indexes], [train_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax2.set_xlabel('#FLOPs (M)', fontsize=LabelSize)\n # ax2.set_ylabel('train accuracy (%)', fontsize=LabelSize)\n ax2.legend(loc=4, fontsize=LegendFontsize)\n\n ax3.scatter(params, test_accs, marker='o', s=0.5, c='tab:blue')\n ax3.scatter([params[x] for x in resnet_indexes] , [test_accs[x] for x in resnet_indexes], marker='*', s=xscale, c='tab:orange', label='ResNet', alpha=xalpha)\n ax3.scatter([params[x] for x in largest_indexes], [test_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax3.set_xlabel('#parameters (MB)', fontsize=LabelSize)\n ax3.set_ylabel('test accuracy (%)', fontsize=LabelSize)\n ax3.legend(loc=4, fontsize=LegendFontsize)\n\n ax4.scatter(flops, test_accs, marker='o', s=0.5, c='tab:blue')\n ax4.scatter([flops[x] for x in resnet_indexes], [test_accs[x] for x in resnet_indexes], marker='*', s=xscale, c='tab:orange', label='ResNet', alpha=xalpha)\n ax4.scatter([flops[x] for x in largest_indexes], [test_accs[x] for x in largest_indexes], marker='x', s=xscale, c='tab:green', label='Largest Candidate', alpha=xalpha)\n ax4.set_xlabel('#FLOPs (M)', fontsize=LabelSize)\n # ax4.set_ylabel('test accuracy (%)', fontsize=LabelSize)\n ax4.legend(loc=4, fontsize=LegendFontsize)\n\n save_path = vis_save_dir / 'tss-{:}.png'.format(dataset.lower())\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')\n print ('{:} save into {:}'.format(time_string(), save_path))\n plt.close('all')\n\n\ndef visualize_rank_info(api, vis_save_dir, indicator):\n vis_save_dir = vis_save_dir.resolve()\n # print ('{:} start to visualize {:} information'.format(time_string(), api))\n vis_save_dir.mkdir(parents=True, exist_ok=True)\n\n cifar010_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar10', indicator)\n cifar100_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar100', 
indicator)\n imagenet_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('ImageNet16-120', indicator)\n cifar010_info = torch.load(cifar010_cache_path)\n cifar100_info = torch.load(cifar100_cache_path)\n imagenet_info = torch.load(imagenet_cache_path)\n indexes = list(range(len(cifar010_info['params'])))\n\n print ('{:} start to visualize relative ranking'.format(time_string()))\n\n dpi, width, height = 250, 3800, 1200\n figsize = width / float(dpi), height / float(dpi)\n LabelSize, LegendFontsize = 14, 14\n\n fig, axs = plt.subplots(1, 3, figsize=figsize)\n ax1, ax2, ax3 = axs\n\n def get_labels(info):\n ord_test_indexes = sorted(indexes, key=lambda i: info['test_accs'][i])\n ord_valid_indexes = sorted(indexes, key=lambda i: info['valid_accs'][i])\n labels = []\n for idx in ord_test_indexes:\n labels.append(ord_valid_indexes.index(idx))\n return labels\n\n def plot_ax(labels, ax, name):\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(LabelSize)\n tick.label.set_rotation(90)\n ax.set_xlim(min(indexes), max(indexes))\n ax.set_ylim(min(indexes), max(indexes))\n ax.yaxis.set_ticks(np.arange(min(indexes), max(indexes), max(indexes)//3))\n ax.xaxis.set_ticks(np.arange(min(indexes), max(indexes), max(indexes)//5))\n ax.scatter(indexes, labels , marker='^', s=0.5, c='tab:green', alpha=0.8)\n ax.scatter(indexes, indexes, marker='o', s=0.5, c='tab:blue' , alpha=0.8)\n ax.scatter([-1], [-1], marker='^', s=100, c='tab:green' , label='{:} test'.format(name))\n ax.scatter([-1], [-1], marker='o', s=100, c='tab:blue' , label='{:} validation'.format(name))\n ax.legend(loc=4, fontsize=LegendFontsize)\n ax.set_xlabel('ranking on the {:} validation'.format(name), fontsize=LabelSize)\n ax.set_ylabel('architecture ranking', fontsize=LabelSize)\n labels = get_labels(cifar010_info)\n plot_ax(labels, ax1, 'CIFAR-10')\n labels = get_labels(cifar100_info)\n plot_ax(labels, ax2, 'CIFAR-100')\n labels = get_labels(imagenet_info)\n plot_ax(labels, ax3, 'ImageNet-16-120')\n\n save_path = (vis_save_dir / '{:}-same-relative-rank.pdf'.format(indicator)).resolve()\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='pdf')\n save_path = (vis_save_dir / '{:}-same-relative-rank.png'.format(indicator)).resolve()\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')\n print ('{:} save into {:}'.format(time_string(), save_path))\n plt.close('all')\n\n\ndef compute_kendalltau(vectori, vectorj):\n # indexes = list(range(len(vectori)))\n # rank_1 = sorted(indexes, key=lambda i: vectori[i])\n # rank_2 = sorted(indexes, key=lambda i: vectorj[i])\n return scipy.stats.kendalltau(vectori, vectorj).correlation\n\n\ndef calculate_correlation(*vectors):\n matrix = []\n for i, vectori in enumerate(vectors):\n x = []\n for j, vectorj in enumerate(vectors):\n # x.append(np.corrcoef(vectori, vectorj)[0,1])\n x.append(compute_kendalltau(vectori, vectorj))\n matrix.append( x )\n return np.array(matrix)\n\n\ndef visualize_all_rank_info(api, vis_save_dir, indicator):\n vis_save_dir = vis_save_dir.resolve()\n # print ('{:} start to visualize {:} information'.format(time_string(), api))\n vis_save_dir.mkdir(parents=True, exist_ok=True)\n\n cifar010_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar10', indicator)\n cifar100_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('cifar100', indicator)\n imagenet_cache_path = vis_save_dir / '{:}-cache-{:}-info.pth'.format('ImageNet16-120', 
indicator)\n cifar010_info = torch.load(cifar010_cache_path)\n cifar100_info = torch.load(cifar100_cache_path)\n imagenet_info = torch.load(imagenet_cache_path)\n indexes = list(range(len(cifar010_info['params'])))\n\n print ('{:} start to visualize relative ranking'.format(time_string()))\n \n\n dpi, width, height = 250, 3200, 1400\n figsize = width / float(dpi), height / float(dpi)\n LabelSize, LegendFontsize = 14, 14\n\n fig, axs = plt.subplots(1, 2, figsize=figsize)\n ax1, ax2 = axs\n\n sns_size, xformat = 15, '.2f'\n CoRelMatrix = calculate_correlation(cifar010_info['valid_accs'], cifar010_info['test_accs'], cifar100_info['valid_accs'], cifar100_info['test_accs'], imagenet_info['valid_accs'], imagenet_info['test_accs'])\n \n sns.heatmap(CoRelMatrix, annot=True, annot_kws={'size':sns_size}, fmt=xformat, linewidths=0.5, ax=ax1,\n xticklabels=['C10-V', 'C10-T', 'C100-V', 'C100-T', 'I120-V', 'I120-T'],\n yticklabels=['C10-V', 'C10-T', 'C100-V', 'C100-T', 'I120-V', 'I120-T'])\n \n selected_indexes, acc_bar = [], 92\n for i, acc in enumerate(cifar010_info['test_accs']):\n if acc > acc_bar: selected_indexes.append( i )\n cifar010_valid_accs = np.array(cifar010_info['valid_accs'])[ selected_indexes ]\n cifar010_test_accs = np.array(cifar010_info['test_accs']) [ selected_indexes ]\n cifar100_valid_accs = np.array(cifar100_info['valid_accs'])[ selected_indexes ]\n cifar100_test_accs = np.array(cifar100_info['test_accs']) [ selected_indexes ]\n imagenet_valid_accs = np.array(imagenet_info['valid_accs'])[ selected_indexes ]\n imagenet_test_accs = np.array(imagenet_info['test_accs']) [ selected_indexes ]\n CoRelMatrix = calculate_correlation(cifar010_valid_accs, cifar010_test_accs, cifar100_valid_accs, cifar100_test_accs, imagenet_valid_accs, imagenet_test_accs)\n \n sns.heatmap(CoRelMatrix, annot=True, annot_kws={'size':sns_size}, fmt=xformat, linewidths=0.5, ax=ax2,\n xticklabels=['C10-V', 'C10-T', 'C100-V', 'C100-T', 'I120-V', 'I120-T'],\n yticklabels=['C10-V', 'C10-T', 'C100-V', 'C100-T', 'I120-V', 'I120-T'])\n ax1.set_title('Correlation coefficient over ALL candidates')\n ax2.set_title('Correlation coefficient over candidates with accuracy > {:}%'.format(acc_bar))\n save_path = (vis_save_dir / '{:}-all-relative-rank.png'.format(indicator)).resolve()\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')\n print ('{:} save into {:}'.format(time_string(), save_path))\n plt.close('all')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='NATS-Bench', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--save_dir', type=str, default='output/vis-nas-bench', help='Folder to save checkpoints and log.')\n # use for train the model\n args = parser.parse_args()\n\n to_save_dir = Path(args.save_dir)\n\n datasets = ['cifar10', 'cifar100', 'ImageNet16-120']\n # Figure 3 (a-c)\n api_tss = create(None, 'tss', verbose=True)\n for xdata in datasets:\n visualize_tss_info(api_tss, xdata, to_save_dir)\n # Figure 3 (d-f)\n api_sss = create(None, 'size', verbose=True)\n for xdata in datasets:\n visualize_sss_info(api_sss, xdata, to_save_dir)\n\n # Figure 2\n visualize_relative_info(None, to_save_dir, 'tss')\n visualize_relative_info(None, to_save_dir, 'sss')\n\n # Figure 4\n visualize_rank_info(None, to_save_dir, 'tss')\n visualize_rank_info(None, to_save_dir, 'sss')\n\n # Figure 5\n visualize_all_rank_info(None, to_save_dir, 'tss')\n visualize_all_rank_info(None, to_save_dir, 'sss')\n", "##################################################\n# 
Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n########################################################\n# DARTS: Differentiable Architecture Search, ICLR 2019 #\n########################################################\nimport os, sys, time, glob, random, argparse\nimport numpy as np\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, get_nas_search_loaders\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom models import get_cell_based_tiny_net, get_search_spaces\nfrom nas_201_api import NASBench201API as API\n\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\ndef _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):\n R = r / _concat(vector).norm()\n for p, v in zip(network.module.get_weights(), vector):\n p.data.add_(R, v)\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n grads_p = torch.autograd.grad(loss, network.module.get_alphas())\n\n for p, v in zip(network.module.get_weights(), vector):\n p.data.sub_(2*R, v)\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n grads_n = torch.autograd.grad(loss, network.module.get_alphas())\n\n for p, v in zip(network.module.get_weights(), vector):\n p.data.add_(R, v)\n return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]\n\n\ndef backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):\n # _compute_unrolled_model\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']\n with torch.no_grad():\n theta = _concat(network.module.get_weights())\n try:\n moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.module.get_weights())\n moment = moment.mul_(momentum)\n except:\n moment = torch.zeros_like(theta)\n dtheta = _concat(torch.autograd.grad(loss, network.module.get_weights())) + WD*theta\n params = theta.sub(LR, moment+dtheta)\n unrolled_model = deepcopy(network)\n model_dict = unrolled_model.state_dict()\n new_params, offset = {}, 0\n for k, v in network.named_parameters():\n if 'arch_parameters' in k: continue\n v_length = np.prod(v.size())\n new_params[k] = params[offset: offset+v_length].view(v.size())\n offset += v_length\n model_dict.update(new_params)\n unrolled_model.load_state_dict(model_dict)\n\n unrolled_model.zero_grad()\n _, unrolled_logits = unrolled_model(arch_inputs)\n unrolled_loss = criterion(unrolled_logits, arch_targets)\n unrolled_loss.backward()\n\n dalpha = unrolled_model.module.arch_parameters.grad\n vector = [v.grad.data for v in unrolled_model.module.get_weights()]\n [implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)\n \n dalpha.data.sub_(LR, implicit_grads.data)\n\n if network.module.arch_parameters.grad is None:\n network.module.arch_parameters.grad = deepcopy( dalpha )\n else:\n network.module.arch_parameters.grad.data.copy_( dalpha.data )\n return unrolled_loss.detach(), 
unrolled_logits.detach()\n \n\ndef search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):\n data_time, batch_time = AverageMeter(), AverageMeter()\n base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n network.train()\n end = time.time()\n for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):\n scheduler.update(None, 1.0 * step / len(xloader))\n base_targets = base_targets.cuda(non_blocking=True)\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n\n # update the architecture-weight\n a_optimizer.zero_grad()\n arch_loss, arch_logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)\n a_optimizer.step()\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(arch_logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n \n # update the weights\n w_optimizer.zero_grad()\n _, logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n torch.nn.utils.clip_grad_norm_(network.parameters(), 5)\n w_optimizer.step()\n # record\n base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))\n base_losses.update(base_loss.item(), base_inputs.size(0))\n base_top1.update (base_prec1.item(), base_inputs.size(0))\n base_top5.update (base_prec5.item(), base_inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % print_freq == 0 or step + 1 == len(xloader):\n Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))\n Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)\n Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)\n Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)\n logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)\n return base_losses.avg, base_top1.avg, base_top5.avg\n\n\ndef valid_func(xloader, network, criterion):\n data_time, batch_time = AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n network.eval()\n end = time.time()\n with torch.no_grad():\n for step, (arch_inputs, arch_targets) in enumerate(xloader):\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n # prediction\n _, logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n return 
arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef main(xargs):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n search_loader, _, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', config.batch_size, xargs.workers)\n logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n\n search_space = get_search_spaces('cell', xargs.search_space_name)\n model_config = dict2config({'name': 'DARTS-V2', 'C': xargs.channel, 'N': xargs.num_cells,\n 'max_nodes': xargs.max_nodes, 'num_classes': class_num,\n 'space' : search_space,\n 'affine' : False, 'track_running_stats': bool(xargs.track_running_stats)}, None)\n search_model = get_cell_based_tiny_net(model_config)\n logger.log('search-model :\\n{:}'.format(search_model))\n \n w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)\n a_optimizer = torch.optim.Adam(search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay)\n logger.log('w-optimizer : {:}'.format(w_optimizer))\n logger.log('a-optimizer : {:}'.format(a_optimizer))\n logger.log('w-scheduler : {:}'.format(w_scheduler))\n logger.log('criterion : {:}'.format(criterion))\n flop, param = get_model_infos(search_model, xshape)\n #logger.log('{:}'.format(search_model))\n logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))\n if xargs.arch_nas_dataset is None:\n api = None\n else:\n api = API(xargs.arch_nas_dataset)\n logger.log('{:} create API = {:} done'.format(time_string(), api))\n\n last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')\n network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()\n\n if last_info.exists(): # automatically resume from previous checkpoint\n logger.log(\"=> loading checkpoint of the last-info '{:}' start\".format(last_info))\n last_info = torch.load(last_info)\n start_epoch = last_info['epoch']\n checkpoint = torch.load(last_info['last_checkpoint'])\n genotypes = checkpoint['genotypes']\n valid_accuracies = checkpoint['valid_accuracies']\n search_model.load_state_dict( checkpoint['search_model'] )\n w_scheduler.load_state_dict ( checkpoint['w_scheduler'] )\n w_optimizer.load_state_dict ( checkpoint['w_optimizer'] )\n a_optimizer.load_state_dict ( checkpoint['a_optimizer'] )\n logger.log(\"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.\".format(last_info, start_epoch))\n else:\n logger.log(\"=> do not find the last-info file : {:}\".format(last_info))\n start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: search_model.genotype()}\n\n # start training\n start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup\n for epoch in range(start_epoch, total_epoch):\n w_scheduler.update(epoch, 0.0)\n need_time = 
'Time Left: {:}'.format( convert_secs2time(epoch_time.val * (total_epoch-epoch), True) )\n epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)\n min_LR = min(w_scheduler.get_lr())\n logger.log('\\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min_LR))\n\n search_w_loss, search_w_top1, search_w_top5 = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)\n search_time.update(time.time() - start_time)\n logger.log('[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))\n # check the best accuracy\n valid_accuracies[epoch] = valid_a_top1\n if valid_a_top1 > valid_accuracies['best']:\n valid_accuracies['best'] = valid_a_top1\n genotypes['best'] = search_model.genotype()\n find_best = True\n else: find_best = False\n\n genotypes[epoch] = search_model.genotype()\n logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))\n # save checkpoint\n save_path = save_checkpoint({'epoch' : epoch + 1,\n 'args' : deepcopy(xargs),\n 'search_model': search_model.state_dict(),\n 'w_optimizer' : w_optimizer.state_dict(),\n 'a_optimizer' : a_optimizer.state_dict(),\n 'w_scheduler' : w_scheduler.state_dict(),\n 'genotypes' : genotypes,\n 'valid_accuracies' : valid_accuracies},\n model_base_path, logger)\n last_info = save_checkpoint({\n 'epoch': epoch + 1,\n 'args' : deepcopy(args),\n 'last_checkpoint': save_path,\n }, logger.path('info'), logger)\n if find_best:\n logger.log('<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'.format(epoch_str, valid_a_top1))\n copy_checkpoint(model_base_path, model_best_path, logger)\n with torch.no_grad():\n logger.log('arch-parameters :\\n{:}'.format( nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu() ))\n if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '200')))\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n\n logger.log('\\n' + '-'*100)\n # check the performance from the architecture dataset\n logger.log('DARTS-V2 : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotypes[total_epoch-1]))\n if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[total_epoch-1]), '200'))\n logger.close()\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"DARTS Second Order\")\n parser.add_argument('--data_path', type=str, help='Path to dataset')\n parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n # channels and number-of-cells\n parser.add_argument('--config_path', type=str, help='The config path.')\n parser.add_argument('--search_space_name', type=str, help='The search space name.')\n parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n parser.add_argument('--channel', type=int, help='The number of channels.')\n parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n parser.add_argument('--track_running_stats',type=int, choices=[0,1],help='Whether use 
track_running_stats or not in the BN layer.')\n # architecture leraning rate\n parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\n parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\n # log\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')\n parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n parser.add_argument('--rand_seed', type=int, help='manual seed')\n args = parser.parse_args()\n if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n main(args)\n", "#####################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 #\n#####################################################\nimport os, sys, time, argparse, collections\nimport numpy as np\nimport torch\nfrom pathlib import Path\nfrom collections import defaultdict, OrderedDict\nfrom typing import Dict, Any, Text, List\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom config_utils import dict2config\n# NAS-Bench-201 related module or function\nfrom models import CellStructure, get_cell_based_tiny_net\nfrom nas_201_api import NASBench201API, ArchResults, ResultsCount\nfrom procedures import bench_pure_evaluate as pure_evaluate, get_nas_bench_loaders\n\napi = NASBench201API('{:}/.torch/NAS-Bench-201-v1_0-e61699.pth'.format(os.environ['HOME']))\n\ndef create_result_count(used_seed: int, dataset: Text, arch_config: Dict[Text, Any],\n results: Dict[Text, Any], dataloader_dict: Dict[Text, Any]) -> ResultsCount:\n xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'],\n results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)\n net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes': arch_config['class_num']}, None)\n network = get_cell_based_tiny_net(net_config)\n network.load_state_dict(xresult.get_net_param())\n if 'train_times' in results: # new version\n xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])\n xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])\n else:\n if dataset == 'cifar10-valid':\n xresult.update_OLD_eval('x-valid' , results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())\n xresult.update_OLD_eval('ori-test', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n elif dataset == 'cifar10':\n xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_latency(latencies)\n elif dataset == 'cifar100' or dataset == 'ImageNet16-120':\n 
xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())\n xresult.update_OLD_eval('x-valid', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_OLD_eval('x-test' , {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n else:\n raise ValueError('invalid dataset name : {:}'.format(dataset))\n return xresult\n \n\n\ndef account_one_arch(arch_index: int, arch_str: Text, checkpoints: List[Text],\n datasets: List[Text], dataloader_dict: Dict[Text, Any]) -> ArchResults:\n information = ArchResults(arch_index, arch_str)\n\n for checkpoint_path in checkpoints:\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]\n ok_dataset = 0\n for dataset in datasets:\n if dataset not in checkpoint:\n print('Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path))\n continue\n else:\n ok_dataset += 1\n results = checkpoint[dataset]\n assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)\n arch_config = {'channel': results['channel'], 'num_cells': results['num_cells'], 'arch_str': arch_str, 'class_num': results['config']['class_num']}\n \n xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)\n information.update(dataset, int(used_seed), xresult)\n if ok_dataset == 0: raise ValueError('{:} does not find any data'.format(checkpoint_path))\n return information\n\n\ndef correct_time_related_info(arch_index: int, arch_info_full: ArchResults, arch_info_less: ArchResults):\n # calibrate the latency based on NAS-Bench-201-v1_0-e61699.pth\n cifar010_latency = (api.get_latency(arch_index, 'cifar10-valid', hp='200') + api.get_latency(arch_index, 'cifar10', hp='200')) / 2\n arch_info_full.reset_latency('cifar10-valid', None, cifar010_latency)\n arch_info_full.reset_latency('cifar10', None, cifar010_latency)\n arch_info_less.reset_latency('cifar10-valid', None, cifar010_latency)\n arch_info_less.reset_latency('cifar10', None, cifar010_latency)\n\n cifar100_latency = api.get_latency(arch_index, 'cifar100', hp='200')\n arch_info_full.reset_latency('cifar100', None, cifar100_latency)\n arch_info_less.reset_latency('cifar100', None, cifar100_latency)\n\n image_latency = api.get_latency(arch_index, 'ImageNet16-120', hp='200')\n arch_info_full.reset_latency('ImageNet16-120', None, image_latency)\n arch_info_less.reset_latency('ImageNet16-120', None, image_latency)\n\n train_per_epoch_time = list(arch_info_less.query('cifar10-valid', 777).train_times.values())\n train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)\n eval_ori_test_time, eval_x_valid_time = [], []\n for key, value in arch_info_less.query('cifar10-valid', 777).eval_times.items():\n if key.startswith('ori-test@'):\n eval_ori_test_time.append(value)\n elif key.startswith('x-valid@'):\n eval_x_valid_time.append(value)\n else: raise ValueError('-- {:} --'.format(key))\n eval_ori_test_time, eval_x_valid_time = float(np.mean(eval_ori_test_time)), float(np.mean(eval_x_valid_time))\n nums = {'ImageNet16-120-train': 151700, 'ImageNet16-120-valid': 3000, 'ImageNet16-120-test': 6000,\n 
'cifar10-valid-train': 25000, 'cifar10-valid-valid': 25000,\n 'cifar10-train': 50000, 'cifar10-test': 10000,\n 'cifar100-train': 50000, 'cifar100-test': 10000, 'cifar100-valid': 5000}\n eval_per_sample = (eval_ori_test_time + eval_x_valid_time) / (nums['cifar10-valid-valid'] + nums['cifar10-test'])\n for arch_info in [arch_info_less, arch_info_full]:\n arch_info.reset_pseudo_train_times('cifar10-valid', None,\n train_per_epoch_time / nums['cifar10-valid-train'] * nums['cifar10-valid-train'])\n arch_info.reset_pseudo_train_times('cifar10', None,\n train_per_epoch_time / nums['cifar10-valid-train'] * nums['cifar10-train'])\n arch_info.reset_pseudo_train_times('cifar100', None,\n train_per_epoch_time / nums['cifar10-valid-train'] * nums['cifar100-train'])\n arch_info.reset_pseudo_train_times('ImageNet16-120', None,\n train_per_epoch_time / nums['cifar10-valid-train'] * nums['ImageNet16-120-train'])\n arch_info.reset_pseudo_eval_times('cifar10-valid', None, 'x-valid', eval_per_sample*nums['cifar10-valid-valid'])\n arch_info.reset_pseudo_eval_times('cifar10-valid', None, 'ori-test', eval_per_sample * nums['cifar10-test'])\n arch_info.reset_pseudo_eval_times('cifar10', None, 'ori-test', eval_per_sample * nums['cifar10-test'])\n arch_info.reset_pseudo_eval_times('cifar100', None, 'x-valid', eval_per_sample * nums['cifar100-valid'])\n arch_info.reset_pseudo_eval_times('cifar100', None, 'x-test', eval_per_sample * nums['cifar100-valid'])\n arch_info.reset_pseudo_eval_times('cifar100', None, 'ori-test', eval_per_sample * nums['cifar100-test'])\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'x-valid', eval_per_sample * nums['ImageNet16-120-valid'])\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'x-test', eval_per_sample * nums['ImageNet16-120-valid'])\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'ori-test', eval_per_sample * nums['ImageNet16-120-test'])\n # arch_info_full.debug_test()\n # arch_info_less.debug_test()\n return arch_info_full, arch_info_less\n\n\ndef simplify(save_dir, meta_file, basestr, target_dir):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs'] # a list of architecture strings\n meta_num_archs = meta_infos['total']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n \n subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0\n num_seeds = defaultdict(lambda: 0)\n for index, sub_dir in enumerate(sub_model_dirs):\n xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))\n arch_indexes = set()\n for checkpoint in xcheckpoints:\n temp_names = checkpoint.name.split('-')\n assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)\n arch_indexes.add( temp_names[1] )\n subdir2archs[sub_dir] = sorted(list(arch_indexes))\n num_evaluated_arch += len(arch_indexes)\n # count number of seeds for each architecture\n for arch_index in arch_indexes:\n num_seeds[ len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))) ] += 1\n print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))\n for key in sorted( list( num_seeds.keys() ) ): print ('{:} There are {:5d} architectures that are 
evaluated {:} times.'.format(time_string(), num_seeds[key], key))\n\n dataloader_dict = get_nas_bench_loaders( 6 )\n to_save_simply = save_dir / 'simplifies'\n to_save_allarc = save_dir / 'simplifies' / 'architectures'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n if not to_save_allarc.exists(): to_save_allarc.mkdir(parents=True, exist_ok=True)\n\n assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)\n arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')\n evaluated_indexes = set()\n target_full_dir = save_dir / target_dir\n target_less_dir = save_dir / '{:}-LESS'.format(target_dir)\n arch_indexes = subdir2archs[ target_full_dir ]\n num_seeds = defaultdict(lambda: 0)\n end_time = time.time()\n arch_time = AverageMeter()\n for idx, arch_index in enumerate(arch_indexes):\n checkpoints = list(target_full_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n # create the arch info for each architecture\n try:\n arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)\n arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, datasets, dataloader_dict)\n num_seeds[ len(checkpoints) ] += 1\n except:\n print('Loading {:} failed, : {:}'.format(arch_index, checkpoints))\n continue\n assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)\n assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)\n arch_info = {'full': arch_info_full, 'less': arch_info_less}\n evaluated_indexes.add(int(arch_index))\n arch2infos[int(arch_index)] = arch_info\n # to correct the latency and training_time info.\n arch_info_full, arch_info_less = correct_time_related_info(int(arch_index), arch_info_full, arch_info_less)\n to_save_data = OrderedDict(full=arch_info_full.state_dict(), less=arch_info_less.state_dict())\n torch.save(to_save_data, to_save_allarc / '{:}-FULL.pth'.format(arch_index))\n arch_info['full'].clear_params()\n arch_info['less'].clear_params()\n torch.save(to_save_data, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))\n # measure elapsed time\n arch_time.update(time.time() - end_time)\n end_time = time.time()\n need_time = '{:}'.format( convert_secs2time(arch_time.avg * (len(arch_indexes)-idx-1), True) )\n print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))\n # measure time\n xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted( list( num_seeds.keys() ) ) ]\n print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'basestr' : basestr,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}.pth'.format(target_dir)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\ndef merge_all(save_dir, meta_file, basestr):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs']\n meta_num_archs = meta_infos['total']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs 
= sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n for index, sub_dir in enumerate(sub_model_dirs):\n arch_info_files = sorted( list(sub_dir.glob('arch-*-seed-*.pth') ) )\n print ('The {:02d}/{:02d}-th directory : {:} : {:} runs.'.format(index, len(sub_model_dirs), sub_dir, len(arch_info_files)))\n \n arch2infos, evaluated_indexes = dict(), set()\n for IDX, sub_dir in enumerate(sub_model_dirs):\n ckp_path = sub_dir.parent / 'simplifies' / '{:}.pth'.format(sub_dir.name)\n if ckp_path.exists():\n sub_ckps = torch.load(ckp_path, map_location='cpu')\n assert sub_ckps['total_archs'] == meta_num_archs and sub_ckps['basestr'] == basestr\n xarch2infos = sub_ckps['arch2infos']\n xevalindexs = sub_ckps['evaluated_indexes']\n for eval_index in xevalindexs:\n assert eval_index not in evaluated_indexes and eval_index not in arch2infos\n #arch2infos[eval_index] = xarch2infos[eval_index].state_dict()\n arch2infos[eval_index] = {'full': xarch2infos[eval_index]['full'].state_dict(),\n 'less': xarch2infos[eval_index]['less'].state_dict()}\n evaluated_indexes.add( eval_index )\n print ('{:} [{:03d}/{:03d}] merge data from {:} with {:} models.'.format(time_string(), IDX, len(sub_model_dirs), ckp_path, len(xevalindexs)))\n else:\n raise ValueError('Can not find {:}'.format(ckp_path))\n #print ('{:} [{:03d}/{:03d}] can not find {:}, skip.'.format(time_string(), IDX, len(subdir2archs), ckp_path))\n\n evaluated_indexes = sorted( list( evaluated_indexes ) )\n print ('Finally, there are {:} architectures that have been trained and evaluated.'.format(len(evaluated_indexes)))\n\n to_save_simply = save_dir / 'simplifies'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}-final-infos.pth'.format(basestr)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='NAS-BENCH-201', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--mode' , type=str, choices=['cal', 'merge'], help='The running mode for this script.')\n parser.add_argument('--base_save_dir', type=str, default='./output/NAS-BENCH-201-4', help='The base-name of folder to save checkpoints and log.')\n parser.add_argument('--target_dir' , type=str, help='The target directory.')\n parser.add_argument('--max_node' , type=int, default=4, help='The maximum node in a cell.')\n parser.add_argument('--channel' , type=int, default=16, help='The number of channels.')\n parser.add_argument('--num_cells' , type=int, default=5, help='The number of cells in one stage.')\n args = parser.parse_args()\n \n save_dir = Path(args.base_save_dir)\n meta_path = save_dir / 'meta-node-{:}.pth'.format(args.max_node)\n assert save_dir.exists(), 'invalid save dir path : {:}'.format(save_dir)\n assert meta_path.exists(), 'invalid saved meta path : {:}'.format(meta_path)\n print ('start the statistics of our nas-benchmark from {:} using {:}.'.format(save_dir, args.target_dir))\n basestr = 'C{:}-N{:}'.format(args.channel, args.num_cells)\n \n if args.mode == 'cal':\n simplify(save_dir, meta_path, basestr, args.target_dir)\n elif args.mode == 
'merge':\n merge_all(save_dir, meta_path, basestr)\n else:\n raise ValueError('invalid mode : {:}'.format(args.mode))\n", "##############################################################################\n# NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size #\n##############################################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.08 #\n##############################################################################\n# This file is used to re-orangize all checkpoints (created by main-sss.py) #\n# into a single benchmark file. Besides, for each trial, we will merge the #\n# information of all its trials into a single file. #\n# #\n# Usage: #\n# python exps/NATS-Bench/sss-collect.py #\n##############################################################################\nimport os, re, sys, time, shutil, argparse, collections\nimport torch\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom collections import defaultdict, OrderedDict\nfrom typing import Dict, Any, Text, List\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom config_utils import dict2config\nfrom models import CellStructure, get_cell_based_tiny_net\nfrom nats_bench import pickle_save, pickle_load, ArchResults, ResultsCount\nfrom procedures import bench_pure_evaluate as pure_evaluate, get_nas_bench_loaders\nfrom utils import get_md5_file\n\n\nNATS_SSS_BASE_NAME = 'NATS-sss-v1_0' # 2020.08.28\n\n\ndef account_one_arch(arch_index: int, arch_str: Text, checkpoints: List[Text], datasets: List[Text]) -> ArchResults:\n information = ArchResults(arch_index, arch_str)\n\n for checkpoint_path in checkpoints:\n try:\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n except:\n raise ValueError('This checkpoint failed to be loaded : {:}'.format(checkpoint_path))\n used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]\n ok_dataset = 0\n for dataset in datasets:\n if dataset not in checkpoint:\n print('Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path))\n continue\n else:\n ok_dataset += 1\n results = checkpoint[dataset]\n assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)\n arch_config = {'name': 'infer.shape.tiny', 'channels': arch_str, 'arch_str': arch_str,\n 'genotype': results['arch_config']['genotype'],\n 'class_num': results['arch_config']['num_classes']}\n xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'],\n results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)\n xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])\n xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])\n information.update(dataset, int(used_seed), xresult)\n if ok_dataset < len(datasets): raise ValueError('{:} does find enought data : {:} vs {:}'.format(checkpoint_path, ok_dataset, len(datasets)))\n return information\n\n\ndef correct_time_related_info(hp2info: Dict[Text, ArchResults]):\n # calibrate the latency based on the number of epochs = 01, since they are trained on the same machine.\n x1 = hp2info['01'].get_metrics('cifar10-valid', 'x-valid')['all_time'] / 98\n x2 = 
hp2info['01'].get_metrics('cifar10-valid', 'ori-test')['all_time'] / 40\n cifar010_latency = (x1 + x2) / 2\n for hp, arch_info in hp2info.items():\n arch_info.reset_latency('cifar10-valid', None, cifar010_latency)\n arch_info.reset_latency('cifar10', None, cifar010_latency)\n # hp2info['01'].get_latency('cifar10')\n\n x1 = hp2info['01'].get_metrics('cifar100', 'ori-test')['all_time'] / 40\n x2 = hp2info['01'].get_metrics('cifar100', 'x-test')['all_time'] / 20\n x3 = hp2info['01'].get_metrics('cifar100', 'x-valid')['all_time'] / 20\n cifar100_latency = (x1 + x2 + x3) / 3\n for hp, arch_info in hp2info.items():\n arch_info.reset_latency('cifar100', None, cifar100_latency)\n\n x1 = hp2info['01'].get_metrics('ImageNet16-120', 'ori-test')['all_time'] / 24\n x2 = hp2info['01'].get_metrics('ImageNet16-120', 'x-test')['all_time'] / 12\n x3 = hp2info['01'].get_metrics('ImageNet16-120', 'x-valid')['all_time'] / 12\n image_latency = (x1 + x2 + x3) / 3\n for hp, arch_info in hp2info.items():\n arch_info.reset_latency('ImageNet16-120', None, image_latency)\n\n # CIFAR10 VALID\n train_per_epoch_time = list(hp2info['01'].query('cifar10-valid', 777).train_times.values())\n train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)\n eval_ori_test_time, eval_x_valid_time = [], []\n for key, value in hp2info['01'].query('cifar10-valid', 777).eval_times.items():\n if key.startswith('ori-test@'):\n eval_ori_test_time.append(value)\n elif key.startswith('x-valid@'):\n eval_x_valid_time.append(value)\n else: raise ValueError('-- {:} --'.format(key))\n eval_ori_test_time = sum(eval_ori_test_time) / len(eval_ori_test_time)\n eval_x_valid_time = sum(eval_x_valid_time) / len(eval_x_valid_time)\n for hp, arch_info in hp2info.items():\n arch_info.reset_pseudo_train_times('cifar10-valid', None, train_per_epoch_time)\n arch_info.reset_pseudo_eval_times('cifar10-valid', None, 'x-valid', eval_x_valid_time)\n arch_info.reset_pseudo_eval_times('cifar10-valid', None, 'ori-test', eval_ori_test_time)\n\n # CIFAR10\n train_per_epoch_time = list(hp2info['01'].query('cifar10', 777).train_times.values())\n train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)\n eval_ori_test_time = []\n for key, value in hp2info['01'].query('cifar10', 777).eval_times.items():\n if key.startswith('ori-test@'):\n eval_ori_test_time.append(value)\n else: raise ValueError('-- {:} --'.format(key))\n eval_ori_test_time = sum(eval_ori_test_time) / len(eval_ori_test_time)\n for hp, arch_info in hp2info.items():\n arch_info.reset_pseudo_train_times('cifar10', None, train_per_epoch_time)\n arch_info.reset_pseudo_eval_times('cifar10', None, 'ori-test', eval_ori_test_time)\n\n # CIFAR100\n train_per_epoch_time = list(hp2info['01'].query('cifar100', 777).train_times.values())\n train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)\n eval_ori_test_time, eval_x_valid_time, eval_x_test_time = [], [], []\n for key, value in hp2info['01'].query('cifar100', 777).eval_times.items():\n if key.startswith('ori-test@'):\n eval_ori_test_time.append(value)\n elif key.startswith('x-valid@'):\n eval_x_valid_time.append(value)\n elif key.startswith('x-test@'):\n eval_x_test_time.append(value)\n else: raise ValueError('-- {:} --'.format(key))\n eval_ori_test_time = sum(eval_ori_test_time) / len(eval_ori_test_time)\n eval_x_valid_time = sum(eval_x_valid_time) / len(eval_x_valid_time)\n eval_x_test_time = sum(eval_x_test_time) / len(eval_x_test_time)\n for hp, arch_info in hp2info.items():\n 
arch_info.reset_pseudo_train_times('cifar100', None, train_per_epoch_time)\n arch_info.reset_pseudo_eval_times('cifar100', None, 'x-valid', eval_x_valid_time)\n arch_info.reset_pseudo_eval_times('cifar100', None, 'x-test', eval_x_test_time)\n arch_info.reset_pseudo_eval_times('cifar100', None, 'ori-test', eval_ori_test_time)\n\n # ImageNet16-120\n train_per_epoch_time = list(hp2info['01'].query('ImageNet16-120', 777).train_times.values())\n train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)\n eval_ori_test_time, eval_x_valid_time, eval_x_test_time = [], [], []\n for key, value in hp2info['01'].query('ImageNet16-120', 777).eval_times.items():\n if key.startswith('ori-test@'):\n eval_ori_test_time.append(value)\n elif key.startswith('x-valid@'):\n eval_x_valid_time.append(value)\n elif key.startswith('x-test@'):\n eval_x_test_time.append(value)\n else: raise ValueError('-- {:} --'.format(key))\n eval_ori_test_time = sum(eval_ori_test_time) / len(eval_ori_test_time)\n eval_x_valid_time = sum(eval_x_valid_time) / len(eval_x_valid_time)\n eval_x_test_time = sum(eval_x_test_time) / len(eval_x_test_time)\n for hp, arch_info in hp2info.items():\n arch_info.reset_pseudo_train_times('ImageNet16-120', None, train_per_epoch_time)\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'x-valid', eval_x_valid_time)\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'x-test', eval_x_test_time)\n arch_info.reset_pseudo_eval_times('ImageNet16-120', None, 'ori-test', eval_ori_test_time)\n return hp2info\n\n\ndef simplify(save_dir, save_name, nets, total):\n \n hps, seeds = ['01', '12', '90'], set()\n for hp in hps:\n sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)\n ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth')))\n seed2names = defaultdict(list)\n for ckp in ckps:\n parts = re.split('-|\\.', ckp.name)\n seed2names[parts[3]].append(ckp.name)\n print('DIR : {:}'.format(sub_save_dir))\n nums = []\n for seed, xlist in seed2names.items():\n seeds.add(seed)\n nums.append(len(xlist))\n print(' [seed={:}] there are {:} checkpoints.'.format(seed, len(xlist)))\n assert len(nets) == total == max(nums), 'there are some missed files : {:} vs {:}'.format(max(nums), total)\n print('{:} start simplify the checkpoint.'.format(time_string()))\n\n datasets = ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')\n\n # Create the directory to save the processed data\n # full_save_dir contains all benchmark files with trained weights.\n # simplify_save_dir contains all benchmark files without trained weights.\n full_save_dir = save_dir / (save_name + '-FULL')\n simple_save_dir = save_dir / (save_name + '-SIMPLIFY')\n full_save_dir.mkdir(parents=True, exist_ok=True)\n simple_save_dir.mkdir(parents=True, exist_ok=True)\n # all data in memory\n arch2infos, evaluated_indexes = dict(), set()\n end_time, arch_time = time.time(), AverageMeter()\n\n for index in tqdm(range(total)):\n arch_str = nets[index]\n hp2info = OrderedDict()\n\n full_save_path = full_save_dir / '{:06d}.pickle'.format(index)\n simple_save_path = simple_save_dir / '{:06d}.pickle'.format(index)\n\n for hp in hps:\n sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)\n ckps = [sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed) for seed in seeds]\n ckps = [x for x in ckps if x.exists()]\n if len(ckps) == 0:\n raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp))\n\n arch_info = account_one_arch(index, arch_str, ckps, datasets)\n hp2info[hp] = arch_info\n \n hp2info = 
correct_time_related_info(hp2info)\n evaluated_indexes.add(index)\n\n hp2info['01'].clear_params() # to save some spaces...\n to_save_data = OrderedDict({'01': hp2info['01'].state_dict(),\n '12': hp2info['12'].state_dict(),\n '90': hp2info['90'].state_dict()})\n pickle_save(to_save_data, str(full_save_path))\n \n for hp in hps: hp2info[hp].clear_params()\n to_save_data = OrderedDict({'01': hp2info['01'].state_dict(),\n '12': hp2info['12'].state_dict(),\n '90': hp2info['90'].state_dict()})\n pickle_save(to_save_data, str(simple_save_path))\n arch2infos[index] = to_save_data\n # measure elapsed time\n arch_time.update(time.time() - end_time)\n end_time = time.time()\n need_time = '{:}'.format(convert_secs2time(arch_time.avg * (total-index-1), True))\n # print('{:} {:06d}/{:06d} : still need {:}'.format(time_string(), index, total, need_time))\n print('{:} {:} done.'.format(time_string(), save_name))\n final_infos = {'meta_archs' : nets,\n 'total_archs': total,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = save_dir / '{:}.pickle'.format(save_name)\n pickle_save(final_infos, str(save_file_name))\n # move the benchmark file to a new path\n hd5sum = get_md5_file(str(save_file_name) + '.pbz2')\n hd5_file_name = save_dir / '{:}-{:}.pickle.pbz2'.format(NATS_SSS_BASE_NAME, hd5sum)\n shutil.move(str(save_file_name) + '.pbz2', hd5_file_name)\n print('Save {:} / {:} architecture results into {:} -> {:}.'.format(len(evaluated_indexes), total, save_file_name, hd5_file_name))\n # move the directory to a new path\n hd5_full_save_dir = save_dir / '{:}-{:}-full'.format(NATS_SSS_BASE_NAME, hd5sum)\n hd5_simple_save_dir = save_dir / '{:}-{:}-simple'.format(NATS_SSS_BASE_NAME, hd5sum)\n shutil.move(full_save_dir, hd5_full_save_dir)\n shutil.move(simple_save_dir, hd5_simple_save_dir)\n # save the meta information for simple and full\n final_infos['arch2infos'] = None\n final_infos['evaluated_indexes'] = set()\n pickle_save(final_infos, str(hd5_full_save_dir / 'meta.pickle'))\n pickle_save(final_infos, str(hd5_simple_save_dir / 'meta.pickle'))\n\n\ndef traverse_net(candidates: List[int], N: int):\n nets = ['']\n for i in range(N):\n new_nets = []\n for net in nets:\n for C in candidates:\n new_nets.append(str(C) if net == '' else \"{:}:{:}\".format(net,C))\n nets = new_nets\n return nets\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='NATS-Bench (size search space)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--base_save_dir', type=str, default='./output/NATS-Bench-size', help='The base-name of folder to save checkpoints and log.')\n parser.add_argument('--candidateC' , type=int, nargs='+', default=[8, 16, 24, 32, 40, 48, 56, 64], help='.')\n parser.add_argument('--num_layers' , type=int, default=5, help='The number of layers in a network.')\n parser.add_argument('--check_N' , type=int, default=32768, help='For safety.')\n parser.add_argument('--save_name' , type=str, default='process', help='The save directory.')\n args = parser.parse_args()\n \n nets = traverse_net(args.candidateC, args.num_layers)\n if len(nets) != args.check_N:\n raise ValueError('Pre-num-check failed : {:} vs {:}'.format(len(nets), args.check_N))\n\n save_dir = Path(args.base_save_dir)\n simplify(save_dir, args.save_name, nets, args.check_N)\n", "##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 
#\n######################################################################################\n# One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019 #\n######################################################################################\nimport sys, time, random, argparse\nimport numpy as np\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, get_nas_search_loaders\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom models import get_cell_based_tiny_net, get_search_spaces\nfrom nas_201_api import NASBench201API as API\n\n\ndef search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):\n data_time, batch_time = AverageMeter(), AverageMeter()\n base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n end = time.time()\n network.train()\n for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):\n scheduler.update(None, 1.0 * step / len(xloader))\n base_targets = base_targets.cuda(non_blocking=True)\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n \n # update the weights\n sampled_arch = network.module.dync_genotype(True)\n network.module.set_cal_mode('dynamic', sampled_arch)\n #network.module.set_cal_mode( 'urs' )\n network.zero_grad()\n _, logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n w_optimizer.step()\n # record\n base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))\n base_losses.update(base_loss.item(), base_inputs.size(0))\n base_top1.update (base_prec1.item(), base_inputs.size(0))\n base_top5.update (base_prec5.item(), base_inputs.size(0))\n\n # update the architecture-weight\n network.module.set_cal_mode( 'joint' )\n network.zero_grad()\n _, logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n arch_loss.backward()\n a_optimizer.step()\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % print_freq == 0 or step + 1 == len(xloader):\n Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))\n Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)\n Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)\n Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} 
({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)\n logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)\n #print (nn.functional.softmax(network.module.arch_parameters, dim=-1))\n #print (network.module.arch_parameters)\n return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef get_best_arch(xloader, network, n_samples):\n with torch.no_grad():\n network.eval()\n archs, valid_accs = network.module.return_topK(n_samples), []\n #print ('obtain the top-{:} architectures'.format(n_samples))\n loader_iter = iter(xloader)\n for i, sampled_arch in enumerate(archs):\n network.module.set_cal_mode('dynamic', sampled_arch)\n try:\n inputs, targets = next(loader_iter)\n except:\n loader_iter = iter(xloader)\n inputs, targets = next(loader_iter)\n\n _, logits = network(inputs)\n val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))\n\n valid_accs.append(val_top1.item())\n\n best_idx = np.argmax(valid_accs)\n best_arch, best_valid_acc = archs[best_idx], valid_accs[best_idx]\n return best_arch, best_valid_acc\n\n\ndef valid_func(xloader, network, criterion):\n data_time, batch_time = AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n end = time.time()\n with torch.no_grad():\n network.eval()\n for step, (arch_inputs, arch_targets) in enumerate(xloader):\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n # prediction\n _, logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n return arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef main(xargs):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n search_loader, _, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', \\\n (config.batch_size, config.test_batch_size), xargs.workers)\n logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n\n search_space = get_search_spaces('cell', xargs.search_space_name)\n if xargs.model_config is None:\n model_config = dict2config(\n dict(name='SETN', C=xargs.channel, N=xargs.num_cells, max_nodes=xargs.max_nodes, num_classes=class_num,\n space=search_space, affine=False, track_running_stats=bool(xargs.track_running_stats)), None)\n else:\n model_config = load_config(xargs.model_config, dict(num_classes=class_num, space=search_space, affine=False,\n track_running_stats=bool(xargs.track_running_stats)), None)\n 
logger.log('search space : {:}'.format(search_space))\n search_model = get_cell_based_tiny_net(model_config)\n \n w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)\n a_optimizer = torch.optim.Adam(search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay)\n logger.log('w-optimizer : {:}'.format(w_optimizer))\n logger.log('a-optimizer : {:}'.format(a_optimizer))\n logger.log('w-scheduler : {:}'.format(w_scheduler))\n logger.log('criterion : {:}'.format(criterion))\n flop, param = get_model_infos(search_model, xshape)\n logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))\n logger.log('search-space : {:}'.format(search_space))\n if xargs.arch_nas_dataset is None:\n api = None\n else:\n api = API(xargs.arch_nas_dataset)\n logger.log('{:} create API = {:} done'.format(time_string(), api))\n\n last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')\n network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()\n\n if last_info.exists(): # automatically resume from previous checkpoint\n logger.log(\"=> loading checkpoint of the last-info '{:}' start\".format(last_info))\n last_info = torch.load(last_info)\n start_epoch = last_info['epoch']\n checkpoint = torch.load(last_info['last_checkpoint'])\n genotypes = checkpoint['genotypes']\n valid_accuracies = checkpoint['valid_accuracies']\n search_model.load_state_dict( checkpoint['search_model'] )\n w_scheduler.load_state_dict ( checkpoint['w_scheduler'] )\n w_optimizer.load_state_dict ( checkpoint['w_optimizer'] )\n a_optimizer.load_state_dict ( checkpoint['a_optimizer'] )\n logger.log(\"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.\".format(last_info, start_epoch))\n else:\n logger.log(\"=> do not find the last-info file : {:}\".format(last_info))\n init_genotype, _ = get_best_arch(valid_loader, network, xargs.select_num)\n start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: init_genotype}\n\n # start training\n start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup\n for epoch in range(start_epoch, total_epoch):\n w_scheduler.update(epoch, 0.0)\n need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.val * (total_epoch-epoch), True) )\n epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)\n logger.log('\\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))\n\n search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \\\n = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)\n search_time.update(time.time() - start_time)\n logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))\n logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))\n\n genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)\n network.module.set_cal_mode('dynamic', genotype)\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, 
valid_a_loss, valid_a_top1, valid_a_top5, genotype))\n #search_model.set_cal_mode('urs')\n #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n #logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))\n #search_model.set_cal_mode('joint')\n #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n #logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))\n #search_model.set_cal_mode('select')\n #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n #logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))\n # check the best accuracy\n valid_accuracies[epoch] = valid_a_top1\n\n genotypes[epoch] = genotype\n logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))\n # save checkpoint\n save_path = save_checkpoint({'epoch' : epoch + 1,\n 'args' : deepcopy(xargs),\n 'search_model': search_model.state_dict(),\n 'w_optimizer' : w_optimizer.state_dict(),\n 'a_optimizer' : a_optimizer.state_dict(),\n 'w_scheduler' : w_scheduler.state_dict(),\n 'genotypes' : genotypes,\n 'valid_accuracies' : valid_accuracies},\n model_base_path, logger)\n last_info = save_checkpoint({\n 'epoch': epoch + 1,\n 'args' : deepcopy(args),\n 'last_checkpoint': save_path,\n }, logger.path('info'), logger)\n with torch.no_grad():\n logger.log('{:}'.format(search_model.show_alphas()))\n if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '200')))\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n\n # the final post procedure : count the time\n start_time = time.time()\n genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)\n search_time.update(time.time() - start_time)\n network.module.set_cal_mode('dynamic', genotype)\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('Last : the gentotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))\n\n logger.log('\\n' + '-'*100)\n # check the performance from the architecture dataset\n logger.log('SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotype))\n if api is not None: logger.log('{:}'.format(api.query_by_arch(genotype, '200') ))\n logger.close()\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"SETN\")\n parser.add_argument('--data_path', type=str, help='Path to dataset')\n parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n # channels and number-of-cells\n parser.add_argument('--search_space_name', type=str, help='The search space name.')\n parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n parser.add_argument('--channel', type=int, help='The number of channels.')\n parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n parser.add_argument('--select_num', type=int, help='The number of selected architectures to evaluate.')\n parser.add_argument('--track_running_stats',type=int, choices=[0,1],help='Whether use track_running_stats or not in the BN 
layer.')\n parser.add_argument('--config_path', type=str, help='The path of the configuration.')\n # architecture leraning rate\n parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\n parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\n # log\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')\n parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n parser.add_argument('--rand_seed', type=int, help='manual seed')\n args = parser.parse_args()\n if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n main(args)\n" ]
[ [ "matplotlib.use", "numpy.array", "scipy.stats.kendalltau", "matplotlib.pyplot.grid", "torch.save", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.ticker.FormatStrFormatter", "torch.load" ], [ "torch.no_grad", "torch.cuda.is_available", "torch.load", "torch.nn.functional.softmax", "torch.zeros_like", "torch.nn.DataParallel", "torch.set_num_threads" ], [ "torch.save", "numpy.mean", "torch.load" ], [ "torch.load" ], [ "torch.no_grad", "torch.cuda.is_available", "numpy.argmax", "torch.load", "torch.nn.DataParallel", "torch.set_num_threads" ] ]
cemac-tech/WCSSP-FORTIS
[ "f7f47112de085095d39b403dab6cbdc1db987801" ]
[ "FORTISApp.bak.py" ]
[ "from flask import Flask, render_template, flash, redirect, url_for, request, g, session, abort, send_from_directory\nfrom wtforms import Form, validators, StringField, TextAreaField, SelectField, PasswordField\nfrom werkzeug.utils import secure_filename\nfrom passlib.hash import sha256_crypt\nfrom functools import wraps\nimport os\nimport pandas as pd\nfrom flask_sqlalchemy import SQLAlchemy\nimport boto3\nfrom random import randint\nimport json\nimport sys\nimport dropbox\nimport mammoth\n\napp = Flask(__name__)\n\n# Set config variables:\nassert \"APP_SETTINGS\" in os.environ, \"APP_SETTINGS environment variable not set\"\nassert \"SECRET_KEY\" in os.environ, \"SECRET_KEY environment variable not set\"\nassert \"ADMIN_PWD\" in os.environ, \"ADMIN_PWD environment variable not set\"\nassert \"DATABASE_URL\" in os.environ, \"DATABASE_URL environment variable not set\"\nassert \"S3_OR_DBX\" in os.environ, \"S3_OR_DBX environment variable not set\"\nif os.environ['S3_OR_DBX'] == 'S3':\n assert \"AWS_ACCESS_KEY_ID\" in os.environ, \"AWS_ACCESS_KEY_ID environment variable not set\"\n assert \"AWS_SECRET_ACCESS_KEY\" in os.environ, \"AWS_SECRET_ACCESS_KEY environment variable not set\"\n assert \"S3_BUCKET\" in os.environ, \"S3_BUCKET environment variable not set\"\nelif os.environ['S3_OR_DBX'] == 'DBX':\n assert \"DROPBOX_KEY\" in os.environ, \"DROPBOX_KEY environment variable not set\"\nelse:\n sys.exit(\"Variable S3_OR_DBX not set correctly\")\napp.config.from_object(os.environ['APP_SETTINGS'])\n\n# Configure postgresql database:\ndb = SQLAlchemy(app)\nfrom models import Trainees, Trainers, Workshops, Files, Timetables, Folders\n# ######### GLOBAL VARIABLES ##########\ntypeDict = {\n 'lectures1': 'Day 1 / Lectures / ',\n 'practicals1': 'Day 1 / Practical 1 /',\n 'practicals2-1': 'Day 1 / Practical 2 / ',\n 'lectures2': 'Day 2 / Lectures / ',\n 'practicals2': 'Day 2 / Practical 1 / ',\n 'practicals2-2': 'Day 2 / Practical 2 / ',\n 'lectures3': 'Day 3 / Lectures / ',\n 'practicals3': 'Day 3 / Practical 1 / ',\n 'practicals2-3': 'Day 3 / Practical 2 / ',\n 'lectures4': 'Day 4 / Lectures / ',\n 'practicals4': 'Day 4 / Practical 1 / ',\n 'practicals2-4': 'Day 4 / Practical 2 / ',\n 'lectures5': 'Day 5 / Lectures / ',\n 'practicals5': 'Day 5 / Practical 1 / ',\n 'other': 'Other'\n}\n######################################\n\n# ######### PSQL FUNCTIONS ##########\n\n\ndef psql_to_pandas(query):\n df = pd.read_sql(query.statement, db.session.bind)\n return df\n\n\ndef psql_insert(row):\n db.session.add(row)\n db.session.commit()\n return row.id\n\n\ndef psql_delete(row):\n db.session.delete(row)\n db.session.commit()\n return\n####################################\n\n# ######### S3 FUNCTIONS/ROUTES ##########\n\n\ndef delete_file_from_s3(filename):\n bucket_name = app.config['S3_BUCKET']\n s3 = boto3.resource('s3', 'eu-west-2')\n s3.Object(bucket_name, filename).delete()\n return\n\n\n@app.route('/sign_s3/')\ndef sign_s3():\n bucket_name = app.config['S3_BUCKET']\n filename_orig = request.args.get('file_name')\n filename_s3 = str(randint(10000, 99999)) + '_' + \\\n secure_filename(filename_orig)\n file_type = request.args.get('file_type')\n s3 = boto3.client('s3', 'eu-west-2')\n presigned_post = s3.generate_presigned_post(\n Bucket=bucket_name,\n Key=filename_s3,\n Fields={\"acl\": \"private\", \"Content-Type\": file_type},\n Conditions=[\n {\"acl\": \"private\"},\n {\"Content-Type\": file_type}\n ],\n ExpiresIn=3600\n )\n return json.dumps({\n 'data': presigned_post,\n 'url': 
'https://%s.s3.eu-west-2.amazonaws.com/%s' % (bucket_name, filename_s3)\n })\n\n\n@app.route('/sign_s3_download_timetable/')\ndef sign_s3_download_timetable():\n bucket_name = app.config['S3_BUCKET']\n id = request.args.get('id')\n # Retrieve s3 filename from DB:\n db_entry = Timetables.query.filter_by(id=id).first()\n filename_s3 = db_entry.filename\n # Access granting:\n if not 'logged_in' in session:\n abort(403)\n # Create and return pre-signed url:\n s3 = boto3.client('s3', 'eu-west-2')\n presigned_url = s3.generate_presigned_url(\n 'get_object',\n Params={'Bucket': bucket_name, 'Key': filename_s3},\n ExpiresIn=3600\n )\n return json.dumps({\n 'url': presigned_url,\n })\n\n\n@app.route('/sign_s3_download_file/')\ndef sign_s3_download_file():\n bucket_name = app.config['S3_BUCKET']\n id = request.args.get('id')\n # Retrieve s3 filename from DB:\n db_entry = Files.query.filter_by(id=id).first()\n filename_s3 = db_entry.filename\n # Access granting:\n who = db_entry.who\n if not 'logged_in' in session:\n abort(403)\n if who == 'trainers' and session['usertype'] == 'trainee':\n abort(403)\n # Create and return pre-signed url:\n s3 = boto3.client('s3', 'eu-west-2')\n presigned_url = s3.generate_presigned_url(\n 'get_object',\n Params={'Bucket': bucket_name, 'Key': filename_s3},\n ExpiresIn=3600\n )\n return json.dumps({\n 'url': presigned_url,\n })\n##################################\n\n# ######### DROPBOX FUNCTIONS ##########\n\n\ndef upload_file_to_dbx(file, filename):\n dbx = dropbox.Dropbox(app.config['DROPBOX_KEY'])\n response = dbx.files_upload(file.read(), '/' + filename, mute=True)\n\n\ndef download_file_from_dbx(filename):\n dbx = dropbox.Dropbox(app.config['DROPBOX_KEY'])\n response = dbx.files_download_to_file('/tmp/' + filename, '/' + filename)\n\n\ndef delete_file_from_dbx(filename):\n dbx = dropbox.Dropbox(app.config['DROPBOX_KEY'])\n response = dbx.files_delete('/' + filename)\n##################################\n\n# ######### LOGGED-IN FUNCTIONS ##########\n# Check if user is logged in\n\n\ndef is_logged_in(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('Unauthorised, please login', 'danger')\n return redirect(url_for('index'))\n return wrap\n\n# Check if user is logged in as a trainer/admin\n\n\ndef is_logged_in_as_trainer(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session and (session['usertype'] == 'trainer' or session['usertype'] == 'admin'):\n return f(*args, **kwargs)\n else:\n flash('Unauthorised, please login as a trainer/admin', 'danger')\n return redirect(url_for('index'))\n return wrap\n\n# Check if user is logged in as admin\n\n\ndef is_logged_in_as_admin(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session and session['usertype'] == 'admin':\n return f(*args, **kwargs)\n else:\n flash('Unauthorised, please login as admin', 'danger')\n return redirect(url_for('index'))\n return wrap\n#########################################\n\n########## MISC FUNCTIONS ##########\n# Get list of workshops from workshop DB:\n\n\ndef get_workshop_list():\n workshopDF = psql_to_pandas(Workshops.query)\n workshopList = [('blank', '--Please select--')]\n for w in workshopDF['workshop']:\n workshopList.append((w, w))\n return workshopList\n\n# Get list of types for Upload Form:\n\n\ndef get_type_list(workshop):\n typeList = [('blank', '--Please select--')]\n # Add default folders:\n for key, value in typeDict.items():\n typeList.append((key, value))\n # Add custom 
folders:\n foldersDF = psql_to_pandas(Folders.query.filter_by(workshop=workshop))\n for index, row in foldersDF.iterrows():\n key = row['parent'] + '_' + row['name']\n value = typeDict[row['parent']] + row['name']\n typeList.append((key, value))\n # Sort by second element:\n typeList = sorted(typeList, key=lambda tup: tup[1])\n return typeList\n####################################\n\n# ######### FORM CLASSES ##########\n\n\nclass TimetableForm(Form):\n workshop = SelectField(u'Select the workshop that this timetable is for',\n [validators.NoneOf(('blank'), message='Please select')])\n\n\nclass UploadForm(Form):\n title = StringField(u'Title of material', [\n validators.required(), validators.Length(min=1, max=50)])\n description = TextAreaField(u'Description of material', [\n validators.optional(), validators.Length(max=1000)])\n type = SelectField('Select the type of material you are uploading',\n [validators.NoneOf(('blank'), message='Please select')])\n who = SelectField('Is the material for trainees (typically non-editable files, e.g. PDFs) or trainers (typically editable files, e.g. PPTs)',\n [validators.NoneOf(('blank'), message='Please select')],\n choices=[('blank', '--Please select--'),\n ('trainees', 'Trainees'),\n ('trainers', 'Trainers')])\n\n\nclass RegisterForm(Form):\n username = StringField('Username',\n [validators.Regexp('^BMKG_participant-[0-9]{2}$',\n message='Username must be of the form BMKG_participant-XX where XX is a two-digit number')])\n password = PasswordField('Password',\n [validators.Regexp('^([a-zA-Z0-9]{8,})$',\n message='Password must be mimimum 8 characters and contain only uppercase letters, \\\n lowercase letters and numbers')])\n\n\nclass RegisterTrainerForm(Form):\n username = StringField('Username', [validators.Length(min=4, max=25)])\n password = PasswordField('Password',\n [validators.Regexp('^([a-zA-Z0-9]{8,})$',\n message='Password must be mimimum 8 characters and contain only uppercase letters, \\\n lowercase letters and numbers')])\n\n\nclass ChangePwdForm(Form):\n current = PasswordField('Current password',\n [validators.DataRequired()])\n new = PasswordField('New password',\n [validators.Regexp('^([a-zA-Z0-9]{8,})$',\n message='Password must be mimimum 8 characters and contain only uppercase letters, \\\n lowercase letters and numbers')])\n confirm = PasswordField('Confirm new password',\n [validators.EqualTo('new', message='Passwords do no match')])\n##################################\n\n# ####################################\n# ######### START OF ROUTES ##########\n# ####################################\n\n# Index\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == 'POST':\n # Get form fields\n username = request.form['username']\n password_candidate = request.form['password']\n # Check trainee accounts first:\n user = Trainees.query.filter_by(username=username).first()\n if user is not None:\n password = user.password\n # Compare passwords\n if password_candidate == password:\n # Passed\n session['logged_in'] = True\n session['username'] = username\n session['usertype'] = 'trainee'\n flash('You are now logged in', 'success')\n return redirect(url_for('index'))\n else:\n flash('Incorrect password', 'danger')\n return redirect(url_for('index'))\n # Check trainer accounts next:\n user = Trainers.query.filter_by(username=username).first()\n if user is not None:\n password = user.password\n # Compare passwords\n if sha256_crypt.verify(password_candidate, password):\n # Passed\n session['logged_in'] = True\n 
session['username'] = username\n if username != 'sam_hardy':\n session['usertype'] = 'trainer'\n flash('You are now logged in', 'success')\n elif username == 'sam_hardy':\n session['usertype'] = 'admin'\n flash('You are now logged in with admin privillages', 'success')\n return redirect(url_for('index'))\n else:\n flash('Incorrect password', 'danger')\n return redirect(url_for('index'))\n # Finally check admin account:\n if username == 'admin':\n password = app.config['ADMIN_PWD']\n if password_candidate == password:\n # Passed\n session['logged_in'] = True\n session['username'] = 'admin'\n session['usertype'] = 'admin'\n flash('You are now logged in', 'success')\n return redirect(url_for('index'))\n else:\n flash('Incorrect password', 'danger')\n return redirect(url_for('index'))\n # Username not found:\n flash('Username not found', 'danger')\n return redirect(url_for('index'))\n return render_template('home.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/timetables', methods=[\"GET\", \"POST\"])\n@is_logged_in\ndef timetables():\n form = TimetableForm(request.form)\n form.workshop.choices = get_workshop_list()\n timetablesData = psql_to_pandas(Timetables.query)\n # If user tries to upload a timetable\n if request.method == 'POST':\n if form.validate():\n if app.config['S3_OR_DBX'] == 'S3': # Get filename only\n filename = request.form['filename_s3']\n else: # Also get file\n file = request.files['file']\n filename = str(randint(10000, 99999)) + '_' + \\\n secure_filename(file.filename)\n # Get fields from web-form\n workshop = form.workshop.data\n author = session['username']\n # Delete old timetable if it exists:\n timetable = Timetables.query.filter_by(workshop=workshop).first()\n if timetable is not None:\n old_filename = timetable.filename\n # Delete from DB:\n psql_delete(timetable)\n # Delete from cloud:\n try:\n if app.config['S3_OR_DBX'] == 'S3':\n delete_file_from_s3(old_filename)\n else:\n delete_file_from_dbx(old_filename)\n except:\n flash(\"Unable to delete timetable from cloud\", \"warning\")\n # Insert new timetable into database:\n db_row = Timetables(filename=filename,\n workshop=workshop, author=author)\n id = psql_insert(db_row)\n if app.config['S3_OR_DBX'] == 'DBX': # Save file to dropbox\n upload_file_to_dbx(file, filename)\n # flash success message and reload page\n flash('Timetable uploaded successfully', 'success')\n return redirect(url_for('timetables'))\n else:\n if app.config['S3_OR_DBX'] == 'S3': # Delete file from S3\n filename_s3 = request.form['filename_s3']\n delete_file_from_s3(filename_s3)\n # Flash error message:\n flash('Fix form errors and try again', 'danger')\n return render_template('timetables.html', form=form, timetablesData=timetablesData, S3_OR_DBX=app.config['S3_OR_DBX'])\n\n\n@app.route('/partners')\ndef partners():\n return render_template('partners.html')\n\n\n@app.route('/contact-us')\ndef contact_us():\n return render_template('contact-us.html')\n\n\n@app.route('/select-workshop/<string:linkTo>')\n@is_logged_in\ndef select_workshop(linkTo):\n workshopsData = psql_to_pandas(Workshops.query)\n return render_template('select-workshop.html', workshopsData=workshopsData, linkTo=linkTo)\n\n\n@app.route('/training-material/<string:workshopID>')\n@is_logged_in\ndef training_material(workshopID):\n # Check workshop exists:\n result = Workshops.query.filter_by(id=workshopID).first()\n if result is None:\n abort(404)\n workshop = result.workshop\n # Subset Files and Folders data:\n allFilesData 
= psql_to_pandas(Files.query)\n filesData = allFilesData.loc[allFilesData['workshop'] == workshop]\n allfoldersData = psql_to_pandas(Folders.query)\n foldersData = allfoldersData.loc[allfoldersData['workshop'] == workshop]\n return render_template('material.html', filesData=filesData, foldersData=foldersData,\n workshop=workshop, who='trainees', S3_OR_DBX=app.config['S3_OR_DBX'])\n\n\n@app.route('/trainer-material/<string:workshopID>')\n@is_logged_in_as_trainer\ndef trainer_material(workshopID):\n # Check workshop exists:\n result = Workshops.query.filter_by(id=workshopID).first()\n if result is None:\n abort(404)\n workshop = result.workshop\n # Subset Files and Folders data:\n allFilesData = psql_to_pandas(Files.query)\n filesData = allFilesData.loc[allFilesData['workshop'] == workshop]\n allfoldersData = psql_to_pandas(Folders.query)\n foldersData = allfoldersData.loc[allfoldersData['workshop'] == workshop]\n return render_template('material.html', filesData=filesData, foldersData=foldersData,\n workshop=workshop, who='trainers', S3_OR_DBX=app.config['S3_OR_DBX'])\n\n\n@app.route('/upload/<string:workshopID>', methods=[\"GET\", \"POST\"])\n@is_logged_in_as_trainer\ndef upload(workshopID):\n # Check workshop exists:\n result = Workshops.query.filter_by(id=workshopID).first()\n if result is None:\n abort(404)\n workshop = result.workshop\n # Prepare form:\n form = UploadForm(request.form)\n form.type.choices = get_type_list(workshop)\n # If user tries to upload a file\n if request.method == 'POST':\n if form.validate():\n if app.config['S3_OR_DBX'] == 'S3': # Get filename only\n filename = request.form['filename_s3']\n else: # Also get file\n file = request.files['file']\n filename = str(randint(10000, 99999)) + '_' + \\\n secure_filename(file.filename)\n # Get fields from web-form\n title = form.title.data\n description = form.description.data\n type = form.type.data\n who = form.who.data\n author = session['username']\n # Insert into files database:\n db_row = Files(filename=filename, title=title, description=description,\n workshop=workshop, type=type, who=who, author=author)\n id = psql_insert(db_row)\n if app.config['S3_OR_DBX'] == 'DBX': # Save file to dropbox\n upload_file_to_dbx(file, filename)\n # flash success message and reload page\n flash('File uploaded successfully', 'success')\n return redirect(url_for('upload', workshopID=workshopID))\n else:\n if app.config['S3_OR_DBX'] == 'S3': # Delete file from S3\n filename_s3 = request.form['filename_s3']\n delete_file_from_s3(filename_s3)\n # Flash error message:\n flash('Fix form errors and try again', 'danger')\n # If user just navigates to page\n return render_template('upload.html', form=form, workshop=workshop,\n workshopID=workshopID, S3_OR_DBX=app.config['S3_OR_DBX'])\n\n\n@app.route('/trainee-accounts', methods=[\"GET\", \"POST\"])\n@is_logged_in_as_trainer\ndef trainee_accounts():\n usersData = psql_to_pandas(Trainees.query.order_by(Trainees.username))\n form = RegisterForm(request.form)\n if request.method == 'POST' and form.validate():\n username = form.username.data\n # Check username is unique\n user = Trainees.query.filter_by(username=username).first()\n if user is not None:\n flash('Username already exists', 'danger')\n return redirect(url_for('trainee_accounts'))\n password = form.password.data\n db_row = Trainees(username=username, password=password)\n id = psql_insert(db_row)\n flash('Trainee account added', 'success')\n return redirect(url_for('trainee_accounts'))\n return render_template('trainee-accounts.html', 
form=form, usersData=usersData)\n\n\n@app.route('/trainer-accounts', methods=[\"GET\", \"POST\"])\n@is_logged_in_as_admin\ndef trainer_accounts():\n usersData = psql_to_pandas(Trainers.query)\n form = RegisterTrainerForm(request.form)\n if request.method == 'POST' and form.validate():\n username = form.username.data\n # Check username is unique\n user = Trainers.query.filter_by(username=username).first()\n if user is not None:\n flash('Username already exists', 'danger')\n return redirect(url_for('trainer_accounts'))\n if username == 'admin' or username.startswith('trainee'):\n flash('Username not allowed', 'danger')\n return redirect(url_for('trainer_accounts'))\n password = sha256_crypt.encrypt(str(form.password.data))\n db_row = Trainers(username=username, password=password)\n id = psql_insert(db_row)\n flash('Trainer account added', 'success')\n return redirect(url_for('trainer_accounts'))\n return render_template('trainer-accounts.html', form=form, usersData=usersData)\n\n\n@app.route('/change-pwd', methods=[\"GET\", \"POST\"])\n@is_logged_in_as_trainer\ndef change_pwd():\n form = ChangePwdForm(request.form)\n if request.method == 'POST' and form.validate():\n user = Trainers.query.filter_by(username=session['username']).first()\n password = user.password\n current = form.current.data\n if sha256_crypt.verify(current, password):\n user.password = sha256_crypt.encrypt(str(form.new.data))\n db.session.commit()\n flash('Password changed', 'success')\n return redirect(url_for('change_pwd'))\n else:\n flash('Current password incorrect', 'danger')\n return redirect(url_for('change_pwd'))\n return render_template('change-pwd.html', form=form)\n\n\n@app.route('/workshops', methods=[\"GET\", \"POST\"])\n@is_logged_in_as_admin\ndef workshops():\n workshopsData = psql_to_pandas(Workshops.query)\n if request.method == 'POST':\n workshop = request.form['workshop']\n db_row = Workshops(workshop=workshop)\n id = psql_insert(db_row)\n flash('Workshop added', 'success')\n return redirect(url_for('workshops'))\n return render_template('workshops.html', workshopsData=workshopsData)\n\n\n@app.route('/folders/<string:id>')\n@is_logged_in_as_admin\ndef folders(id):\n # Retrieve workshop:\n result = Workshops.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n allFoldersData = psql_to_pandas(Folders.query)\n foldersData = allFoldersData.loc[allFoldersData['workshop']\n == result.workshop]\n return render_template('folders.html', data=foldersData, workshopName=result.workshop, workshopID=id)\n\n\n@app.route('/add-folder/<string:id>/<string:parent>', methods=[\"POST\"])\n@is_logged_in_as_admin\ndef add_folder(id, parent):\n # Retrieve workshop:\n workshop = Workshops.query.filter_by(id=id).first().workshop\n name = request.form['folder']\n db_row = Folders(workshop=workshop, parent=parent, name=name)\n dummy = psql_insert(db_row)\n return redirect(url_for('folders', id=id))\n\n\n@app.route('/delete-folder/<string:id>', methods=[\"POST\"])\n@is_logged_in_as_admin\ndef delete_folder(id):\n # Retrieve folder:\n folder = Folders.query.filter_by(id=id).first()\n if folder is None:\n abort(404)\n # Retrieve workshop id:\n workshop = folder.workshop\n workshopID = Workshops.query.filter_by(workshop=workshop).first().id\n # Check folder is empty:\n type = folder.parent + '_' + folder.name\n filesInFolder = Files.query.filter_by(workshop=workshop, type=type).first()\n if filesInFolder is not None:\n flash(\"Cannot delete folder until it is empty (check both trainee and trainer material)\", \"danger\")\n 
return redirect(url_for('folders', id=workshopID))\n # Delete from DB:\n psql_delete(folder)\n flash(\"Folder deleted\", \"success\")\n return redirect(url_for('folders', id=workshopID))\n\n\n@app.route('/edit/<string:id>/<string:S3_OR_DBX>', methods=[\"POST\"])\n@is_logged_in_as_trainer\ndef edit(id, S3_OR_DBX):\n result = Files.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n workshop = result.workshop\n if 'edit' in request.form:\n form = UploadForm()\n form.type.choices = get_type_list(workshop)\n form.title.data = result.title\n form.description.data = result.description\n form.type.data = result.type\n form.who.data = result.who\n return render_template('edit.html', form=form, id=id, S3_OR_DBX=S3_OR_DBX)\n else:\n form = UploadForm(request.form)\n form.type.choices = get_type_list(workshop)\n if form.validate():\n if app.config['S3_OR_DBX'] == 'S3': # Get filename only\n filename = request.form['filename_s3']\n else: # Also get file\n if 'file' in request.files:\n file = request.files['file']\n filename = str(randint(10000, 99999)) + \\\n '_' + secure_filename(file.filename)\n else:\n filename = ''\n # Delete old file if not blank:\n if not filename == '':\n old_filename = result.filename\n if app.config['S3_OR_DBX'] == 'S3':\n delete_file_from_s3(old_filename)\n else:\n delete_file_from_dbx(old_filename)\n # Save new file to dropbox:\n upload_file_to_dbx(file, filename)\n result.filename = filename\n # Get form info:\n title = form.title.data\n description = form.description.data\n type = form.type.data\n who = form.who.data\n # Update DB:\n result.title = title\n result.description = description\n result.type = type\n result.who = who\n db.session.commit()\n flash('File edits successful', 'success')\n return redirect(url_for('index'))\n else:\n # Delete file from S3 if not blank:\n if app.config['S3_OR_DBX'] == 'S3':\n filename = request.form['filename_s3']\n if not filename == \"\":\n delete_file_from_s3(filename)\n # Flash error message:\n flash('Invalid option selected, please try to edit the file again', 'danger')\n return redirect(url_for('index'))\n\n# Download file (Dropbox only)\n\n\n@app.route('/download-file/<string:id>', methods=['POST'])\n@is_logged_in\ndef download_file(id):\n result = Files.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n filename = result.filename\n # Try to download the file from dbx to /tmp if it's not already there:\n if not os.path.exists('/tmp/' + filename):\n try:\n download_file_from_dbx(filename)\n except:\n flash(\"Unable to download file\", \"danger\")\n return redirect(url_for('index'))\n # Serve the file to the client:\n if os.path.exists('/tmp/' + filename):\n return send_from_directory('/tmp', filename, as_attachment=True, attachment_filename=filename)\n else:\n abort(404)\n\n# Download timetable (Dropbox only)\n\n\n@app.route('/download-timetable/<string:id>', methods=['POST'])\n@is_logged_in\ndef download_timetable(id):\n result = Timetables.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n filename = result.filename\n # Try to download the timetable from dbx to /tmp if it's not already there:\n if not os.path.exists('/tmp/' + filename):\n try:\n download_file_from_dbx(filename)\n except:\n flash(\"Unable to download timetable\", \"danger\")\n return redirect(url_for('timetables'))\n # Serve the timetable to the client:\n if os.path.exists('/tmp/' + filename):\n return send_from_directory('/tmp', filename, as_attachment=True, attachment_filename=filename)\n else:\n abort(404)\n\n# 
View timetable (Dropbox only; docx files only)\n\n\n@app.route('/view-timetable/<string:id>')\n@is_logged_in\ndef view_timetable(id):\n if app.config['S3_OR_DBX'] != 'DBX':\n abort(403)\n result = Timetables.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n filename = result.filename\n # Try to download the timetable from dbx to /tmp if it's not already there:\n if not os.path.exists('/tmp/' + filename):\n try:\n download_file_from_dbx(filename)\n except:\n flash(\"Unable to download timetable\", \"danger\")\n return redirect(url_for('timetables'))\n # Convert to HTML:\n try:\n with open('/tmp/' + filename, \"rb\") as docx_file:\n result = mammoth.convert_to_html(docx_file)\n text = result.value\n print(text)\n return text\n except:\n flash(\"Unable to convert to html\", \"danger\")\n return redirect(url_for('timetables'))\n\n# Delete file\n\n\n@app.route('/delete-file/<string:id>', methods=['POST'])\n@is_logged_in_as_trainer\ndef delete_file(id):\n result = Files.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n filename = result.filename\n # Delete from DB:\n psql_delete(result)\n # Delete from cloud:\n try:\n if app.config['S3_OR_DBX'] == 'S3':\n delete_file_from_s3(filename)\n else:\n delete_file_from_dbx(filename)\n except:\n flash(\"Unable to delete file from cloud\", \"warning\")\n flash(\"File deleted\", \"success\")\n return redirect(url_for('index'))\n\n# Delete timetable\n\n\n@app.route('/delete-timetable/<string:id>', methods=['POST'])\n@is_logged_in_as_trainer\ndef delete_timetable(id):\n result = Timetables.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n filename = result.filename\n # Delete from DB:\n psql_delete(result)\n # Delete from cloud:\n try:\n if app.config['S3_OR_DBX'] == 'S3':\n delete_file_from_s3(filename)\n else:\n delete_file_from_dbx(filename)\n except:\n flash(\"Unable to delete timetable from cloud\", \"warning\")\n flash(\"Timetable deleted\", \"success\")\n return redirect(url_for('timetables'))\n\n# Delete trainee\n\n\n@app.route('/delete-trainee/<string:id>', methods=['POST'])\n@is_logged_in_as_admin\ndef delete_trainee(id):\n result = Trainees.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n psql_delete(result)\n flash('Trainee account deleted', 'success')\n return redirect(url_for('trainee_accounts'))\n\n# Delete trainer\n\n\n@app.route('/delete-trainer/<string:id>', methods=['POST'])\n@is_logged_in_as_admin\ndef delete_trainer(id):\n result = Trainers.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n psql_delete(result)\n flash('Trainer account deleted', 'success')\n return redirect(url_for('trainer_accounts'))\n\n# Delete workshop\n\n\n@app.route('/delete-workshop/<string:id>', methods=['POST'])\n@is_logged_in_as_admin\ndef delete_workshop(id):\n result = Workshops.query.filter_by(id=id).first()\n if result is None:\n abort(404)\n psql_delete(result)\n flash('Workshop deleted', 'success')\n return redirect(url_for('workshops'))\n\n# Logout\n\n\n@app.route('/logout')\n@is_logged_in\ndef logout():\n session.clear()\n flash('You are now logged out', 'success')\n return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" ]
[ [ "pandas.read_sql" ] ]
seovalue/VDSR-PyTorch
[ "99bf8457a13cda38870f6b1a72fda13fcd8b0efa" ]
[ "test.py" ]
[ "# Copyright 2020 Dakewe Biotech Corporation. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport argparse\nimport os\n\nimport cv2\nimport numpy as np\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom sewar.full_ref import mse\nfrom sewar.full_ref import msssim\nfrom sewar.full_ref import psnr\nfrom sewar.full_ref import rmse\nfrom sewar.full_ref import sam\nfrom sewar.full_ref import ssim\nfrom sewar.full_ref import vifp\n\nfrom vdsr_pytorch import cal_niqe\nfrom vdsr_pytorch import VDSR\n\nparser = argparse.ArgumentParser(description=\"Accurate Image Super-Resolution Using Very Deep Convolutional Networks\")\nparser.add_argument(\"--dataroot\", type=str, default=\"./data/Set5\",\n help=\"The directory address where the image needs \"\n \"to be processed. (default: `./data/Set5`).\")\nparser.add_argument(\"--scale-factor\", type=int, default=4, choices=[2, 3, 4],\n help=\"Image scaling ratio. (default: 4).\")\nparser.add_argument(\"--weights\", type=str, default=\"weights/vdsr_4x.pth\",\n help=\"Generator model name. (default:`weights/vdsr_4x.pth`)\")\nparser.add_argument(\"--cuda\", action=\"store_true\", help=\"Enables cuda\")\n\nargs = parser.parse_args()\nprint(args)\n\ntry:\n os.makedirs(\"result\")\nexcept OSError:\n pass\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n\n# create model\nmodel = VDSR(scale_factor=args.scale_factor).to(device)\n\n# Load state dicts\nmodel.load_state_dict(torch.load(args.weights, map_location=device))\n\n# Evaluate algorithm performance\ntotal_mse_value = 0.0\ntotal_rmse_value = 0.0\ntotal_psnr_value = 0.0\ntotal_ssim_value = 0.0\ntotal_ms_ssim_value = 0.0\ntotal_niqe_value = 0.0\ntotal_sam_value = 0.0\ntotal_vif_value = 0.0\n# Count the number of files in the directory\ntotal_file = 0\n\ndataroot = f\"{args.dataroot}/{args.scale_factor}x/data\"\ntarget = f\"{args.dataroot}/{args.scale_factor}x/target\"\nscale_factor = args.scale_factor\n\nfor filename in os.listdir(dataroot):\n # Open image\n image = Image.open(f\"{dataroot}/{filename}\").convert(\"YCbCr\")\n image_width = int(image.size[0] * scale_factor)\n image_height = int(image.size[1] * scale_factor)\n image = image.resize((image_width, image_height), Image.BICUBIC)\n y, cb, cr = image.split()\n\n preprocess = transforms.ToTensor()\n inputs = preprocess(y).view(1, -1, y.size[1], y.size[0])\n\n inputs = inputs.to(device)\n\n out = model(inputs)\n out = out.cpu()\n out_image_y = out[0].detach().numpy()\n out_image_y *= 255.0\n out_image_y = out_image_y.clip(0, 255)\n out_image_y = Image.fromarray(np.uint8(out_image_y[0]), mode=\"L\")\n\n out_img_cb = cb.resize(out_image_y.size, Image.BICUBIC)\n out_img_cr = 
cr.resize(out_image_y.size, Image.BICUBIC)\n out_img = Image.merge(\"YCbCr\", [out_image_y, out_img_cb, out_img_cr]).convert(\"RGB\")\n # before converting the result in RGB\n out_img.save(f\"result/{filename}\")\n\n # Evaluate performance\n src_img = cv2.imread(f\"result/{filename}\")\n dst_img = cv2.imread(f\"{target}/{filename}\")\n\n total_mse_value += mse(src_img, dst_img)\n total_rmse_value += rmse(src_img, dst_img)\n total_psnr_value += psnr(src_img, dst_img)\n total_ssim_value += ssim(src_img, dst_img)\n total_ms_ssim_value += msssim(src_img, dst_img)\n total_niqe_value += cal_niqe(f\"result/{filename}\")\n total_sam_value += sam(src_img, dst_img)\n total_vif_value += vifp(src_img, dst_img)\n\n total_file += 1\n\nprint(f\"Avg MSE: {total_mse_value / total_file:.2f}\\n\"\n f\"Avg RMSE: {total_rmse_value / total_file:.2f}\\n\"\n f\"Avg PSNR: {total_psnr_value / total_file:.2f}\\n\"\n f\"Avg SSIM: {total_ssim_value / total_file:.4f}\\n\"\n f\"Avg MS-SSIM: {total_ms_ssim_value / total_file:.4f}\\n\"\n f\"Avg NIQE: {total_niqe_value / total_file:.2f}\\n\"\n f\"Avg SAM: {total_sam_value / total_file:.4f}\\n\"\n f\"Avg VIF: {total_vif_value / total_file:.4f}\")\n" ]
[ [ "numpy.uint8" ] ]
HH-MWB/sable
[ "78ec3d1892af83992cdd6a719e16155706b0ea92" ]
[ "sable/exec.py" ]
[ "\"\"\"Sable Executor\"\"\"\n\nfrom contextlib import contextmanager\nfrom sqlite3 import Connection, connect\nfrom typing import Iterable, Iterator\n\nfrom pandas import read_sql_query\n\nfrom sable.data import Table, Tabulation, TestCase\n\n\n@contextmanager\ndef create_sqlite() -> Iterator[Connection]:\n \"\"\"Create SQLite connection.\n\n Create an SQLite connection that can be used for running test cases.\n\n Yields\n -------\n Iterator[Connection]\n Connection to a newly created SQLite database.\n \"\"\"\n connection = connect(\":memory:\")\n try:\n yield connection\n finally:\n connection.close()\n\n\ndef setup_environ(\n connection: Connection, environment: Iterable[Table]\n) -> None:\n \"\"\"Set up environment.\n\n Given an expected environment, which contains the tables and data,\n set up the database to match the expected environment through given\n connection.\n\n Parameters\n ----------\n connection : Connection\n A connection to the database will be set in the environment.\n environment : Iterable[Table]\n Expected environment, which contains tables and data.\n \"\"\"\n cursor = connection.cursor()\n for table in environment:\n cursor.execute(table.definition)\n table.tabulation.dataframe.to_sql(\n table.name,\n connection,\n if_exists=\"append\",\n index=False,\n )\n\n\ndef test(case: TestCase) -> bool:\n \"\"\"Run a test case and get pass/fail result.\n\n Parameters\n ----------\n case : TestCase\n The test case to be checked.\n\n Returns\n -------\n bool\n True for a pass and false for a fail.\n \"\"\"\n with create_sqlite() as connection:\n setup_environ(connection, case.environment)\n\n if case.expectation.expect_checking_from_result():\n captured_frame = read_sql_query(str(case.query), connection)\n else:\n cursor = connection.cursor()\n cursor.execute(str(case.query))\n captured_frame = read_sql_query(\n f\"SELECT * FROM {case.expectation.table_name};\", connection\n )\n\n return Tabulation(captured_frame) == case.expectation.tabulation\n" ]
[ [ "pandas.read_sql_query" ] ]
DidierRLopes/GST-discordbot
[ "8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c", "8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c" ]
[ "discordbot/stocks/technical_analysis/macd.py", "tests/gamestonk_terminal/stocks/dark_pool_shorts/test_sec_view.py" ]
[ "import os\nfrom datetime import datetime, timedelta\nimport discord\nfrom matplotlib import pyplot as plt\n\nfrom gamestonk_terminal.helper_funcs import plot_autoscale\nfrom gamestonk_terminal.common.technical_analysis import momentum_model\nfrom gamestonk_terminal.config_plot import PLOT_DPI\n\nimport discordbot.config_discordbot as cfg\nfrom discordbot.run_discordbot import gst_imgur\nimport discordbot.helpers\n\n\nasync def macd_command(\n ctx, ticker=\"\", fast=\"12\", slow=\"26\", signal=\"9\", start=\"\", end=\"\"\n):\n \"\"\"Displays chart with moving average convergence/divergence [Yahoo Finance]\"\"\"\n\n try:\n\n # Debug\n if cfg.DEBUG:\n print(f\"!stocks.ta.macd {ticker} {fast} {slow} {signal} {start} {end}\")\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n if start == \"\":\n start = datetime.now() - timedelta(days=365)\n else:\n start = datetime.strptime(start, cfg.DATE_FORMAT)\n\n if end == \"\":\n end = datetime.now()\n else:\n end = datetime.strptime(end, cfg.DATE_FORMAT)\n\n if not fast.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n fast = float(fast)\n if not slow.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n slow = float(slow)\n if not signal.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n signal = float(signal)\n\n ticker = ticker.upper()\n df_stock = discordbot.helpers.load(ticker, start)\n if df_stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve Data\n df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]\n\n df_ta = momentum_model.macd(\"1440min\", df_stock, fast, slow, signal)\n\n # Output Data\n fig, axes = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI)\n ax = axes[0]\n ax.set_title(f\"{ticker} MACD\")\n ax.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n ax.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax.set_ylabel(\"Share Price ($)\")\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n\n ax2 = axes[1]\n ax2.plot(df_ta.index, df_ta.iloc[:, 0].values, \"b\", lw=2)\n ax2.plot(df_ta.index, df_ta.iloc[:, 2].values, \"r\", lw=2)\n ax2.bar(df_ta.index, df_ta.iloc[:, 1].values, color=\"g\")\n ax2.legend(\n [\n f\"MACD Line {df_ta.columns[0]}\",\n f\"Signal Line {df_ta.columns[2]}\",\n f\"Histogram {df_ta.columns[1]}\",\n ],\n loc=\"upper left\",\n )\n ax2.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax2.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n\n plt.gcf().autofmt_xdate()\n fig.tight_layout(pad=1)\n\n plt.savefig(\"ta_macd.png\")\n uploaded_image = gst_imgur.upload_image(\"ta_cci.png\", title=\"something\")\n image_link = uploaded_image.link\n if cfg.DEBUG:\n print(f\"Image URL: {image_link}\")\n title = \"Stocks: Moving-Average-Convergence-Divergence \" + ticker\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embed.set_image(url=image_link)\n os.remove(\"ta_macd.png\")\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=\"ERROR Stocks: Moving-Average-Convergence-Divergence\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "# IMPORTATION STANDARD\nfrom datetime import datetime\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION 
INTERNAL\nfrom gamestonk_terminal.stocks import stocks_helper\nfrom gamestonk_terminal.stocks.dark_pool_shorts import sec_view\n\n\ndf_fails_to_deliver = pd.DataFrame(\n data={\n \"SETTLEMENT DATE\": [\n pd.Timestamp(\"2021-11-26 00:00:00\"),\n pd.Timestamp(\"2021-11-30 00:00:00\"),\n ],\n \"QUANTITY (FAILS)\": [27, 2440],\n }\n)\n\n\n@pytest.fixture(scope=\"module\")\ndef vcr_config():\n return {\n \"filter_headers\": [(\"User-Agent\", None)],\n \"filter_query_parameters\": [\n (\"period1\", \"1598220000\"),\n (\"period2\", \"1635980400\"),\n ],\n }\n\n\n@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"raw\",\n [True, False],\n)\ndef test_fails_to_deliver(mocker, raw):\n mocker.patch.object(target=sec_view.gtff, attribute=\"USE_ION\", new=False)\n mocker.patch(\"matplotlib.pyplot.show\")\n mocker.patch(\n target=\"gamestonk_terminal.stocks.dark_pool_shorts.sec_model.get_fails_to_deliver\",\n new=mocker.Mock(return_value=df_fails_to_deliver.copy()),\n )\n stock = stocks_helper.load(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2021-12-18\", \"%Y-%m-%d\"),\n )\n\n sec_view.fails_to_deliver(\n ticker=\"PM\",\n stock=stock,\n start=datetime.strptime(\"2021-12-18\", \"%Y-%m-%d\"),\n end=datetime.strptime(\"2021-12-19\", \"%Y-%m-%d\"),\n num=2,\n raw=raw,\n export=\"\",\n )\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf" ], [ "pandas.Timestamp" ] ]
nikkkkhil/DLOD
[ "1ee63477193dce77b3f8820fe02fa65be698bdb2" ]
[ "DLOD/models/knn.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"k-Nearest Neighbors Detector (kNN)\n\"\"\"\n# Author: Yue Zhao <zhaoy@cmu.edu>\n# License: BSD 2 clause\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom warnings import warn\n\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import BallTree\nfrom sklearn.utils import check_array\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom .base import BaseDetector\n\n# TODO: algorithm parameter is deprecated and will be removed in 0.7.6.\n# Warning has been turned on.\n# TODO: since Ball_tree is used by default, may introduce its parameters.\n\nclass KNN(BaseDetector):\n # noinspection PyPep8\n \"\"\"kNN class for outlier detection.\n For an observation, its distance to its kth nearest neighbor could be\n viewed as the outlying score. It could be viewed as a way to measure\n the density. See :cite:`ramaswamy2000efficient,angiulli2002fast` for\n details.\n\n Three kNN detectors are supported:\n largest: use the distance to the kth neighbor as the outlier score\n mean: use the average of all k neighbors as the outlier score\n median: use the median of the distance to k neighbors as the outlier score\n\n Parameters\n ----------\n contamination : float in (0., 0.5), optional (default=0.1)\n The amount of contamination of the data set,\n i.e. the proportion of outliers in the data set. Used when fitting to\n define the threshold on the decision function.\n\n n_neighbors : int, optional (default = 5)\n Number of neighbors to use by default for k neighbors queries.\n\n method : str, optional (default='largest')\n {'largest', 'mean', 'median'}\n\n - 'largest': use the distance to the kth neighbor as the outlier score\n - 'mean': use the average of all k neighbors as the outlier score\n - 'median': use the median of the distance to k neighbors as the\n outlier score\n\n radius : float, optional (default = 1.0)\n Range of parameter space to use by default for `radius_neighbors`\n queries.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use BallTree\n - 'kd_tree' will use KDTree\n - 'brute' will use a brute-force search.\n - 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n .. deprecated:: 0.74\n ``algorithm`` is deprecated in PyOD 0.7.4 and will not be\n possible in 0.7.6. It has to use BallTree for consistency.\n\n leaf_size : int, optional (default = 30)\n Leaf size passed to BallTree. This can affect the\n speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n metric : string or callable, default 'minkowski'\n metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. 
This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Distance matrices are not supported.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n p : integer, optional (default = 2)\n Parameter for the Minkowski metric from\n sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances\n\n metric_params : dict, optional (default = None)\n Additional keyword arguments for the metric function.\n\n n_jobs : int, optional (default = 1)\n The number of parallel jobs to run for neighbors search.\n If ``-1``, then the number of jobs is set to the number of CPU cores.\n Affects only kneighbors and kneighbors_graph methods.\n\n Attributes\n ----------\n decision_scores_ : numpy array of shape (n_samples,)\n The outlier scores of the training data.\n The higher, the more abnormal. Outliers tend to have higher\n scores. This value is available once the detector is\n fitted.\n\n threshold_ : float\n The threshold is based on ``contamination``. It is the\n ``n_samples * contamination`` most abnormal samples in\n ``decision_scores_``. The threshold is calculated for generating\n binary outlier labels.\n\n labels_ : int, either 0 or 1\n The binary labels of the training data. 0 stands for inliers\n and 1 for outliers/anomalies. It is generated by applying\n ``threshold_`` on ``decision_scores_``.\n \"\"\"\n\n def __init__(self, contamination=0.1, n_neighbors=5, method='largest',\n radius=1.0, algorithm='auto', leaf_size=30,\n metric='minkowski', p=2, metric_params=None, n_jobs=1,\n **kwargs):\n super(KNN, self).__init__(contamination=contamination)\n self.n_neighbors = n_neighbors\n self.method = method\n self.radius = radius\n self.algorithm = algorithm\n self.leaf_size = leaf_size\n self.metric = metric\n self.p = p\n self.metric_params = metric_params\n self.n_jobs = n_jobs\n\n if self.algorithm != 'auto' and self.algorithm != 'ball_tree':\n warn('algorithm parameter is deprecated and will be removed '\n 'in version 0.7.6. By default, ball_tree will be used.',\n FutureWarning)\n\n self.neigh_ = NearestNeighbors(n_neighbors=self.n_neighbors,\n radius=self.radius,\n algorithm=self.algorithm,\n leaf_size=self.leaf_size,\n metric=self.metric,\n p=self.p,\n metric_params=self.metric_params,\n n_jobs=self.n_jobs,\n **kwargs)\n\n def fit(self, X, y=None):\n \"\"\"Fit detector. 
y is optional for unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : numpy array of shape (n_samples,), optional (default=None)\n The ground truth of the input samples (labels).\n \"\"\"\n\n # validate inputs X and y (optional)\n X = check_array(X)\n self._set_n_classes(y)\n\n if self.metric_params is not None:\n self.tree_ = BallTree(X, leaf_size=self.leaf_size,\n metric=self.metric,\n **self.metric_params)\n else:\n self.tree_ = BallTree(X, leaf_size=self.leaf_size,\n metric=self.metric)\n self.neigh_.fit(X)\n\n dist_arr, _ = self.neigh_.kneighbors(n_neighbors=self.n_neighbors,\n return_distance=True)\n dist = self._get_dist_by_method(dist_arr)\n\n self.decision_scores_ = dist.ravel()\n self._process_decision_scores()\n\n return self\n\n def decision_function(self, X):\n \"\"\"Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n \"\"\"\n check_is_fitted(self, ['tree_', 'decision_scores_',\n 'threshold_', 'labels_'])\n\n X = check_array(X)\n\n # initialize the output score\n pred_scores = np.zeros([X.shape[0], 1])\n\n for i in range(X.shape[0]):\n x_i = X[i, :]\n x_i = np.asarray(x_i).reshape(1, x_i.shape[0])\n\n # get the distance of the current point\n dist_arr, _ = self.tree_.query(x_i, k=self.n_neighbors)\n dist = self._get_dist_by_method(dist_arr)\n pred_score_i = dist[-1]\n\n # record the current item\n pred_scores[i, :] = pred_score_i\n\n return pred_scores.ravel()\n\n def _get_dist_by_method(self, dist_arr):\n \"\"\"Internal function to decide how to process passed in distance array\n\n Parameters\n ----------\n dist_arr : numpy array of shape (n_samples, n_neighbors)\n Distance matrix.\n\n Returns\n -------\n dist : numpy array of shape (n_samples,)\n The outlier scores by distance.\n \"\"\"\n\n if self.method == 'largest':\n return dist_arr[:, -1]\n elif self.method == 'mean':\n return np.mean(dist_arr, axis=1)\n elif self.method == 'median':\n return np.median(dist_arr, axis=1)\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "numpy.asarray", "numpy.zeros", "numpy.median", "sklearn.neighbors.BallTree", "numpy.mean", "sklearn.utils.check_array", "sklearn.neighbors.NearestNeighbors" ] ]
worldbank/SDG-big-data
[ "7349cffde3c32bb5a7abc99d910a40e1ba611916" ]
[ "twitter-analytics/code/3-model_evaluation/preliminary/check_presence_seedlist_keyword.py" ]
[ "import pandas as pd\nimport argparse\nimport logging\nfrom pathlib import Path\nimport os\nimport re\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef get_args_from_command_line():\n \"\"\"Parse the command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--country_code\", type=str,\n default=\"US\")\n parser.add_argument(\"--threshold\", type=float,\n default=0.95)\n args = parser.parse_args()\n return args\n\ndef regex_match_string(ngram_list, regex_list, mystring):\n if any(regex.search(mystring) for regex in regex_list):\n return 1\n elif any(regex in mystring for regex in ngram_list):\n return 1\n else:\n return 0\n\nif __name__ == '__main__':\n args = get_args_from_command_line()\n # define paths\n data_path = '/scratch/mt4493/twitter_labor/twitter-labor-data/data'\n random_path = f'{data_path}/random_samples/random_samples_splitted'\n random_path_evaluation = Path(os.path.join(random_path, args.country_code, 'evaluation'))\n # load random set\n random_df = pd.concat(\n pd.read_parquet(parquet_file)\n for parquet_file in random_path_evaluation.glob('*.parquet')\n )\n logger.info('Loaded random data')\n ngram_dict = {\n 'US': ['laid off',\n 'lost my job',\n 'found[.\\w\\s\\d]*job',\n 'got [.\\w\\s\\d]*job',\n 'started[.\\w\\s\\d]*job',\n 'new job',\n 'unemployment',\n 'anyone[.\\w\\s\\d]*hiring',\n 'wish[.\\w\\s\\d]*job',\n 'need[.\\w\\s\\d]*job',\n 'searching[.\\w\\s\\d]*job',\n 'job',\n 'hiring',\n 'opportunity',\n 'apply', \"(^|\\W)i[ve|'ve| ][\\w\\s\\d]* fired\",\n \"(^|\\W)just[\\w\\s\\d]* hired\",\n \"(^|\\W)i[m|'m|ve|'ve| am| have]['\\w\\s\\d]*unemployed\",\n \"(^|\\W)i[m|'m|ve|'ve| am| have]['\\w\\s\\d]*jobless\",\n \"(^|\\W)looking[\\w\\s\\d]* gig[\\W]\",\n \"(^|\\W)applying[\\w\\s\\d]* position[\\W]\",\n \"(^|\\W)find[\\w\\s\\d]* job[\\W]\",\n \"i got fired\",\n \"just got fired\",\n \"i got hired\",\n \"unemployed\",\n \"jobless\"\n ]}\n regex_list = [re.compile(regex) for regex in ngram_dict[args.country_code]]\n random_df['text_lower'] = random_df['text'].str.lower()\n random_df['seedlist_keyword'] = random_df['text_lower'].apply(lambda x: regex_match_string(ngram_list=ngram_dict[args.country_code], regex_list=regex_list, mystring=x))\n random_df = random_df[['tweet_id', 'text_lower', 'seedlist_keyword']]\n output_path = f'/scratch/mt4493/twitter_labor/twitter-labor-data/data/random_samples/random_samples_splitted/{args.country_code}/evaluation_seedlist_keyword'\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n random_df.to_parquet(os.path.join(output_path, 'evaluation_seedlist_keyword.parquet'), index=False)" ]
[ [ "pandas.read_parquet" ] ]
qiuhaoling/indra
[ "fa1fb31c4333ea63d023181eaf6f759e3dd3b400" ]
[ "indra/assemblers/cyjs/assembler.py" ]
[ "from __future__ import absolute_import, print_function, unicode_literals\nfrom builtins import dict, str\nfrom copy import deepcopy\nimport json\nimport logging\nimport itertools\nimport collections\nimport numpy as np\nfrom indra.statements import *\nfrom indra.databases import hgnc_client\nfrom indra.databases import context_client, get_identifiers_url\nfrom indra.tools.expand_families import Expander\nfrom indra.preassembler.hierarchy_manager import hierarchies\n\nexpander = Expander(hierarchies)\n\n# Python 2\ntry:\n basestring\n# Python 3\nexcept:\n basestring = str\n\nlogger = logging.getLogger(__name__)\n\n\nclass CyJSAssembler(object):\n \"\"\"This class assembles a CytoscapeJS graph from a set of INDRA Statements.\n\n CytoscapeJS is a web-based network library for analysis and\n visualisation: http://js.cytoscape.org/\n\n Parameters\n ----------\n statements : Optional[list[indra.statements.Statement]]\n A list of INDRA Statements to be assembled.\n\n Attributes\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements to be assembled.\n \"\"\"\n def __init__(self, stmts=None):\n if not stmts:\n self.statements = []\n else:\n self.statements = stmts\n self._edges = []\n self._nodes = []\n self._existing_nodes = {}\n self._id_counter = 0\n self._exp_colorscale = []\n self._mut_colorscale = []\n self._gene_names = []\n self._context = {}\n\n def add_statements(self, stmts):\n \"\"\"Add INDRA Statements to the assembler's list of statements.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of :py:class:`indra.statements.Statement`\n to be added to the statement list of the assembler.\n \"\"\"\n for stmt in stmts:\n self.statements.append(stmt)\n\n def make_model(self, *args, **kwargs):\n \"\"\"Assemble a Cytoscape JS network from INDRA Statements.\n\n This method assembles a Cytoscape JS network from the set of INDRA\n Statements added to the assembler.\n\n Parameters\n ----------\n grouping : bool\n If True, the nodes with identical incoming and outgoing edges\n are grouped and the corresponding edges are merged.\n\n Returns\n -------\n cyjs_str : str\n The json serialized Cytoscape JS model.\n \"\"\"\n for stmt in self.statements:\n if isinstance(stmt, RegulateActivity):\n self._add_regulate_activity(stmt)\n elif isinstance(stmt, RegulateAmount):\n self._add_regulate_amount(stmt)\n elif isinstance(stmt, Modification):\n self._add_modification(stmt)\n elif isinstance(stmt, SelfModification):\n self._add_selfmodification(stmt)\n elif isinstance(stmt, Gef):\n self._add_gef(stmt)\n elif isinstance(stmt, Gap):\n self._add_gap(stmt)\n elif isinstance(stmt, Complex):\n self._add_complex(stmt)\n else:\n logger.warning('Unhandled statement type: %s' %\n stmt.__class__.__name__)\n if kwargs.get('grouping'):\n self._group_nodes()\n self._group_edges()\n return self.print_cyjs_graph()\n\n def get_gene_names(self):\n \"\"\"Gather gene names of all nodes and node members\"\"\"\n # Collect all gene names in network\n gene_names = []\n for node in self._nodes:\n members = node['data'].get('members')\n if members:\n gene_names += list(members.keys())\n else:\n if node['data']['name'].startswith('Group'):\n continue\n gene_names.append(node['data']['name'])\n self._gene_names = gene_names\n\n def set_CCLE_context(self, cell_types):\n \"\"\"Set context of all nodes and node members from CCLE.\"\"\"\n self.get_gene_names()\n\n # Get expression and mutations from context client\n exp_values = \\\n 
context_client.get_protein_expression(self._gene_names, cell_types)\n mut_values = \\\n context_client.get_mutations(self._gene_names, cell_types)\n\n # Make a dict of presence/absence of mutations\n muts = {cell_line: {} for cell_line in cell_types}\n for cell_line, entries in mut_values.items():\n if entries is not None:\n for gene, mutations in entries.items():\n if mutations:\n muts[cell_line][gene] = 1\n else:\n muts[cell_line][gene] = 0\n\n # Create bins for the exp values\n # because colorbrewer only does 3-9 bins and I don't feel like\n # reinventing color scheme theory, this will only bin 3-9 bins\n def bin_exp(expression_dict):\n d = expression_dict\n exp_values = []\n for line in d:\n for gene in d[line]:\n val = d[line][gene]\n if val is not None:\n exp_values.append(val)\n thr_dict = {}\n for n_bins in range(3, 10):\n bin_thr = np.histogram(np.log10(exp_values), n_bins)[1][1:]\n thr_dict[n_bins] = bin_thr\n # this dict isn't yet binned, that happens in the loop\n binned_dict = {x: deepcopy(expression_dict) for x in range(3, 10)}\n for n_bins in binned_dict:\n for line in binned_dict[n_bins]:\n for gene in binned_dict[n_bins][line]:\n # last bin is reserved for None\n if binned_dict[n_bins][line][gene] is None:\n binned_dict[n_bins][line][gene] = n_bins\n else:\n val = np.log10(binned_dict[n_bins][line][gene])\n for thr_idx, thr in enumerate(thr_dict[n_bins]):\n if val <= thr:\n binned_dict[n_bins][line][gene] = thr_idx\n break\n return binned_dict\n binned_exp = bin_exp(exp_values)\n\n context = {'bin_expression': binned_exp,\n 'mutation': muts}\n self._context['CCLE'] = context\n\n def print_cyjs_graph(self):\n \"\"\"Return the assembled Cytoscape JS network as a json string.\n\n Returns\n -------\n cyjs_str : str\n A json string representation of the Cytoscape JS network.\n \"\"\"\n cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}\n cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)\n return cyjs_str\n\n def print_cyjs_context(self):\n \"\"\"Return a list of node names and their respective context.\n\n Returns\n -------\n cyjs_str_context : str\n A json string of the context dictionary. e.g. 
-\n {'CCLE' : {'bin_expression' : {'cell_line1' : {'gene1':'val1'} },\n 'bin_expression' : {'cell_line' : {'gene1':'val1'} }\n }}\n \"\"\"\n context = self._context\n context_str = json.dumps(context, indent=1, sort_keys=True)\n return context_str\n\n def save_json(self, fname_prefix='model'):\n \"\"\"Save the assembled Cytoscape JS network in a json file.\n\n This method saves two files based on the file name prefix given.\n It saves one json file with the graph itself, and another json\n file with the context.\n\n Parameters\n ----------\n fname_prefix : Optional[str]\n The prefix of the files to save the Cytoscape JS network and\n context to.\n Default: model\n \"\"\"\n cyjs_str = self.print_cyjs_graph()\n # outputs the graph\n with open(fname_prefix + '.json', 'wb') as fh:\n fh.write(cyjs_str.encode('utf-8'))\n # outputs the context of graph nodes\n context_str = self.print_cyjs_context()\n with open(fname_prefix + '_context.json', 'wb') as fh:\n fh.write(context_str.encode('utf-8'))\n\n def save_model(self, fname='model.js'):\n \"\"\"Save the assembled Cytoscape JS network in a js file.\n\n Parameters\n ----------\n file_name : Optional[str]\n The name of the file to save the Cytoscape JS network to.\n Default: model.js\n \"\"\"\n exp_colorscale_str = json.dumps(self._exp_colorscale)\n mut_colorscale_str = json.dumps(self._mut_colorscale)\n cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}\n model_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)\n model_dict = {'exp_colorscale_str': exp_colorscale_str,\n 'mut_colorscale_str': mut_colorscale_str,\n 'model_elements_str': model_str}\n s = ''\n s += 'var exp_colorscale = %s;\\n' % model_dict['exp_colorscale_str']\n s += 'var mut_colorscale = %s;\\n' % model_dict['mut_colorscale_str']\n s += 'var model_elements = %s;\\n' % model_dict['model_elements_str']\n with open(fname, 'wb') as fh:\n fh.write(s.encode('utf-8'))\n\n def _add_binary_regulation(self, stmt):\n subj, obj = stmt.agent_list()\n if subj is None:\n return\n edge_type, edge_polarity = _get_stmt_type(stmt)\n source_id = self._add_node(subj, uuid=stmt.uuid)\n target_id = self._add_node(obj, uuid=stmt.uuid)\n self._add_edge(edge_type, source_id, target_id, edge_polarity,\n stmt.uuid)\n\n _add_regulate_activity = _add_binary_regulation\n _add_regulate_amount = _add_binary_regulation\n _add_modification = _add_binary_regulation\n _add_gef = _add_binary_regulation\n _add_gap = _add_binary_regulation\n\n def _add_selfmodification(self, stmt):\n edge_type, edge_polarity = _get_stmt_type(stmt)\n source_id = self._add_node(stmt.enz, uuid=stmt.uuid)\n self._add_edge(edge_type, source_id, source_id, edge_polarity,\n stmt.uuid)\n\n def _add_complex(self, stmt):\n edge_type, edge_polarity = _get_stmt_type(stmt)\n for m1, m2 in itertools.combinations(stmt.members, 2):\n m1_id = self._add_node(m1, uuid=stmt.uuid)\n m2_id = self._add_node(m2, uuid=stmt.uuid)\n self._add_edge(edge_type, m1_id, m2_id, edge_polarity,\n stmt.uuid)\n\n def _get_edge_dict(self):\n \"\"\"Return a dict of edges.\n\n Keyed tuples of (i, source, target, polarity)\n with lists of edge ids [id1, id2, ...]\n \"\"\"\n edge_dict = collections.defaultdict(lambda: [])\n if len(self._edges) > 0:\n for e in self._edges:\n data = e['data']\n key = tuple([data['i'], data['source'],\n data['target'], data['polarity']])\n edge_dict[key] = data['id']\n return edge_dict\n\n def _add_edge(self, edge_type, source, target, edge_polarity, uuid):\n edge_dict = self._get_edge_dict()\n uuids = collections.defaultdict(lambda: [])\n 
edge = {'data': {'i': edge_type,\n 'source': source, 'target': target,\n 'polarity': edge_polarity}}\n data = edge['data']\n key = tuple([data['i'], data['source'],\n data['target'], data['polarity']])\n if key in edge_dict:\n val = edge_dict[key]\n edge = [e for e in self._edges if e['data']['id'] == val][0]\n else:\n edge['data']['id'] = self._get_new_id()\n self._edges.append(edge)\n if type(uuid) is not list:\n uuid = [uuid]\n edge['data']['uuid_list'] = edge['data'].get('uuid_list', [])\n edge['data']['uuid_list'] += uuid\n return\n\n def _add_node(self, agent, uuid=None):\n node_key = agent.name\n node_id = self._existing_nodes.get(node_key)\n # if the node already exists we do not want to add it again\n # we must however add its uuid\n if node_id is not None:\n # fetch the appropriate node\n n = [x for x in self._nodes if x['data']['id'] == node_id][0]\n uuid_list = n['data']['uuid_list']\n if uuid not in uuid_list:\n uuid_list.append(uuid)\n return node_id\n db_refs = _get_db_refs(agent)\n node_id = self._get_new_id()\n self._existing_nodes[node_key] = node_id\n node_name = agent.name\n node_name = node_name.replace('_', ' ')\n expanded_families = expander.get_children(agent, ns_filter='HGNC')\n members = {}\n for member in expanded_families:\n hgnc_symbol = member[1]\n hgnc_id = hgnc_client.get_hgnc_id(hgnc_symbol)\n if hgnc_id:\n up_id = hgnc_client.get_uniprot_id(hgnc_id)\n member_agent = Agent(hgnc_symbol,\n db_refs={'HGNC': hgnc_id,\n 'UP': up_id})\n member_db_refs = _get_db_refs(member_agent)\n else:\n member_db_refs = {}\n members[member[1]] = {'db_refs': member_db_refs}\n node = {'data': {'id': node_id, 'name': node_name,\n 'db_refs': db_refs, 'parent': '',\n 'members': members, 'uuid_list': [uuid]}}\n self._nodes.append(node)\n return node_id\n\n def _get_new_id(self):\n ret = self._id_counter\n self._id_counter += 1\n return ret\n\n def _get_node_key(self, node_dict_item):\n \"\"\"Return a tuple of sorted sources and targets given a node dict.\"\"\"\n s = tuple(sorted(node_dict_item['sources']))\n t = tuple(sorted(node_dict_item['targets']))\n return (s, t)\n\n def _get_node_groups(self):\n \"\"\"Return a list of node id lists that are topologically identical.\n\n First construct a node_dict which is keyed to the node id and\n has a value which is a dict with keys 'sources' and 'targets'.\n The 'sources' and 'targets' each contain a list of tuples\n (i, polarity, source) edge of the node. node_dict is then processed\n by _get_node_key() which returns a tuple of (s,t) where s,t are\n sorted tuples of the ids for the source and target nodes. (s,t) is\n then used as a key in node_key_dict where the values are the node\n ids. 
node_groups is restricted to groups greater than 1 node.\n \"\"\"\n node_dict = {node['data']['id']: {'sources': [], 'targets': []}\n for node in self._nodes}\n for edge in self._edges:\n # Add edge as a source for its target node\n edge_data = (edge['data']['i'], edge['data']['polarity'],\n edge['data']['source'])\n node_dict[edge['data']['target']]['sources'].append(edge_data)\n # Add edge as target for its source node\n edge_data = (edge['data']['i'], edge['data']['polarity'],\n edge['data']['target'])\n node_dict[edge['data']['source']]['targets'].append(edge_data)\n # Make a dictionary of nodes based on source/target as a key\n node_key_dict = collections.defaultdict(lambda: [])\n for node_id, node_d in node_dict.items():\n key = self._get_node_key(node_d)\n node_key_dict[key].append(node_id)\n # Constrain the groups to ones that have more than 1 member\n node_groups = [g for g in node_key_dict.values() if (len(g) > 1)]\n return node_groups\n\n def _group_edges(self):\n \"\"\"Group all edges that are topologically identical.\n\n This means that (i, source, target, polarity) are the same, then sets\n edges on parent (i.e. - group) nodes to 'Virtual' and creates a new\n edge to represent all of them.\n \"\"\"\n # edit edges on parent nodes and make new edges for them\n edges_to_add = [[], []] # [group_edges, uuid_lists]\n for e in self._edges:\n new_edge = deepcopy(e)\n new_edge['data'].pop('id', None)\n uuid_list = new_edge['data'].pop('uuid_list', [])\n # Check if edge source or target are contained in a parent\n # If source or target in parent edit edge\n # Nodes may only point within their container\n source = e['data']['source']\n target = e['data']['target']\n source_node = [x for x in self._nodes if\n x['data']['id'] == source][0]\n target_node = [x for x in self._nodes if\n x['data']['id'] == target][0]\n # If the source node is in a group, we change the source of this\n # edge to the group\n if source_node['data']['parent'] != '':\n new_edge['data']['source'] = source_node['data']['parent']\n e['data']['i'] = 'Virtual'\n # If the targete node is in a group, we change the target of this\n # edge to the group\n if target_node['data']['parent'] != '':\n new_edge['data']['target'] = target_node['data']['parent']\n e['data']['i'] = 'Virtual'\n if e['data']['i'] == 'Virtual':\n if new_edge not in edges_to_add[0]:\n edges_to_add[0].append(new_edge)\n edges_to_add[1].append(uuid_list)\n else:\n idx = edges_to_add[0].index(new_edge)\n edges_to_add[1][idx] += uuid_list\n edges_to_add[1][idx] = list(set(edges_to_add[1][idx]))\n for ze in zip(*edges_to_add):\n edge = ze[0]\n edge['data']['id'] = self._get_new_id()\n edge['data']['uuid_list'] = ze[1]\n self._edges.append(edge)\n\n def _group_nodes(self):\n node_groups = self._get_node_groups()\n for group in node_groups:\n # Make new group node\n new_group_node = {'data': {'id': (self._get_new_id()),\n 'name': ('Group' + str(group)),\n 'parent': '', 'uuid_list': []}}\n member_nodes = [x for x in self._nodes if x['data']['id'] in group]\n for m_node in member_nodes:\n new_group_node['data']['uuid_list'] += \\\n m_node['data']['uuid_list']\n new_group_node['data']['uuid_list'] = \\\n list(set(new_group_node['data']['uuid_list']))\n # Point the node to its parent\n for node in self._nodes:\n if node['data']['id'] in group:\n node['data']['parent'] = new_group_node['data']['id']\n self._nodes.append(new_group_node)\n\n\ndef _get_db_refs(agent):\n cyjs_db_refs = {}\n for db_name, db_ids in agent.db_refs.items():\n if isinstance(db_ids, int):\n 
db_id = str(db_ids)\n elif isinstance(db_ids, basestring):\n db_id = db_ids\n else:\n db_id = db_ids[0]\n if db_name == 'TEXT':\n url = db_id\n else:\n url = get_identifiers_url(db_name, db_id)\n if not url:\n continue\n db_name_map = {\n 'UP': 'UniProt', 'PUBCHEM': 'PubChem',\n 'IP': 'InterPro', 'NXPFA': 'NextProtFamily',\n 'PF': 'Pfam', 'CHEBI': 'ChEBI'}\n name = db_name_map.get(db_name)\n if not name:\n name = db_name\n cyjs_db_refs[name] = url\n return cyjs_db_refs\n\n\ndef _get_stmt_type(stmt):\n if isinstance(stmt, AddModification):\n edge_type = stmt.__class__.__name__\n edge_polarity = 'positive'\n elif isinstance(stmt, RemoveModification):\n edge_type = stmt.__class__.__name__\n edge_polarity = 'negative'\n elif isinstance(stmt, SelfModification):\n edge_type = 'SelfModification'\n edge_polarity = 'positive'\n elif isinstance(stmt, Complex):\n edge_type = 'Complex'\n edge_polarity = 'none'\n elif isinstance(stmt, Activation):\n edge_type = 'Activation'\n edge_polarity = 'positive'\n elif isinstance(stmt, Inhibition):\n edge_type = 'Inhibition'\n edge_polarity = 'negative'\n elif isinstance(stmt, DecreaseAmount):\n edge_type = 'DecreaseAmount'\n edge_polarity = 'negative'\n elif isinstance(stmt, IncreaseAmount):\n edge_type = 'IncreaseAmount'\n edge_polarity = 'positive'\n elif isinstance(stmt, Gef):\n edge_type = 'Gef'\n edge_polarity = 'positive'\n elif isinstance(stmt, Gap):\n edge_type = 'Gap'\n edge_polarity = 'negative'\n else:\n edge_type = stmt.__class__.__str__()\n edge_polarity = 'none'\n return edge_type, edge_polarity\n" ]
[ [ "numpy.log10" ] ]
brianchung0803/MegaDepth
[ "2aa2bd9c6025938c40b5839989d37a647c82ff39" ]
[ "models/base_model.py" ]
[ "import os\nimport torch\n\nclass BaseModel():\n def name(self):\n return 'BaseModel'\n\n def initialize(self):\n # self.opt = opt\n # self.gpu_ids = opt.gpu_ids\n # self.isTrain = opt.isTrain\n # self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor\n # self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)\n self.gpu_ids = [0,1]\n self.isTrain = True\n \n self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor\n self.save_dir = os.path.join(\"./checkpoints\", \"test_local\")\n\n\n def set_input(self, input):\n self.input = input\n\n def forward(self):\n pass\n\n # used in test time, no backprop\n def test(self):\n pass\n\n def get_image_paths(self):\n pass\n\n def optimize_parameters(self):\n pass\n\n def get_current_visuals(self):\n return self.input\n\n def get_current_errors(self):\n return {}\n\n def save(self, label):\n pass\n\n # helper saving function that can be used by subclasses\n def save_network(self, network, network_label, epoch_label, gpu_ids):\n save_filename = '_%s_net_%s.pth' % (epoch_label, network_label)\n save_path = os.path.join(self.save_dir, save_filename)\n torch.save(network.cpu().state_dict(), save_path)\n if len(gpu_ids) and torch.cuda.is_available():\n network.cuda(device_id=gpu_ids[0])\n\n # helper loading function that can be used by subclasses\n def load_network(self, network, network_label, epoch_label):\n save_filename = '%s_net_%s.pth' % (epoch_label, network_label)\n save_path = os.path.join(self.save_dir, save_filename)\n print(save_path)\n model = torch.load(save_path)\n return model\n # network.load_state_dict(torch.load(save_path))\n\n def update_learning_rate():\n pass\n" ]
[ [ "torch.cuda.is_available", "torch.load" ] ]
Umang81/ga-dsmp
[ "3f44da82cf6eb199da0dbc2546509aad254dd630" ]
[ "Telecom-Churn-Prediction-with-Boosting/code.py" ]
[ "# --------------\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n#path - Path of file \n\n# Code starts here\ndf = pd.read_csv(path)\nX = df.drop(['customerID','Churn'],1)\ny = df['Churn'].copy()\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size =0.3, random_state=0)\n\n\n# --------------\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n# Code starts here\n\nX_train['TotalCharges']=X_train['TotalCharges'].replace(' ',np.nan).astype(float)\nX_test['TotalCharges']=X_test['TotalCharges'].replace(' ',np.nan).astype(float)\n\nX_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)\nX_test['TotalCharges'].fillna(X_test['TotalCharges'].mean(),inplace=True)\n\nprint(X_train.isnull().sum())\nprint(X_test.isnull().sum())\n\nle=LabelEncoder()\ncategorical_feature_mask = X_train.dtypes==object\ncategorical_cols = X_train.columns[categorical_feature_mask].tolist()\nX_train[categorical_cols]= X_train[categorical_cols].apply(lambda col: le.fit_transform(col))\nX_test[categorical_cols]= X_test[categorical_cols].apply(lambda col: le.fit_transform(col))\n\ny_train = y_train.replace({'No':0,'Yes':1})\ny_test =y_test.replace({'No':0,'Yes':1})\n\n\n# --------------\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\n\n# Code starts here\nprint(X_train.head())\nprint(X_test.head())\nprint(y_train.head())\nprint(y_train.head())\n\nada_model = AdaBoostClassifier(random_state=0)\nada_model.fit(X_train,y_train)\ny_pred = ada_model.predict(X_test)\nada_score = accuracy_score(y_test,y_pred)\nprint(ada_score)\n\nada_cm = confusion_matrix(y_test,y_pred)\nprint(ada_cm)\n\nada_cr = classification_report(y_test,y_pred)\nprint(ada_cr)\n\n\n# --------------\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n#Parameter list\nparameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],\n 'max_depth':range(1,3)}\n\n# Code starts here\nxgb_model = XGBClassifier(random_state=0)\nxgb_model.fit(X_train,y_train)\ny_pred = xgb_model.predict(X_test)\nxgb_score = accuracy_score(y_test,y_pred)\nprint(xgb_score)\nxgb_cm = confusion_matrix(y_test,y_pred)\nprint(xgb_cm)\nxgb_cr = classification_report(y_test,y_pred)\nprint(xgb_cr)\n\nclf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)\nclf_model.fit(X_train,y_train)\ny_pred = clf_model.predict(X_test)\nclf_score = accuracy_score(y_test,y_pred)\nprint(clf_score)\nclf_cm = confusion_matrix(y_test,y_pred)\nprint(clf_cm)\nclf_cr = classification_report(y_test,y_pred)\nprint(clf_cr)\n\n\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.confusion_matrix", "sklearn.ensemble.AdaBoostClassifier", "sklearn.model_selection.GridSearchCV", "sklearn.metrics.accuracy_score", "sklearn.metrics.classification_report", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
hsabiu/thesis-scripts
[ "54b7b2248f9a341be7579c96eb54dd43b7d7138f" ]
[ "plotEventsWithTimestampOriginal.py" ]
[ "# Author: Habib Sabiu\n# Date: March 31, 2017\n# Purpose: Script to process Spark log files generated when running Spark applications. This script\n# would loop through all the log files in the current directory and get the timestamp for\n# all events specified within 'events' list. Basically, the events of interest here are the\n# number of executors added or remove from an application over time. The x-axis shows the time\n# stamp of the event while the y-axis shows the count of the executors used by the application\n# at the given time stamp. The script will only process and compare the log files of 5 applications at a time\n# Copyright: Any person can adopt this script for their specific need as long as credit is given\n# to the initial author\n\nfrom datetime import datetime, timedelta\nfrom itertools import groupby\nfrom matplotlib.font_manager import FontProperties\n\nimport tzlocal\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport dateutil\nimport glob\n\neventsFileName = \"events_log_record.txt\"\nevents = [\"SparkListenerApplicationStart\", \"SparkListenerExecutorAdded\", \"SparkListenerExecutorRemoved\", \"SparkListenerApplicationEnd\"]\n\nlocal_timezone = tzlocal.get_localzone()\nintermediateLines = []\n\nx_axis = []\ny_axis = []\n\nlist_of_files = glob.glob('./*.json')\n\neventLine = \"\"\ncurrentFileName = \"\"\n\nfor fileName in list_of_files:\n with open(fileName) as logFile:\n fileName = fileName.replace(\"./\", \"\")\n fileName = fileName.replace(\".json\", \"\")\n for line in logFile:\n if any(s in line for s in events):\n eventLine = \"\\\"App\\\":\" + fileName + \" \" + line.replace(\",\", \"\").strip()\n currentFileName = fileName\n if \"Timestamp\" in line and eventLine != \"\" and currentFileName == fileName:\n intermediateLines.append(eventLine)\n intermediateLines.append(\"\\\"App\\\":\" + fileName + \" \" + line.replace(\",\", \"\").strip())\n eventLine = \"\"\n\nexecutorsCounter = 0\npreviousAppName = \"\"\n\nlistLength = len(intermediateLines)\n\nwith open(eventsFileName, 'w') as eventsFile:\n for index, line in enumerate(intermediateLines):\n if \"Event\" in line:\n if not index + 3 > listLength:\n currentEventTime = intermediateLines[index + 1]\n nextEventTime = intermediateLines[index + 3]\n\n nextEvent = intermediateLines[index + 2]\n\n currentEventTimeList = re.findall(r'\\S+', currentEventTime)\n nextEventTimeList = re.findall(r'\\S+', nextEventTime)\n\n currentEventTimeStampFloat = float(currentEventTimeList[2])\n nextEventTimeStampFloat = float(nextEventTimeList[2])\n\n current_event_local_time = datetime.fromtimestamp(currentEventTimeStampFloat / 1000.0, local_timezone)\n next_event_local_time = datetime.fromtimestamp(nextEventTimeStampFloat / 1000.0, local_timezone)\n\n currentEventTimestampToLocal = str(current_event_local_time.strftime(\"%Y-%b-%d %H:%M:%S\"))\n nextEventTimestampToLocal = str(next_event_local_time.strftime(\"%Y-%b-%d %H:%M:%S\"))\n\n intermediateTimeStamp = current_event_local_time\n intermediateTimeStamp = str(intermediateTimeStamp.strftime(\"%Y-%b-%d %H:%M:%S\"))\n\n appName = currentEventTimeList[0][6:]\n event = line[40:len(line) - 1]\n\n if previousAppName == \"\":\n previousAppName = appName\n if appName == previousAppName:\n if event == \"SparkListenerApplicationStart\":\n executorsCounter = 0\n elif event == \"SparkListenerExecutorAdded\":\n executorsCounter += 1\n elif event == \"SparkListenerExecutorRemoved\":\n executorsCounter -= 1\n elif event == 
\"SparkListenerApplicationEnd\":\n executorsCounter = 0\n else:\n previousAppName = \"\"\n executorsCounter = 0\n x_axis.append(\"\")\n y_axis.append(\"\")\n\n lineToWrite = (line.replace(\",\", \"\").strip() + \" Timestamp: \" + currentEventTimeList[2] + \" Current Time: \" + currentEventTimestampToLocal + \" Next Time: \" + nextEventTimestampToLocal + \" NumExecutors: \" + str(executorsCounter)).replace(\"\\\"\", \"\")\n y_axis.append(executorsCounter)\n x_axis.append(currentEventTimestampToLocal)\n eventsFile.write(lineToWrite + \"\\n\")\n\n while intermediateTimeStamp == nextEventTimestampToLocal:\n intermediateTimeStamp = datetime.strptime(intermediateTimeStamp, \"%Y-%b-%d %H:%M:%S\") + timedelta(seconds=1)\n intermediateTimeStamp = str(intermediateTimeStamp.strftime(\"%Y-%b-%d %H:%M:%S\"))\n if appName == previousAppName and not \"SparkListenerApplicationStart\" in lineToWrite and not \"SparkListenerApplicationEnd\" in lineToWrite:\n lineToWrite = (line.replace(\",\", \"\").strip() + \" Timestamp: \" + currentEventTimeList[2] + \" Current Time: \" + intermediateTimeStamp + \" Next Time: \" + nextEventTimestampToLocal + \" NumExecutors: \" + str(executorsCounter)).replace(\"\\\"\", \"\")\n y_axis.append(executorsCounter)\n x_axis.append(intermediateTimeStamp)\n eventsFile.write(lineToWrite + \"\\n\")\n\n else:\n lastEventTime = intermediateLines[index + 1]\n lastEventTimeList = re.findall(r'\\S+', lastEventTime)\n lastEventTimeStampFloat = float(lastEventTimeList[2])\n last_event_local_time = datetime.fromtimestamp(lastEventTimeStampFloat / 1000.0, local_timezone)\n lastEventTimestampToLocal = str(last_event_local_time.strftime(\"%Y-%b-%d %H:%M:%S\"))\n\n lineToWrite = (line.replace(\",\", \"\").strip() + \" Timestamp: \" + lastEventTimeList[2] + \" Current Time: \" + lastEventTimestampToLocal + \" Next Time: \" + lastEventTimestampToLocal + \" NumExecutors: 0\").replace(\"\\\"\", \"\")\n y_axis.append(0)\n x_axis.append(lastEventTimestampToLocal)\n eventsFile.write(lineToWrite + \"\\n\")\n\n if \"SparkListenerApplicationEnd\" in nextEvent:\n while intermediateTimeStamp != nextEventTimestampToLocal:\n intermediateTimeStamp = datetime.strptime(intermediateTimeStamp, \"%Y-%b-%d %H:%M:%S\") + timedelta(\n seconds=1)\n intermediateTimeStamp = str(intermediateTimeStamp.strftime(\"%Y-%b-%d %H:%M:%S\"))\n if appName == previousAppName and not \"SparkListenerApplicationStart\" in lineToWrite and not \"SparkListenerApplicationEnd\" in lineToWrite:\n lineToWrite = (line.replace(\",\", \"\").strip() + \" Timestamp: \" + currentEventTimeList[2] + \" Current Time: \" + intermediateTimeStamp + \" Next Time: \" + nextEventTimestampToLocal + \" NumExecutors: 88\").replace(\"\\\"\", \"\")\n y_axis.append(\"88\")\n x_axis.append(intermediateTimeStamp)\n eventsFile.write(lineToWrite + \"\\n\")\n\ntimeStampList = [list(group) for k, group in groupby(x_axis, lambda x: x == \"\") if not k]\nexecutorsList = [list(group) for k, group in groupby(y_axis, lambda x: x == \"\") if not k]\n\nfirstPlotTimeStamp = [dateutil.parser.parse(s) for s in timeStampList[0]]\nfirstPlotExecutors = executorsList[0]\n\nsecondPlotTimeStamp = [dateutil.parser.parse(s) for s in timeStampList[1]]\nsecondPlotExecutors = executorsList[1]\n\nthirdPlotTimeStamp = [dateutil.parser.parse(s) for s in timeStampList[2]]\nthirdPlotExecutors = executorsList[2]\n\nfourthPlotTimeStamp = [dateutil.parser.parse(s) for s in timeStampList[3]]\nfourthPlotExecutors = executorsList[3]\n\nfifthPlotTimeStamp = [dateutil.parser.parse(s) for s in 
timeStampList[4]]\nfifthPlotExecutors = executorsList[4]\n\nfig, ax = plt.subplots()\nplt.title(\"Image format conversion - Standalone\", fontsize=20)\nplt.ylabel(\"Number of executors allocated\", fontsize=15)\nplt.xlabel(\"Timestamp\", fontsize=15)\n#plt.xlim([datetime(2017, 02, 03, 20, 39, 00), datetime(2017, 02, 03, 20, 51, 00)])\nplt.ylim([-5, 100])\n\nax.xaxis.set_major_locator(mdates.SecondLocator(interval=60))\nax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:S'))\n\nax.plot_date(firstPlotTimeStamp, firstPlotExecutors, fmt=\"r-*\", label=\"job_1\", )\nax.plot_date(secondPlotTimeStamp, secondPlotExecutors, fmt=\"b-o\", label=\"job_2\")\nax.plot_date(thirdPlotTimeStamp, thirdPlotExecutors, fmt=\"g-^\", label=\"job_3\")\nax.plot_date(fourthPlotTimeStamp, fourthPlotExecutors, fmt=\"kx-\", label=\"job_4\")\nax.plot_date(fifthPlotTimeStamp, fifthPlotExecutors, fmt=\"c-d\", label=\"job_5\")\n\n# font of the legend\nfontP = FontProperties()\nfontP.set_size('small')\n\nax.legend(loc='upper right', shadow=False, ncol=5, prop=fontP)\nax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))\nax.grid(False)\nfig.autofmt_xdate()\nplt.show()\n" ]
[ [ "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.dates.DateFormatter", "matplotlib.dates.SecondLocator", "matplotlib.pyplot.subplots", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
aldragan0/jax
[ "ca766caa02296023bd6714bb7fdba064a45e2258", "3ddd3905a4c8f59480c43c24bb1f9374238bb2a0", "3ddd3905a4c8f59480c43c24bb1f9374238bb2a0" ]
[ "jax/_src/scipy/special.py", "jax/abstract_arrays.py", "jax/lax/lax_parallel.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\n\nimport numpy as np\nimport scipy.special as osp_special\n\nfrom jax import lax\nfrom jax import api\nfrom jax.interpreters import ad\nfrom jax._src.numpy import lax_numpy as jnp\nfrom jax._src.numpy.lax_numpy import (asarray, _reduction_dims, _constant_like,\n _promote_args_inexact)\nfrom jax._src.numpy.util import _wraps\n\n\n@_wraps(osp_special.gammaln)\ndef gammaln(x):\n x, = _promote_args_inexact(\"gammaln\", x)\n return lax.lgamma(x)\n\n\n@_wraps(osp_special.betaln)\ndef betaln(x, y):\n x, y = _promote_args_inexact(\"betaln\", x, y)\n return lax.lgamma(x) + lax.lgamma(y) - lax.lgamma(x + y)\n\n\n@_wraps(osp_special.betainc)\ndef betainc(a, b, x):\n a, b, x = _promote_args_inexact(\"betainc\", a, b, x)\n return lax.betainc(a, b, x)\n\n\n@_wraps(osp_special.digamma, update_doc=False)\ndef digamma(x):\n x, = _promote_args_inexact(\"digamma\", x)\n return lax.digamma(x)\nad.defjvp(lax.digamma_p, lambda g, x: lax.mul(g, polygamma(1, x)))\n\n\n@_wraps(osp_special.gammainc, update_doc=False)\ndef gammainc(a, x):\n a, x = _promote_args_inexact(\"gammainc\", a, x)\n return lax.igamma(a, x)\n\n\n@_wraps(osp_special.gammaincc, update_doc=False)\ndef gammaincc(a, x):\n a, x = _promote_args_inexact(\"gammaincc\", a, x)\n return lax.igammac(a, x)\n\n\n@_wraps(osp_special.erf)\ndef erf(x):\n x, = _promote_args_inexact(\"erf\", x)\n return lax.erf(x)\n\n\n@_wraps(osp_special.erfc, update_doc=False)\ndef erfc(x):\n x, = _promote_args_inexact(\"erfc\", x)\n return lax.erfc(x)\n\n\n@_wraps(osp_special.erfinv)\ndef erfinv(x):\n x, = _promote_args_inexact(\"erfinv\", x)\n return lax.erf_inv(x)\n\n\n@api.custom_jvp\n@_wraps(osp_special.logit, update_doc=False)\ndef logit(x):\n x = asarray(x)\n return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))\nlogit.defjvps(\n lambda g, ans, x: lax.div(g, lax.mul(x, lax.sub(lax._const(x, 1), x))))\n\n\n@api.custom_jvp\n@_wraps(osp_special.expit, update_doc=False)\ndef expit(x):\n x = asarray(x)\n one = lax._const(x, 1)\n return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))\nexpit.defjvps(lambda g, ans, x: g * ans * (lax._const(ans, 1) - ans))\n\n\n@_wraps(osp_special.logsumexp)\ndef logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):\n if b is not None:\n a, b = jnp.broadcast_arrays(a, b)\n dims = _reduction_dims(a, axis)\n dimadd = lambda x: lax.expand_dims(x, dims)\n amax = lax.reduce(a, _constant_like(a, -np.inf), lax.max, dims)\n amax = lax.stop_gradient(lax.select(lax.is_finite(amax), amax, lax.full_like(amax, 0)))\n amax_singletons = dimadd(amax)\n if b is None:\n out = lax.add(lax.log(lax.reduce(lax.exp(lax.sub(a, amax_singletons)),\n _constant_like(a, 0), lax.add, dims)), amax)\n sign = jnp.where(jnp.isnan(out), np.nan, 1.0).astype(out.dtype)\n sign = jnp.where(out == -np.inf, 0.0, sign)\n else:\n sumexp = lax.reduce(lax.mul(lax.exp(lax.sub(a, amax_singletons)), b),\n _constant_like(a, 0), lax.add, dims)\n sign = 
lax.stop_gradient(lax.sign(sumexp))\n out = lax.add(lax.log(lax.abs(sumexp)), amax)\n if return_sign:\n return (dimadd(out), dimadd(sign)) if keepdims else (out, sign)\n if b is not None:\n out = jnp.where(sign < 0, np.nan, out)\n return dimadd(out) if keepdims else out\n\n\n@_wraps(osp_special.xlogy)\ndef xlogy(x, y):\n x, y = _promote_args_inexact(\"xlogy\", x, y)\n x_ok = x != 0.\n safe_x = jnp.where(x_ok, x, 1.)\n safe_y = jnp.where(x_ok, y, 1.)\n return jnp.where(x_ok, lax.mul(safe_x, lax.log(safe_y)), jnp.zeros_like(x))\n\n\n@_wraps(osp_special.xlog1py, update_doc=False)\ndef xlog1py(x, y):\n x, y = _promote_args_inexact(\"xlog1py\", x, y)\n x_ok = x != 0.\n safe_x = jnp.where(x_ok, x, 1.)\n safe_y = jnp.where(x_ok, y, 1.)\n return jnp.where(x_ok, lax.mul(safe_x, lax.log1p(safe_y)), jnp.zeros_like(x))\n\n\n@_wraps(osp_special.entr)\ndef entr(x):\n x, = _promote_args_inexact(\"entr\", x)\n return lax.select(lax.lt(x, _constant_like(x, 0)),\n lax.full_like(x, -np.inf),\n lax.neg(xlogy(x, x)))\n\n\n@_wraps(osp_special.multigammaln, update_doc=False)\ndef multigammaln(a, d):\n a, = _promote_args_inexact(\"multigammaln\", a)\n d = lax.convert_element_type(d, lax.dtype(a))\n constant = lax.mul(lax.mul(lax.mul(_constant_like(a, 0.25), d),\n lax.sub(d, _constant_like(a, 1))),\n lax.log(_constant_like(a, np.pi)))\n res = jnp.sum(gammaln(jnp.expand_dims(a, axis=-1) -\n lax.div(jnp.arange(d), _constant_like(a, 2))),\n axis=-1)\n return res + constant\n\n\n# coefs of (2k)! / B_{2k} where B are bernoulli numbers\n# those numbers are obtained using https://www.wolframalpha.com\n_BERNOULLI_COEFS = [\n 12,\n -720,\n 30240,\n -1209600,\n 47900160,\n -1307674368000 / 691,\n 74724249600,\n -10670622842880000 / 3617,\n 5109094217170944000 / 43867,\n -802857662698291200000 / 174611,\n 14101100039391805440000 / 77683,\n -1693824136731743669452800000 / 236364091,\n 186134520519971831808000000 / 657931,\n -37893265687455865519472640000000 / 3392780147,\n 759790291646040068357842010112000000 / 1723168255201,\n -134196726836183700385281186201600000000 / 7709321041217,\n]\n\n\n@_wraps(osp_special.zeta)\ndef zeta(x, q=None):\n assert q is not None, \"Riemann zeta function is not implemented yet.\"\n # Reference: Johansson, Fredrik.\n # \"Rigorous high-precision computation of the Hurwitz zeta function and its derivatives.\"\n # Numerical Algorithms 69.2 (2015): 253-270.\n # https://arxiv.org/abs/1309.2877 - formula (5)\n # here we keep the same notation as in reference\n s, a = _promote_args_inexact(\"zeta\", x, q)\n dtype = lax.dtype(a).type\n s_, a_ = jnp.expand_dims(s, -1), jnp.expand_dims(a, -1)\n # precision ~ N, M\n N = M = dtype(8) if lax.dtype(a) == jnp.float32 else dtype(16)\n assert M <= len(_BERNOULLI_COEFS)\n k = np.arange(N, dtype=N.dtype)\n S = jnp.sum((a_ + k) ** -s_, -1)\n I = lax.div((a + N) ** (dtype(1) - s), s - dtype(1))\n T0 = (a + N) ** -s\n s_over_a = (s_ + np.arange(2 * M, dtype=M.dtype)) / (a_ + N)\n T1 = jnp.cumprod(s_over_a, -1)[..., ::2]\n T1 = jnp.clip(T1, a_max=jnp.finfo(dtype).max)\n coefs = np.array(_BERNOULLI_COEFS[:T1.shape[-1]], dtype=dtype)\n T1 = T1 / coefs\n T = T0 * (dtype(0.5) + T1.sum(-1))\n return S + I + T\n\n\n@_wraps(osp_special.polygamma, update_doc=False)\ndef polygamma(n, x):\n assert jnp.issubdtype(lax.dtype(n), jnp.integer)\n n, x = _promote_args_inexact(\"polygamma\", n, x)\n shape = lax.broadcast_shapes(n.shape, x.shape)\n return _polygamma(jnp.broadcast_to(n, shape), jnp.broadcast_to(x, shape))\n\n\n@api.custom_jvp\ndef _polygamma(n, x):\n dtype = 
lax.dtype(n).type\n n_plus = n + dtype(1)\n sign = dtype(1) - (n_plus % dtype(2)) * dtype(2)\n return jnp.where(n == 0, digamma(x), sign * jnp.exp(gammaln(n_plus)) * zeta(n_plus, x))\n_polygamma.defjvps(None, lambda g, ans, n, x: lax.mul(g, _polygamma(n + 1, x)))\n\n\n# Normal distributions\n\n# Functions \"ndtr\" and \"ndtri\" are derived from calculations made in:\n# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n# In the following email exchange, the author gives his consent to redistribute\n# derived works under an Apache 2.0 license.\n#\n# From: Stephen Moshier <steve@moshier.net>\n# Date: Sat, Jun 9, 2018 at 2:36 PM\n# Subject: Re: Licensing cephes under Apache (BSD-like) license.\n# To: rif <rif@google.com>\n#\n#\n#\n# Hello Rif,\n#\n# Yes, Google may distribute Cephes files under the Apache 2 license.\n#\n# If clarification is needed, I do not favor BSD over other free licenses.\n# I would agree that Apache 2 seems to cover the concern you mentioned\n# about sublicensees.\n#\n# Best wishes for good luck with your projects!\n# Steve Moshier\n#\n#\n#\n# On Thu, 31 May 2018, rif wrote:\n#\n# > Hello Steve.\n# > My name is Rif. I work on machine learning software at Google.\n# >\n# > Your cephes software continues to be incredibly useful and widely used. I\n# > was wondering whether it would be permissible for us to use the Cephes code\n# > under the Apache 2.0 license, which is extremely similar in permissions to\n# > the BSD license (Wikipedia comparisons). This would be quite helpful to us\n# > in terms of avoiding multiple licenses on software.\n# >\n# > I'm sorry to bother you with this (I can imagine you're sick of hearing\n# > about this by now), but I want to be absolutely clear we're on the level and\n# > not misusing your important software. In former conversation with Eugene\n# > Brevdo (ebrevdo@google.com), you wrote \"If your licensing is similar to BSD,\n# > the formal way that has been handled is simply to add a statement to the\n# > effect that you are incorporating the Cephes software by permission of the\n# > author.\" I wanted to confirm that (a) we could use the Apache license, (b)\n# > that we don't need to (and probably you don't want to) keep getting\n# > contacted about individual uses, because your intent is generally to allow\n# > this software to be reused under \"BSD-like\" license, and (c) you're OK\n# > letting incorporators decide whether a license is sufficiently BSD-like?\n# >\n# > Best,\n# >\n# > rif\n# >\n# >\n# >\n\n# log_ndtr uses different functions over the ranges\n# (-infty, lower](lower, upper](upper, infty)\n# Lower bound values were chosen by examining where the support of ndtr\n# appears to be zero, relative to scipy's (which is always 64bit). They were\n# then made more conservative just to be safe. (Conservative means use the\n# expansion more than we probably need to.)\n_LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)\n_LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)\n\n# Upper bound values were chosen by examining for which values of 'x'\n# Log[cdf(x)] is 0, after which point we need to use the approximation\n# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). 
We chose a value slightly\n# conservative, meaning we use the approximation earlier than needed.\n_LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)\n_LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)\n\n\ndef ndtr(x):\n r\"\"\"Normal distribution function.\n\n Returns the area under the Gaussian probability density function, integrated\n from minus infinity to x:\n\n .. math::\n \\begin{align}\n \\mathrm{ndtr}(x) =&\n \\ \\frac{1}{\\sqrt{2 \\pi}}\\int_{-\\infty}^{x} e^{-\\frac{1}{2}t^2} dt \\\\\n =&\\ \\frac{1}{2} (1 + \\mathrm{erf}(\\frac{x}{\\sqrt{2}})) \\\\\n =&\\ \\frac{1}{2} \\mathrm{erfc}(\\frac{x}{\\sqrt{2}})\n \\end{align}\n\n Args:\n x: An array of type `float32`, `float64`.\n\n Returns:\n An array with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n x = jnp.asarray(x)\n dtype = lax.dtype(x)\n if dtype not in (jnp.float32, jnp.float64):\n raise TypeError(\n \"x.dtype={} is not supported, see docstring for supported types.\"\n .format(dtype))\n return _ndtr(x)\n\n\ndef _ndtr(x):\n \"\"\"Implements ndtr core logic.\"\"\"\n dtype = lax.dtype(x).type\n half_sqrt_2 = dtype(0.5) * np.sqrt(2., dtype=dtype)\n w = x * half_sqrt_2\n z = lax.abs(w)\n y = lax.select(lax.lt(z, half_sqrt_2),\n dtype(1.) + lax.erf(w),\n lax.select(lax.gt(w, dtype(0.)),\n dtype(2.) - lax.erfc(z),\n lax.erfc(z)))\n return dtype(0.5) * y\n\n\ndef ndtri(p):\n r\"\"\"The inverse of the CDF of the Normal distribution function.\n\n Returns `x` such that the area under the PDF from :math:`-\\infty` to `x` is equal\n to `p`.\n\n A piece-wise rational approximation is done for the function.\n This is a based on the implementation in netlib.\n\n Args:\n p: an array of type `float32`, `float64`.\n\n Returns:\n an array with `dtype=p.dtype`.\n\n Raises:\n TypeError: if `p` is not floating-type.\n \"\"\"\n dtype = lax.dtype(p)\n if dtype not in (jnp.float32, jnp.float64):\n raise TypeError(\n \"x.dtype={} is not supported, see docstring for supported types.\"\n .format(dtype))\n return _ndtri(p)\n\n\ndef _ndtri(p):\n \"\"\"Implements ndtri core logic.\"\"\"\n\n # Constants used in piece-wise rational approximations. 
Taken from the cephes\n # library:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n p0 = list(reversed([-5.99633501014107895267E1,\n 9.80010754185999661536E1,\n -5.66762857469070293439E1,\n 1.39312609387279679503E1,\n -1.23916583867381258016E0]))\n q0 = list(reversed([1.0,\n 1.95448858338141759834E0,\n 4.67627912898881538453E0,\n 8.63602421390890590575E1,\n -2.25462687854119370527E2,\n 2.00260212380060660359E2,\n -8.20372256168333339912E1,\n 1.59056225126211695515E1,\n -1.18331621121330003142E0]))\n p1 = list(reversed([4.05544892305962419923E0,\n 3.15251094599893866154E1,\n 5.71628192246421288162E1,\n 4.40805073893200834700E1,\n 1.46849561928858024014E1,\n 2.18663306850790267539E0,\n -1.40256079171354495875E-1,\n -3.50424626827848203418E-2,\n -8.57456785154685413611E-4]))\n q1 = list(reversed([1.0,\n 1.57799883256466749731E1,\n 4.53907635128879210584E1,\n 4.13172038254672030440E1,\n 1.50425385692907503408E1,\n 2.50464946208309415979E0,\n -1.42182922854787788574E-1,\n -3.80806407691578277194E-2,\n -9.33259480895457427372E-4]))\n p2 = list(reversed([3.23774891776946035970E0,\n 6.91522889068984211695E0,\n 3.93881025292474443415E0,\n 1.33303460815807542389E0,\n 2.01485389549179081538E-1,\n 1.23716634817820021358E-2,\n 3.01581553508235416007E-4,\n 2.65806974686737550832E-6,\n 6.23974539184983293730E-9]))\n q2 = list(reversed([1.0,\n 6.02427039364742014255E0,\n 3.67983563856160859403E0,\n 1.37702099489081330271E0,\n 2.16236993594496635890E-1,\n 1.34204006088543189037E-2,\n 3.28014464682127739104E-4,\n 2.89247864745380683936E-6,\n 6.79019408009981274425E-9]))\n\n dtype = lax.dtype(p).type\n shape = jnp.shape(p)\n\n def _create_polynomial(var, coeffs):\n \"\"\"Compute n_th order polynomial via Horner's method.\"\"\"\n coeffs = np.array(coeffs, dtype)\n if not coeffs.size:\n return jnp.zeros_like(var)\n return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var\n\n\n maybe_complement_p = jnp.where(p > dtype(-np.expm1(-2.)), dtype(1.) - p, p)\n # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs\n # later on. The result from the computation when p == 0 is not used so any\n # number that doesn't result in NaNs is fine.\n sanitized_mcp = jnp.where(\n maybe_complement_p <= dtype(0.),\n jnp.full(shape, dtype(0.5)),\n maybe_complement_p)\n\n # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).\n w = sanitized_mcp - dtype(0.5)\n ww = lax.square(w)\n x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)\n / _create_polynomial(ww, q0))\n x_for_big_p *= -dtype(np.sqrt(2. * np.pi))\n\n # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),\n # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different\n # arrays based on whether p < exp(-32).\n z = lax.sqrt(dtype(-2.) * lax.log(sanitized_mcp))\n first_term = z - lax.log(z) / z\n second_term_small_p = (\n _create_polynomial(dtype(1.) / z, p2) /\n _create_polynomial(dtype(1.) / z, q2) / z)\n second_term_otherwise = (\n _create_polynomial(dtype(1.) / z, p1) /\n _create_polynomial(dtype(1.) / z, q1) / z)\n x_for_small_p = first_term - second_term_small_p\n x_otherwise = first_term - second_term_otherwise\n\n x = jnp.where(sanitized_mcp > dtype(np.exp(-2.)),\n x_for_big_p,\n jnp.where(z >= dtype(8.0), x_for_small_p, x_otherwise))\n\n x = jnp.where(p > dtype(1. 
- np.exp(-2.)), x, -x)\n infinity = jnp.full(shape, dtype(np.inf))\n x_nan_replaced = jnp.where(\n p <= dtype(0.0), -infinity, jnp.where(p >= dtype(1.0), infinity, x))\n return x_nan_replaced\n\n\n@partial(api.custom_jvp, nondiff_argnums=(1,))\ndef log_ndtr(x, series_order=3):\n r\"\"\"Log Normal distribution function.\n\n For details of the Normal distribution function see `ndtr`.\n\n This function calculates :math:`\\log(\\mathrm{ndtr}(x))` by either calling\n :math:`\\log(\\mathrm{ndtr}(x))` or using an asymptotic series. Specifically:\n\n - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n :math:`\\log(1-x) \\approx -x, x \\ll 1`.\n - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n - For `x <= lower_segment`, we use the series approximation of `erf` to compute\n the log CDF directly.\n\n The `lower_segment` is set based on the precision of the input:\n\n .. math::\n \\begin{align}\n \\mathit{lower\\_segment} =&\n \\ \\begin{cases}\n -20 & x.\\mathrm{dtype}=\\mathit{float64} \\\\\n -10 & x.\\mathrm{dtype}=\\mathit{float32} \\\\\n \\end{cases} \\\\\n \\mathit{upper\\_segment} =&\n \\ \\begin{cases}\n 8& x.\\mathrm{dtype}=\\mathit{float64} \\\\\n 5& x.\\mathrm{dtype}=\\mathit{float32} \\\\\n \\end{cases}\n \\end{align}\n\n\n When `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n .. math::\n \\begin{align}\n \\mathrm{ndtr}(x) =&\\ \\mathit{scale} * (1 + \\mathit{sum}) + R_N \\\\\n \\mathit{scale} =&\\ \\frac{e^{-0.5 x^2}}{-x \\sqrt{2 \\pi}} \\\\\n \\mathit{sum} =&\\ \\sum_{n=1}^N {-1}^n (2n-1)!! / (x^2)^n \\\\\n R_N =&\\ O(e^{-0.5 x^2} (2N+1)!! / |x|^{2N+3})\n \\end{align}\n\n where :math:`(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n `double-factorial\n <https://en.wikipedia.org/wiki/Double_factorial>`_ operator.\n\n\n Args:\n x: an array of type `float32`, `float64`.\n series_order: Positive Python integer. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n\n Returns:\n an array with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is a not Python `integer.`\n ValueError: if `series_order` is not in `[0, 30]`.\n \"\"\"\n if not isinstance(series_order, int):\n raise TypeError(\"series_order must be a Python integer.\")\n if series_order < 0:\n raise ValueError(\"series_order must be non-negative.\")\n if series_order > 30:\n raise ValueError(\"series_order must be <= 30.\")\n\n x = jnp.asarray(x)\n dtype = lax.dtype(x)\n\n if dtype == jnp.float64:\n lower_segment = _LOGNDTR_FLOAT64_LOWER\n upper_segment = _LOGNDTR_FLOAT64_UPPER\n elif dtype == jnp.float32:\n lower_segment = _LOGNDTR_FLOAT32_LOWER\n upper_segment = _LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError(\"x.dtype={} is not supported.\".format(np.dtype(dtype)))\n\n # The basic idea here was ported from:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n # We copy the main idea, with a few changes\n # * For x >> 1, and X ~ Normal(0, 1),\n # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],\n # which extends the range of validity of this function.\n # * We use one fixed series_order for all of 'x', rather than adaptive.\n # * Our docstring properly reflects that this is an asymptotic series, not a\n # Taylor series. We also provided a correct bound on the remainder.\n # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when\n # x=0. 
This happens even though the branch is unchosen because when x=0\n # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan\n # regardless of whether dy is finite. Note that the minimum is a NOP if\n # the branch is chosen.\n return jnp.where(\n lax.gt(x, upper_segment),\n -_ndtr(-x), # log(1-x) ~= -x, x << 1\n jnp.where(lax.gt(x, lower_segment),\n lax.log(_ndtr(lax.max(x, lower_segment))),\n _log_ndtr_lower(lax.min(x, lower_segment),\n series_order)))\ndef _log_ndtr_jvp(series_order, primals, tangents):\n (x,), (t,) = primals, tangents\n ans = log_ndtr(x, series_order=series_order)\n t_out = lax.mul(t, lax.exp(lax.sub(_norm_logpdf(x), ans)))\n return ans, t_out\nlog_ndtr.defjvp(_log_ndtr_jvp)\n\ndef _log_ndtr_lower(x, series_order):\n \"\"\"Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.\"\"\"\n dtype = lax.dtype(x).type\n x_2 = lax.square(x)\n # Log of the term multiplying (1 + sum)\n log_scale = -dtype(0.5) * x_2 - lax.log(-x) - dtype(0.5 * np.log(2. * np.pi))\n return log_scale + lax.log(_log_ndtr_asymptotic_series(x, series_order))\n\n\ndef _log_ndtr_asymptotic_series(x, series_order):\n \"\"\"Calculates the asymptotic series used in log_ndtr.\"\"\"\n dtype = lax.dtype(x).type\n if series_order <= 0:\n return np.array(1, dtype)\n x_2 = lax.square(x)\n even_sum = jnp.zeros_like(x)\n odd_sum = jnp.zeros_like(x)\n x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.\n for n in range(1, series_order + 1):\n y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n\n if n % 2:\n odd_sum += y\n else:\n even_sum += y\n x_2n *= x_2\n return dtype(1.) + even_sum - odd_sum\n\n\ndef _double_factorial(n):\n \"\"\"The double factorial function for small Python integer `n`.\"\"\"\n return np.prod(np.arange(n, 1, -2))\n\n\n_norm_logpdf_constant = np.log(np.sqrt(2 * np.pi))\n\ndef _norm_logpdf(x):\n neg_half = _constant_like(x, -0.5)\n log_normalizer = _constant_like(x, _norm_logpdf_constant)\n return lax.sub(lax.mul(neg_half, lax.square(x)), log_normalizer)\n\n@_wraps(osp_special.i0e)\ndef i0e(x):\n x, = _promote_args_inexact(\"i0e\", x)\n return lax.bessel_i0e(x)\n\n@_wraps(osp_special.i0)\ndef i0(x):\n x, = _promote_args_inexact(\"i0\", x)\n return lax.mul(lax.exp(lax.abs(x)), lax.bessel_i0e(x))\n\n@_wraps(osp_special.i1e)\ndef i1e(x):\n x, = _promote_args_inexact(\"i1e\", x)\n return lax.bessel_i1e(x)\n\n@_wraps(osp_special.i1)\ndef i1(x):\n x, = _promote_args_inexact(\"i1\", x)\n return lax.mul(lax.exp(lax.abs(x)), lax.bessel_i1e(x))\n", "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\n\nimport numpy as np\n\nfrom . import ad_util\nfrom . import core\nfrom . 
import dtypes\n\n_DIMENSION_TYPES = core._DIMENSION_TYPES\n\nUnshapedArray = core.UnshapedArray\nShapedArray = core.ShapedArray\nConcreteArray = core.ConcreteArray\nAbstractToken = core.AbstractToken\nabstract_token = core.abstract_token\ncanonicalize_shape = core.canonicalize_shape\nraise_to_shaped = core.raise_to_shaped\n\n\ndef make_shaped_array(x):\n dtype = dtypes.canonicalize_dtype(dtypes.result_type(x))\n return ShapedArray(np.shape(x), dtype)\n\ndef zeros_like_array(x):\n dtype = dtypes.canonicalize_dtype(dtypes.result_type(x))\n return zeros_like_shaped_array(ShapedArray(np.shape(x), dtype))\n\narray_types = {np.ndarray, np.bool_,\n np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64,\n dtypes.bfloat16, np.float16, np.float32, np.float64,\n np.complex64, np.complex128,\n np.longlong}\n\nfor t in array_types:\n core.pytype_aval_mappings[t] = ConcreteArray\n ad_util.jaxval_zeros_likers[t] = zeros_like_array\n\n\ndef zeros_like_shaped_array(aval):\n assert isinstance(aval, ShapedArray)\n if aval.dtype == dtypes.float0:\n return np.zeros(aval.shape, dtypes.float0)\n return np.broadcast_to(np.array(0, aval.dtype), aval.shape)\n\nad_util.aval_zeros_likers[ShapedArray] = zeros_like_shaped_array\n\ncore.literalable_types.update(array_types)\n\ndef _zeros_like_python_scalar(t, x):\n return np.array(0, dtypes.python_scalar_dtypes[t])\n\ndef _make_concrete_python_scalar(t, x):\n return ConcreteArray(\n np.array(x, dtype=dtypes.python_scalar_dtypes[t]),\n weak_type=True)\n\nfor t in dtypes.python_scalar_dtypes:\n core.pytype_aval_mappings[t] = partial(_make_concrete_python_scalar, t)\n ad_util.jaxval_zeros_likers[t] = partial(_zeros_like_python_scalar, t)\n\ncore.literalable_types.update(dtypes.python_scalar_dtypes.keys())\n", "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nParallelization primitives.\n\"\"\"\n\nimport collections\nimport warnings\n\nimport numpy as np\n\nfrom jax import core\nfrom jax import dtypes\nfrom jax import tree_util\nfrom jax import source_info_util\nfrom jax.lax import lax\nfrom jax.abstract_arrays import ShapedArray, raise_to_shaped\nfrom jax.interpreters import ad\nfrom jax.interpreters import xla\nfrom jax.interpreters import pxla\nfrom jax.interpreters import batching\nfrom jax.interpreters import partial_eval as pe\nfrom jax.util import partial, unzip2, prod\nfrom jax.lib import xla_client as xc\nfrom jax.lib import xla_bridge as xb\nfrom jax.config import config\nfrom jax._src.numpy import lax_numpy\n\nxops = xc.ops\n\n\n### parallel traceables\n\ndef psum(x, axis_name, *, axis_index_groups=None):\n \"\"\"Compute an all-reduce sum on ``x`` over the pmapped axis ``axis_name``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n Inputs of boolean dtype are converted to integers before the reduction.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see 
the\n :func:`jax.pmap` documentation for more details).\n axis_index_groups: optional list of lists containing axis indices (e.g. for\n an axis of size 4, [[0, 1], [2, 3]] would perform psums over the first\n two and last two replicas). Groups must cover all axis indices exactly\n once, and all groups must be the same size.\n\n\n Returns:\n Array(s) with the same shape as ``x`` representing the result of an\n all-reduce sum along the axis ``axis_name``.\n\n For example, with 4 XLA devices available:\n\n >>> x = np.arange(4)\n >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i'), axis_name='i')(x)\n >>> print(y)\n [6 6 6 6]\n >>> y = jax.pmap(lambda x: x / jax.lax.psum(x, 'i'), axis_name='i')(x)\n >>> print(y)\n [ 0. 0.16666667 0.33333334 0.5 ]\n \"\"\"\n _validate_axis_index_groups(axis_index_groups)\n leaves, treedef = tree_util.tree_flatten(x)\n leaves = [lax.convert_element_type(l, np.int32)\n if dtypes.dtype(l) == np.bool_ else l for l in leaves]\n out_flat = psum_p.bind(*leaves, axis_name=axis_name,\n axis_index_groups=axis_index_groups)\n return tree_util.tree_unflatten(treedef, out_flat)\n\ndef pmean(x, axis_name, *, axis_index_groups=None):\n \"\"\"Compute an all-reduce mean on ``x`` over the pmapped axis ``axis_name``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n axis_index_groups: optional list of lists containing axis indices (e.g. for\n an axis of size 4, [[0, 1], [2, 3]] would perform pmeans over the first\n two and last two replicas). Groups must cover all axis indices exactly\n once, and all groups must be the same size.\n\n Returns:\n Array(s) with the same shape as ``x`` representing the result of an\n all-reduce mean along the axis ``axis_name``.\n\n For example, with 4 XLA devices available:\n\n >>> x = np.arange(4)\n >>> y = jax.pmap(lambda x: jax.lax.pmean(x, 'i'), axis_name='i')(x)\n >>> print(y)\n [ 1.5 1.5 1.5 1.5 ]\n >>> y = jax.pmap(lambda x: x / jax.lax.pmean(x, 'i'), axis_name='i')(x)\n >>> print(y)\n [ 0. 0.66666667 1.33333334 2.0 ]\n \"\"\"\n x = psum(x, axis_name=axis_name, axis_index_groups=axis_index_groups)\n n = psum(1, axis_name=axis_name, axis_index_groups=axis_index_groups)\n return tree_util.tree_map(lambda v: v / n, x)\n\ndef pmax(x, axis_name, *, axis_index_groups=None):\n \"\"\"Compute an all-reduce max on ``x`` over the pmapped axis ``axis_name``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n axis_index_groups: optional list of lists containing axis indices (e.g. for\n an axis of size 4, [[0, 1], [2, 3]] would perform pmaxes over the first\n two and last two replicas). 
Groups must cover all axis indices exactly\n once, and all groups must be the same size.\n\n Returns:\n Array(s) with the same shape as ``x`` representing the result of an\n all-reduce max along the axis ``axis_name``.\n \"\"\"\n _validate_axis_index_groups(axis_index_groups)\n return tree_util.tree_map(partial(\n pmax_p.bind, axis_name=axis_name, axis_index_groups=axis_index_groups), x)\n\ndef pmin(x, axis_name, *, axis_index_groups=None):\n \"\"\"Compute an all-reduce min on ``x`` over the pmapped axis ``axis_name``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n axis_index_groups: optional list of lists containing axis indices (e.g. for\n an axis of size 4, [[0, 1], [2, 3]] would perform pmins over the first\n two and last two replicas). Groups must cover all axis indices exactly\n once, and all groups must be the same size.\n\n Returns:\n Array(s) with the same shape as ``x`` representing the result of an\n all-reduce min along the axis ``axis_name``.\n \"\"\"\n _validate_axis_index_groups(axis_index_groups)\n return tree_util.tree_map(partial(\n pmin_p.bind, axis_name=axis_name, axis_index_groups=axis_index_groups), x)\n\ndef _validate_axis_index_groups(axis_index_groups):\n if axis_index_groups is None:\n return\n len_0 = len(axis_index_groups[0])\n if any(len(g) != len_0 for g in axis_index_groups):\n raise ValueError(\"axis_index_groups must all be the same size\")\n axis_space = range(len_0 * len(axis_index_groups))\n if {i for g in axis_index_groups for i in g} != set(axis_space):\n raise ValueError(\"axis_index_groups must cover all indices exactly once\")\n\ndef ppermute(x, axis_name, perm):\n \"\"\"Perform a collective permutation according to the permutation ``perm``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n This function is an analog of the CollectivePermute XLA HLO.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n perm: list of pairs of ints, representing\n ``(source_index, destination_index)``\n pairs that encode how the mapped axis named ``axis_name`` should be\n shuffled. The integer values are treated as indices into the mapped axis\n ``axis_name``. Any two pairs should not have the same source index or the\n same destination index. 
For each index of the axis ``axis_name`` that does\n not correspond to a destination index in ``perm``, the corresponding\n values in the result are filled with zeros of the appropriate type.\n\n Returns:\n Array(s) with the same shape as ``x`` with slices along the axis\n ``axis_name`` gathered from ``x`` according to the permutation ``perm``.\n \"\"\"\n return tree_util.tree_map(\n partial(ppermute_p.bind, axis_name=axis_name, perm=tuple(perm)), x)\n\ndef pshuffle(x, axis_name, perm):\n \"\"\"Convenience wrapper of jax.lax.ppermute with alternate permutation encoding\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n perm: list of of ints encoding sources for the permutation to be applied to\n the axis named ``axis_name``, so that the output at axis index i\n comes from the input at axis index perm[i]. Every integer in [0, N) should\n be included exactly once for axis size N.\n\n Returns:\n Array(s) with the same shape as ``x`` with slices along the axis\n ``axis_name`` gathered from ``x`` according to the permutation ``perm``.\n \"\"\"\n if set(perm) != set(range(len(perm))):\n raise ValueError(f\"`perm` does not represent a permutation: {perm}\")\n return ppermute(x, axis_name, list(zip(perm, range(len(perm)))))\n\n\ndef pswapaxes(x, axis_name, axis):\n \"\"\"Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n The mapped axis size must be equal to the size of the unmapped axis; that is,\n we must have ``lax.psum(1, axis_name) == x.shape[axis]``.\n\n This function is a special case of ``all_to_all`` where the pmapped axis of\n the input is placed at the position ``axis`` in the output. 
That is, it is\n equivalent to ``all_to_all(x, axis_name, axis, axis)``.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n axis: int indicating the unmapped axis of ``x`` to map with the name\n ``axis_name``.\n\n Returns:\n Array(s) with the same shape as ``x``.\n \"\"\"\n return all_to_all(x, axis_name, axis, axis)\n\ndef all_to_all(x, axis_name, split_axis, concat_axis):\n \"\"\"Materialize the mapped axis and map a different axis.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n In the output, the input mapped axis ``axis_name`` is materialized at the\n logical axis position ``concat_axis``, and the input unmapped axis at position\n ``split_axis`` is mapped with the name ``axis_name``.\n\n The input mapped axis size must be equal to the size of the axis to be mapped;\n that is, we must have ``lax.psum(1, axis_name) == x.shape[split_axis]``.\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n split_axis: int indicating the unmapped axis of ``x`` to map with the name\n ``axis_name``.\n concat_axis: int indicating the position in the output to materialize the\n mapped axis of the input with the name ``axis_name``.\n\n Returns:\n Array(s) with shape given by the expression::\n\n np.insert(np.delete(x.shape, split_axis), concat_axis, axis_size)\n\n where ``axis_size`` is the size of the mapped axis named ``axis_name`` in\n the input ``x``, i.e. ``axis_size = lax.psum(1, axis_name)``.\n \"\"\"\n def bind(x):\n if psum(1, axis_name) != x.shape[split_axis]:\n msg = (\"all_to_all requires the size of the mapped axis axis_name to \"\n \"equal x.shape[split_axis], but they are {} and {} respectively.\")\n raise ValueError(msg.format(psum(1, axis_name), x.shape[split_axis]))\n return all_to_all_p.bind(x, split_axis=split_axis, concat_axis=concat_axis,\n axis_name=axis_name)\n return tree_util.tree_map(bind, x)\n\ndef axis_index(axis_name):\n \"\"\"Return the index along the mapped axis ``axis_name``.\n\n Args:\n axis_name: hashable Python object used to name the mapped axis.\n\n Returns:\n An integer representing the index.\n\n For example, with 8 XLA devices available:\n\n >>> from functools import partial\n >>> @partial(jax.pmap, axis_name='i')\n ... def f(_):\n ... return lax.axis_index('i')\n ...\n >>> f(np.zeros(4))\n ShardedDeviceArray([0, 1, 2, 3], dtype=int32)\n >>> f(np.zeros(8))\n ShardedDeviceArray([0, 1, 2, 3, 4, 5, 6, 7], dtype=int32)\n >>> @partial(jax.pmap, axis_name='i')\n ... @partial(jax.pmap, axis_name='j')\n ... def f(_):\n ... 
return lax.axis_index('i'), lax.axis_index('j')\n ...\n >>> x, y = f(np.zeros((4, 2)))\n >>> print(x)\n [[0 0]\n [1 1]\n [2 2]\n [3 3]]\n >>> print(y)\n [[0 1]\n [0 1]\n [0 1]\n [0 1]]\n \"\"\"\n return axis_index_p.bind(axis_name=axis_name)\n\n\n### parallel primitives\n\ndef _allreduce_soft_pmap_rule(prim, reducer, vals, mapped, chunk_size,\n *, axis_name, axis_index_groups):\n if axis_index_groups is not None:\n raise NotImplementedError(\"soft_pmap does not yet support axis_index_groups\")\n reduced_vals = [reducer(x, [0]) if m else x for x, m in zip(vals, mapped)]\n outs = prim.bind(*reduced_vals, axis_name=axis_name,\n axis_index_groups=axis_index_groups)\n return outs, (False,) * len(vals)\n\ndef _allreduce_translation_rule(prim, c, val, *, axis_name, axis_index_groups,\n axis_env, platform):\n replica_groups = _replica_groups(axis_env, axis_name, axis_index_groups)\n dtype = c.get_shape(val).numpy_dtype()\n scalar = ShapedArray((), dtype)\n computation = xla.primitive_subcomputation(prim, scalar, scalar)\n replica_groups_protos = xc.make_replica_groups(replica_groups)\n return xops.AllReduce(val, computation, replica_groups_protos, None, None)\n\n# It is assumed that all collectives that use this rule are commutative\n# and associative over axis names if they support tuples. That is,\n# they have to satisfy:\n# collective(x, ('i', 'j')) == collective(x, ('j', 'i'))\n# == collective(collective(x, 'j'), 'i')\ndef _split_axis_comm_assoc(primitive, split_name, args, params):\n axis_names = params['axis_name']\n assert isinstance(axis_names, tuple)\n if params['axis_index_groups'] is not None:\n raise NotImplementedError(\"axis_index_groups not supported in axis splitting. \"\n \"Please open a feature request!\")\n remaining_axes = list(axis_names)\n remaining_axes.remove(split_name)\n remaining_axes = tuple(remaining_axes)\n split_params = dict(params, axis_name=split_name)\n remain_params = dict(params, axis_name=remaining_axes)\n split_result = primitive.bind(*args, **split_params)\n if not primitive.multiple_results:\n split_result = (split_result,)\n return primitive.bind(*split_result, **remain_params)\n\n# NB: This is only used for collectives that do not include the vmapped axis name,\n# which is why the rule is so simple. All other collectives go through split_axis.\ndef _collective_batcher(prim, args, dims, **params):\n return prim.bind(*args, **params), dims if prim.multiple_results else dims[0]\n\ndef _batched_reduction_collective(prim, if_mapped, if_unmapped,\n vals_in, dims_in, axis_size,\n axis_name, axis_index_groups):\n if axis_index_groups is not None:\n raise NotImplementedError(\"axis_index_groups not implemented in vmap collectives. 
\"\n \"Please open a feature request!\")\n vals_out = [if_mapped(v, d) if d is not batching.not_mapped else if_unmapped(v, axis_size)\n for v, d in zip(vals_in, dims_in)]\n dims_out = [batching.not_mapped] * len(vals_in)\n return vals_out, dims_out\n\ndef _replica_groups(axis_env, axis_name, axis_index_groups):\n replica_groups = xla.axis_groups(axis_env, axis_name)\n if axis_index_groups is not None:\n replica_groups = [[axis_group[i] for i in axis_index_group]\n for axis_group in replica_groups\n for axis_index_group in axis_index_groups]\n return replica_groups\n\n# psum translation rule has special handling for complex dtypes\ndef _psum_translation_rule(c, *args, axis_name, axis_index_groups, axis_env,\n platform):\n if platform in (\"cpu\", \"tpu\"):\n return _notuple_psum_translation_rule(c, *args, axis_name=axis_name,\n axis_index_groups=axis_index_groups,\n axis_env=axis_env, platform=platform)\n\n # XLA's tuple all-reduce doesn't support different dtypes in the same\n # allreduce. Instead, we perform once all-reduce for each argument input type.\n args_by_type = collections.defaultdict(lambda: ([], []))\n for i, arg in enumerate(args):\n indices, dtype_args = args_by_type[c.get_shape(arg).numpy_dtype()]\n indices.append(i)\n dtype_args.append(arg)\n\n # The outputs, in the original argument order.\n out = [None] * len(args)\n replica_groups = _replica_groups(axis_env, axis_name, axis_index_groups)\n replica_groups_protos = xc.make_replica_groups(replica_groups)\n for dtype, (indices, dtype_args) in sorted(args_by_type.items()):\n is_complex = dtypes.issubdtype(dtype, np.complexfloating)\n n = len(dtype_args)\n if is_complex:\n dtype_args = ([xops.Real(x) for x in dtype_args] +\n [xops.Imag(x) for x in dtype_args])\n scalar = ShapedArray((), c.get_shape(dtype_args[0]).numpy_dtype())\n computation = xla.primitive_subcomputation(lax.add_p, scalar, scalar)\n all_reduce = xops.AllReduce(xops.Tuple(c, dtype_args), computation,\n replica_groups_protos, None, None)\n if is_complex:\n xs = [xops.Complex(xops.GetTupleElement(all_reduce, i),\n xops.GetTupleElement(all_reduce, n + i)) for i in range(n)]\n else:\n xs = [xops.GetTupleElement(all_reduce, i) for i in range(n)]\n for i, x in zip(indices, xs):\n out[i] = x\n return xops.Tuple(c, out)\n\n# TODO(b/150476027): CPU doesn't support tuple all-reduce correctly. But\n# fortunately we don't really need it in that case because CPU doesn't support\n# cross-task communication either.\n# TODO(b/155446630): An XLA:TPU optimization pass also doesn't support\n# tuple all-reduce yet. 
Meanwhile, rely on deterministic compiler behavior.\ndef _notuple_psum_translation_rule(c, *args, axis_name, axis_env,\n axis_index_groups, platform):\n def _translate(val):\n psum = partial(_allreduce_translation_rule, lax.add_p, c,\n axis_name=axis_name, axis_env=axis_env,\n axis_index_groups=axis_index_groups, platform=platform)\n dtype = c.get_shape(val).numpy_dtype()\n if dtypes.issubdtype(dtype, np.complexfloating):\n return xops.Complex(psum(xops.Real(val)), psum(xops.Imag(val)))\n else:\n return psum(val)\n return xops.Tuple(c, list(map(_translate, args)))\n\ndef _psum_transpose_rule(cts, axis_name, axis_index_groups):\n nonzero_out_cts, treedef = tree_util.tree_flatten(cts)\n nonzero_in_cts = psum_p.bind(*nonzero_out_cts, axis_name=axis_name,\n axis_index_groups=axis_index_groups)\n return tree_util.tree_unflatten(treedef, nonzero_in_cts)\n\npsum_p = core.Primitive('psum')\npsum_p.multiple_results = True\npsum_p.def_abstract_eval(lambda *args, **params: map(raise_to_shaped, args))\npxla.soft_pmap_rules[psum_p] = \\\n partial(_allreduce_soft_pmap_rule, psum_p, lax._reduce_sum)\nxla.parallel_translations[psum_p] = _psum_translation_rule\nad.deflinear(psum_p, _psum_transpose_rule)\npxla.multi_host_supported_collectives.add(psum_p)\nbatching.split_axis_rules[psum_p] = partial(_split_axis_comm_assoc, psum_p)\nbatching.primitive_batchers[psum_p] = partial(_collective_batcher, psum_p)\nbatching.collective_rules[psum_p] = \\\n partial(_batched_reduction_collective,\n psum_p,\n lambda v, d: v.sum(d),\n lambda v, axis_size: axis_size * v)\n\n# We set a special bind rule for psum so that psum(1, 'i') can be evaluated at\n# tracing time.\n@psum_p.def_custom_bind\ndef psum_bind(*args, axis_name, axis_index_groups):\n if all(not isinstance(x, core.Tracer) for x in args):\n if axis_index_groups is not None:\n size = len(axis_index_groups[0])\n elif type(axis_name) is tuple:\n size = prod([core.axis_frame(name).size for name in axis_name]) # type: ignore\n else:\n size = core.axis_frame(axis_name).size # type: ignore\n return tuple(size * x for x in args)\n return core.Primitive.bind(\n psum_p, *args, axis_name=axis_name, axis_index_groups=axis_index_groups)\n\n\npmax_p = core.Primitive('pmax')\npmax_p.def_abstract_eval(lambda x, **params: raise_to_shaped(x))\nxla.parallel_translations[pmax_p] = \\\n partial(_allreduce_translation_rule, lax.max_p)\npxla.multi_host_supported_collectives.add(pmax_p)\nbatching.split_axis_rules[pmax_p] = partial(_split_axis_comm_assoc, pmax_p)\nbatching.primitive_batchers[pmax_p] = partial(_collective_batcher, pmax_p)\nbatching.collective_rules[pmax_p] = \\\n partial(_batched_reduction_collective,\n pmax_p,\n lambda v, d: v.max(d),\n lambda v, axis_size: v)\n\n\npmin_p = core.Primitive('pmin')\npmin_p.def_abstract_eval(lambda x, **params: raise_to_shaped(x))\nxla.parallel_translations[pmin_p] = \\\n partial(_allreduce_translation_rule, lax.min_p)\npxla.multi_host_supported_collectives.add(pmin_p)\nbatching.split_axis_rules[pmin_p] = partial(_split_axis_comm_assoc, pmin_p)\nbatching.primitive_batchers[pmin_p] = partial(_collective_batcher, pmin_p)\nbatching.collective_rules[pmin_p] = \\\n partial(_batched_reduction_collective,\n pmin_p,\n lambda v, d: v.min(d),\n lambda v, axis_size: v)\n\n\ndef _ppermute_translation_rule(c, x, *, axis_name, axis_env, perm, platform):\n replica_groups = _replica_groups(axis_env, axis_name, None)\n group_size = len(replica_groups[0])\n srcs, dsts = unzip2((src % group_size, dst % group_size) for src, dst in perm)\n if not (len(srcs) == 
len(set(srcs)) and len(dsts) == len(set(dsts))):\n msg = \"ppermute sources and destinations must be unique, got {}.\"\n raise ValueError(msg.format(perm))\n\n full_perm = []\n for grp in replica_groups:\n grp = list(sorted(grp))\n full_perm.extend((grp[src], grp[dst]) for src, dst in perm)\n return xops.CollectivePermute(x, full_perm)\n\ndef _ppermute_transpose_rule(t, perm, axis_name):\n srcs, dsts = unzip2(perm)\n inverse_perm = list(zip(dsts, srcs))\n return [ppermute(t, axis_name=axis_name, perm=inverse_perm)]\n\ndef _ppermute_batcher(vals_in, dims_in, axis_size, axis_name, perm):\n assert len(perm) == axis_size, \"Permutation doesn't match the axis size!\"\n perm_indices = np.full((axis_size,), -1, dtype=np.int32)\n for s, d in perm:\n perm_indices[s] = d\n vals_out = [lax_numpy.take(v, perm_indices, d) if d is not batching.not_mapped else v\n for v, d in zip(vals_in, dims_in)]\n return vals_out, dims_in\n\nppermute_p = core.Primitive('ppermute')\nppermute_p.def_abstract_eval(lambda x, **params: raise_to_shaped(x))\nad.deflinear(ppermute_p, _ppermute_transpose_rule)\nxla.parallel_translations[ppermute_p] = _ppermute_translation_rule\npxla.multi_host_supported_collectives.add(ppermute_p)\nbatching.primitive_batchers[ppermute_p] = partial(_collective_batcher, ppermute_p)\nbatching.collective_rules[ppermute_p] = _ppermute_batcher\n\n\ndef _moveaxis(src, dst, x):\n perm = [i for i in range(x.ndim) if i != src]\n perm.insert(dst, src)\n return lax.transpose(x, perm)\n\ndef _all_to_all_via_all_gather(x, *, axis_name, split_axis, concat_axis):\n global_full = all_gather(x, axis_name)\n idx = axis_index(axis_name)\n local_slice = lax.dynamic_index_in_dim(global_full, idx, split_axis + 1, keepdims=False)\n return _moveaxis(0, concat_axis, local_slice)\n\ndef _all_to_all_translation_rule(c, x, *, split_axis, concat_axis, axis_name,\n axis_env, platform):\n # Workaround for AllToAll not being implemented on CPU.\n replica_groups = _replica_groups(axis_env, axis_name, None)\n if len(replica_groups[0]) == 1:\n return x\n elif platform != 'tpu':\n warnings.warn(\"all_to_all (and pswapaxes) are only implemented properly for TPUs. 
All other \"\n \"backends emulate it using a very slow and memory intensive algorithm, so expect \"\n \"significant slowdowns.\")\n lowering = xla.lower_fun(_all_to_all_via_all_gather, multiple_results=False, parallel=True)\n return lowering(c, x,\n split_axis=split_axis, concat_axis=concat_axis, axis_name=axis_name,\n axis_env=axis_env, platform=platform)\n else:\n split_count = len(replica_groups[0])\n if not all(split_count == len(g) for g in replica_groups):\n raise ValueError('Replica groups must be equally sized')\n replica_groups_protos = xc.make_replica_groups(replica_groups)\n if concat_axis == split_axis:\n return xops.AllToAll(x, split_axis, concat_axis, split_count,\n replica_groups_protos)\n else:\n if concat_axis < split_axis:\n split_axis += 1\n elif split_axis < concat_axis:\n concat_axis += 1\n x = xla.lower_fun(partial(lax.expand_dims, dimensions=(concat_axis,)), multiple_results=False)(c, x)\n x = xops.AllToAll(x, split_axis, concat_axis, split_count, replica_groups_protos)\n x = xla.lower_fun(partial(lax.squeeze, dimensions=(split_axis,)), multiple_results=False)(c, x)\n return x\n\ndef _all_to_all_transpose_rule(cts, axis_name, split_axis, concat_axis):\n return (all_to_all(cts, axis_name=axis_name, split_axis=concat_axis, concat_axis=split_axis),)\n\ndef _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis):\n x, = vals_in\n d, = dims_in\n if d <= split_axis:\n split_axis += 1\n if d <= concat_axis:\n concat_axis += 1\n # Note: At this point split_axis and concat_axis are adjusted to the extra\n # dimension and we have d != split_axis and d != concat_axis.\n if split_axis < d < concat_axis:\n d -= 1\n elif concat_axis < d < split_axis:\n d += 1\n result = all_to_all_p.bind(x, axis_name=axis_name, split_axis=split_axis, concat_axis=concat_axis)\n return result, d\n\ndef _all_to_all_batched_collective(vals_in, dims_in, axis_size, axis_name, split_axis, concat_axis):\n x, = vals_in\n d, = dims_in\n split_axis_adj = split_axis + (1 if d <= split_axis else 0)\n concat_axis_adj = concat_axis + (1 if split_axis_adj <= concat_axis else 0)\n if d < split_axis_adj < concat_axis_adj:\n split_axis_adj -= 1\n elif concat_axis_adj < split_axis_adj < d:\n split_axis_adj += 1\n return [_moveaxis(d, concat_axis_adj, x)], [split_axis_adj]\n\ndef _all_to_all_split_axis_rule(split_name, vals, params):\n concat_axis = params['concat_axis']\n split_axis = params['split_axis']\n axis_names = params['axis_name']\n assert isinstance(axis_names, tuple)\n x, = vals\n\n split_pos = list(axis_names).index(split_name)\n before_axes = axis_names[:split_pos]\n after_axes = axis_names[split_pos+1:]\n\n # Flatten the split_dim\n split_name_size = psum(1, split_name)\n before_size = psum(1, before_axes)\n after_size = psum(1, after_axes)\n unroll_shape = list(x.shape)\n unroll_shape[split_axis:split_axis+1] = [before_size, split_name_size, after_size]\n unroll_x = lax.reshape(x, unroll_shape)\n\n if before_axes:\n out_before = all_to_all(unroll_x, before_axes, split_axis, concat_axis=0)\n else:\n out_before = _moveaxis(split_axis, 0, unroll_x)\n out_split = all_to_all(out_before, split_name, split_axis + 1, concat_axis=1)\n if after_axes:\n out_after = all_to_all(out_split, after_axes, split_axis + 2, concat_axis=2)\n else:\n out_after = _moveaxis(split_axis + 2, 2, out_split)\n\n # Flatten the concat axes and move them to the right position\n y = out_after.reshape((np.prod(out_after.shape[:3]), *out_after.shape[3:]))\n return _moveaxis(0, concat_axis, y)\n\ndef 
_all_to_all_abstract_eval(x, axis_name, split_axis, concat_axis):\n input_aval = raise_to_shaped(x)\n shape = list(input_aval.shape)\n size = shape.pop(split_axis)\n shape.insert(concat_axis, size)\n return ShapedArray(tuple(shape), input_aval.dtype, weak_type=False)\n\nall_to_all_p = core.Primitive('all_to_all')\nall_to_all_p.def_abstract_eval(_all_to_all_abstract_eval)\nxla.parallel_translations[all_to_all_p] = _all_to_all_translation_rule\nad.deflinear(all_to_all_p, _all_to_all_transpose_rule)\npxla.multi_host_supported_collectives.add(all_to_all_p)\nbatching.primitive_batchers[all_to_all_p] = _all_to_all_batcher\nbatching.collective_rules[all_to_all_p] = _all_to_all_batched_collective\nbatching.split_axis_rules[all_to_all_p] = _all_to_all_split_axis_rule\n\n\ndef _expand(dim, size, index, x):\n shape = list(x.shape)\n shape.insert(dim, size)\n out = lax.full(shape, lax._const(x, 0))\n return lax.dynamic_update_index_in_dim(out, x, index, dim)\n\ndef _allgather(x, dim, size, index, axis_name, axis_index_groups=None):\n outs = tree_util.tree_map(partial(_expand, dim, size, index), x)\n return psum(outs, axis_name, axis_index_groups=axis_index_groups)\n\ndef all_gather(x, axis_name, *, axis_index_groups=None):\n \"\"\"Gather values of x across all replicas.\n\n If ``x`` is a pytree then the result is equivalent to mapping this function to\n each leaf in the tree.\n\n This is equivalent to, but faster than, all_to_all(broadcast(x)).\n\n Args:\n x: array(s) with a mapped axis named ``axis_name``.\n axis_name: hashable Python object used to name a pmapped axis (see the\n :func:`jax.pmap` documentation for more details).\n axis_index_groups: optional list of lists containing axis indices (e.g. for\n an axis of size 4, [[0, 1], [2, 3]] would run all gather over the first\n two and last two replicas). Groups must cover all axis indices exactly\n once, and all groups must be the same size.\n\n Returns:\n Array(s) representing the result of an all-gather along the axis\n ``axis_name``. Shapes are the same as ``x.shape``, but with a leading\n dimension of the axis_size.\n\n For example, with 4 XLA devices available:\n\n >>> x = np.arange(4)\n >>> y = jax.pmap(lambda x: jax.lax.all_gather(x, 'i'), axis_name='i')(x)\n >>> print(y)\n [[0 1 2 3]\n [0 1 2 3]\n [0 1 2 3]\n [0 1 2 3]]\n\n An example of using axis_index_groups, groups split by even & odd device ids:\n\n >>> x = np.arange(16).reshape(4, 4)\n >>> print(x)\n [[ 0. 1. 2. 3.]\n [ 4. 5. 6. 7.]\n [ 8. 9. 10. 11.]\n [12. 13. 14. 15.]]\n >>> y = jax.pmap(lambda x: jax.lax.all_gather(\n ... x, 'i', axis_index_groups=[[0, 2], [3, 1]]))(x)\n >>> print(y)\n [[[ 0. 1. 2. 3.]\n [ 8. 9. 10. 11.]]\n [[12. 13. 14. 15.]\n [ 4. 5. 6. 7.]]\n [[ 0. 1. 2. 3.]\n [ 8. 9. 10. 11.]]\n [[12. 13. 14. 15.]\n [ 4. 5. 6. 
7.]]\n \"\"\"\n\n index = axis_index(axis_name)\n if axis_index_groups is not None:\n indices = np.array(axis_index_groups).flatten()\n axis_index_to_group_index = indices.argsort() % len(axis_index_groups[0])\n index = lax_numpy.array(axis_index_to_group_index)[index]\n\n axis_size = psum(1, axis_name, axis_index_groups=axis_index_groups)\n\n return _allgather(x, 0, axis_size, index, axis_name, axis_index_groups)\n\n\ndef _axis_index_translation_rule(c, *, axis_name, axis_env, platform):\n axis_pos = list(axis_env.names).index(axis_name)\n nreplicas = axis_env.nreps // prod(axis_env.sizes)\n div = xb.constant(c, np.array(nreplicas * prod(axis_env.sizes[axis_pos+1:]),\n dtype=np.uint32))\n mod = xb.constant(c, np.array(axis_env.sizes[axis_pos], dtype=np.uint32))\n unsigned_index = xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)\n return xops.ConvertElementType(unsigned_index, xb.dtype_to_etype(np.int32))\n\ndef _axis_index_soft_pmap_rule(vals, mapped, chunk_size, *, axis_name):\n assert not vals and not mapped\n idx = axis_index(axis_name) # type: ignore\n return idx * chunk_size + np.arange(chunk_size, dtype=np.int32), True\n\naxis_index_p = core.Primitive('axis_index')\nxla.parallel_translations[axis_index_p] = _axis_index_translation_rule\npxla.soft_pmap_rules[axis_index_p] = _axis_index_soft_pmap_rule # type: ignore\naxis_index_p.def_abstract_eval(\n lambda *args, **params: ShapedArray((), np.int32))\npxla.multi_host_supported_collectives.add(axis_index_p)\n\n# Axis index doesn't get any arguments, so that the default bind would have no\n# way to call into a data-dependency based trace such as vmap. Each trace that\n# wants to bind an axis name has to additionally implement `process_axis_index`\n# and put its main trace on the axis env stack.\ndef _axis_index_bind(*, axis_name):\n if not isinstance(axis_name, (tuple, list)):\n axis_name = (axis_name,)\n inner_size = 1\n index = 0\n for name in reversed(axis_name):\n frame = core.axis_frame(name)\n if frame.main_trace is not None:\n trace = frame.main_trace.trace_type(frame.main_trace, core.cur_sublevel())\n name_idx = trace.process_axis_index(frame)\n else:\n name_idx = core.Primitive.bind(axis_index_p, axis_name=name)\n index += name_idx * inner_size\n inner_size *= psum(1, name)\n return index\naxis_index_p.def_custom_bind(_axis_index_bind)\n\ndef _process_axis_index(self, frame):\n return batching.BatchTracer(self, lax_numpy.arange(frame.size, dtype=np.int32), 0)\nbatching.BatchTrace.process_axis_index = _process_axis_index\n\n\n@config.register_omnistaging_disabler\ndef omnistaging_disabler() -> None:\n global axis_index\n\n psum_p.bind = partial(core.Primitive.bind, psum_p)\n psum_p.def_impl(partial(pxla.apply_parallel_primitive, psum_p)) # type: ignore\n pxla.parallel_pure_rules[psum_p] = lambda *args, shape: (x * prod(shape) for x in args) # type: ignore\n\n def _axis_index_bind(*, axis_name):\n dynamic_axis_env = pxla._thread_local_state.dynamic_axis_env\n frame = dynamic_axis_env[axis_name]\n sizes = dynamic_axis_env.sizes[:dynamic_axis_env.index(frame)+1]\n nreps = dynamic_axis_env.nreps\n trace = frame.pmap_trace\n\n out_aval = ShapedArray((), np.int32)\n out_tracer = pe.JaxprTracer(trace, pe.PartialVal.unknown(out_aval), None)\n eqn = pe.new_eqn_recipe([], [out_tracer], axis_index_p,\n dict(nreps=nreps, sizes=sizes, axis_name=axis_name),\n source_info_util.current())\n out_tracer.recipe = eqn\n\n return out_tracer\n\n def _axis_index_translation_rule(c, nreps, sizes, axis_name):\n div = xb.constant(c, np.array(nreps // 
prod(sizes), dtype=np.uint32))\n mod = xb.constant(c, np.array(sizes[-1], dtype=np.uint32))\n unsigned_index = xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)\n return xops.ConvertElementType(unsigned_index, xb.dtype_to_etype(np.int32))\n\n axis_index_p.def_custom_bind(_axis_index_bind)\n axis_index_p.def_abstract_eval(\n lambda *args, **params: ShapedArray((), np.int32))\n xla.translations[axis_index_p] = _axis_index_translation_rule\n" ]
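(Editorial aside, not part of the dataset row above: the preceding `code` entry quotes jax/lax/parallel.py, which defines the collective primitives `psum`, `pmean`, `pmax`, `pmin`, `ppermute`, `all_to_all`, `all_gather`, and `axis_index`. As a reading aid, here is a minimal usage sketch of a few of those collectives under `jax.pmap`. The axis name `'i'` and the use of `jax.local_device_count()` are illustrative assumptions; on a single-device machine the mapped axis has size 1, so the reductions are trivial.)

from functools import partial
import jax
import jax.numpy as jnp
from jax import lax

# Illustrative sketch only: exercise the collectives documented in the quoted file.
n = jax.local_device_count()          # size of the mapped axis 'i' (assumption: >= 1 device)
x = jnp.arange(n, dtype=jnp.float32)  # one value per device

@partial(jax.pmap, axis_name='i')
def stats(v):
    total = lax.psum(v, 'i')      # all-reduce sum across the pmapped axis
    mean = lax.pmean(v, 'i')      # all-reduce mean across the pmapped axis
    idx = lax.axis_index('i')     # this device's index along the axis
    return total, mean, idx

total, mean, idx = stats(x)
print(total, mean, idx)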
[ [ "numpy.array", "numpy.log", "numpy.exp", "numpy.expm1", "numpy.arange", "numpy.sqrt", "numpy.dtype" ], [ "numpy.array", "numpy.zeros", "numpy.shape" ], [ "numpy.full", "numpy.array", "numpy.prod", "numpy.arange" ] ]
YanzhaoWu/tensorflow-blue
[ "917ffe4d38e71421193ce08ba2ed2ff4ff50d55f" ]
[ "tensorflow/python/keras/_impl/keras/engine/network.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"A `Network` is way to compose layers: the topological form of a `Model`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport json\nimport os\nimport weakref\n\nimport numpy as np\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras._impl.keras import backend\nfrom tensorflow.python.keras._impl.keras.engine import base_layer\nfrom tensorflow.python.keras._impl.keras.engine import saving\nfrom tensorflow.python.keras._impl.keras.utils import generic_utils\nfrom tensorflow.python.keras._impl.keras.utils import tf_utils\nfrom tensorflow.python.keras._impl.keras.utils.io_utils import ask_to_proceed_with_overwrite\nfrom tensorflow.python.keras._impl.keras.utils.layer_utils import print_summary as print_layer_summary\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpointable\nfrom tensorflow.python.training import checkpointable_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\n\n\n# pylint: disable=g-import-not-at-top\ntry:\n import h5py\nexcept ImportError:\n h5py = None\n\ntry:\n import yaml\nexcept ImportError:\n yaml = None\n# pylint: enable=g-import-not-at-top\n\n\nclass Network(base_layer.Layer):\n \"\"\"A `Network` is a composition of layers.\n\n It is the topological form of a \"model\". 
A `Model`\n is simply a `Network` with added training routines.\n \"\"\"\n\n def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called\n # Signature detection\n if (len(args) == 2 or\n len(args) == 1 and 'outputs' in kwargs or\n 'inputs' in kwargs and 'outputs' in kwargs):\n # Graph network\n self._init_graph_network(*args, **kwargs)\n else:\n # Subclassed network\n self._init_subclassed_network(**kwargs)\n\n def _base_init(self, name=None):\n # The following are implemented as property functions:\n # self.trainable_weights\n # self.non_trainable_weights\n # self.input_spec\n # self.losses\n # self.updates\n\n self._init_set_name(name, zero_based=True)\n self._activity_regularizer = None\n # This acts just like the `trainable` attribute of any layer instance.\n # It does not affect users of the underlying layers, only users of the\n # Network instance.\n self.trainable = True\n self._is_compiled = False\n self._expects_training_arg = False\n\n self.supports_masking = False\n if not hasattr(self, 'optimizer'):\n # Don't reset optimizer if already set.\n self.optimizer = None\n\n # Private attributes to implement compatibility with Layer.\n self._updates = [] # Used in symbolic mode only.\n self._losses = [] # Used in symbolic mode only.\n self._scope = None # Never used.\n self._reuse = None # Never used.\n if context.executing_eagerly():\n self._graph = None\n else:\n self._graph = ops.get_default_graph() # Used in symbolic mode only.\n # A Network does not create weights of its own, thus has no dtype.\n self._dtype = None\n\n # All layers in order of horizontal graph traversal.\n # Entries are unique. Includes input and output layers.\n self._layers = []\n\n # Used in symbolic mode only, only in conjunction with graph-networks\n self._outbound_nodes = []\n self._inbound_nodes = []\n\n self._checkpointable_saver = checkpointable_utils.CheckpointableSaver(\n weakref.ref(self))\n # A zero-argument function which should be called and set back to None as\n # soon as the network is built (only applicable to subclassed Models). Runs\n # restore operations when graph building.\n self._in_progress_restore_finalizer = None\n\n def _init_graph_network(self, inputs, outputs, name=None):\n self._uses_inputs_arg = True\n # Normalize and set self.inputs, self.outputs.\n if isinstance(inputs, (list, tuple)):\n self.inputs = list(inputs) # Tensor or list of tensors.\n else:\n self.inputs = [inputs]\n if isinstance(outputs, (list, tuple)):\n self.outputs = list(outputs)\n else:\n self.outputs = [outputs]\n\n # User-provided argument validation.\n if context.executing_eagerly():\n # Check that all inputs/outputs are DeferredTensors.\n for tensor in self.inputs:\n if not isinstance(tensor, base_layer.DeferredTensor): # pylint: disable=protected-access\n raise TypeError('When eager execution is enabled, '\n 'inputs must come from a call to '\n '`tf.keras.Input` (called after '\n 'tfe.enable_eager_execution()). '\n 'Received invalid input: ' + str(tensor))\n for tensor in self.outputs:\n if not isinstance(tensor, base_layer.DeferredTensor): # pylint: disable=protected-access\n raise TypeError('When eager execution is enabled, '\n 'outputs must come from a call to '\n 'a layer (called after '\n 'tfe.enable_eager_execution()). '\n 'Received invalid output: ' + str(tensor))\n # Check for redundancy in inputs.\n if len(set(self.inputs)) != len(self.inputs):\n raise ValueError('The list of inputs passed to the model '\n 'is redundant. 
'\n 'All inputs should only appear once.'\n ' Found: ' + str(self.inputs))\n for x in self.inputs:\n # Check that x has appropriate `_keras_history` metadata.\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Input tensors to a ' + cls_name + ' ' +\n 'must come from `tf.layers.Input`. '\n 'Received: ' + str(x) +\n ' (missing previous layer metadata).')\n # Check that x is an input tensor.\n # pylint: disable=protected-access\n layer, node_index, tensor_index = x._keras_history\n if len(layer._inbound_nodes) > 1 or (\n layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):\n cls_name = self.__class__.__name__\n logging.warning(cls_name + ' inputs must come from '\n '`tf.layers.Input` (thus holding past layer metadata), '\n 'they cannot be the output of '\n 'a previous non-Input layer. '\n 'Here, a tensor specified as '\n 'input to \"' + self.name + '\" was not an Input tensor, '\n 'it was generated by layer ' + layer.name + '.\\n'\n 'Note that input tensors are '\n 'instantiated via `tensor = tf.layers.Input(shape)`.\\n'\n 'The tensor that caused the issue was: ' + str(x.name))\n for x in self.outputs:\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Output tensors to a ' + cls_name + ' must be '\n 'the output of a TensorFlow `Layer` '\n '(thus holding past layer metadata). Found: ' + str(x))\n\n self._base_init(name=name)\n self._compute_previous_mask = (\n 'mask' in tf_inspect.getargspec(self.call).args or\n hasattr(self, 'compute_mask'))\n # A Network does not create weights of its own, thus it is already\n # built.\n self.built = True\n self._is_graph_network = True\n\n self._input_layers = []\n self._output_layers = []\n self._input_coordinates = []\n self._output_coordinates = []\n\n # This is for performance optimization when calling the Network on new\n # inputs. Every time the Network is called on a set on input tensors,\n # we compute the output tensors, output masks and output shapes in one pass,\n # then cache them here. 
When any of these outputs is queried later, we\n # retrieve it from there instead of recomputing it.\n self._output_mask_cache = {}\n self._output_tensor_cache = {}\n self._output_shape_cache = {}\n\n # Build self._output_layers:\n for x in self.outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n self._output_layers.append(layer)\n self._output_coordinates.append((layer, node_index, tensor_index))\n\n # Build self._input_layers:\n for x in self.inputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n # It's supposed to be an input layer, so only one node\n # and one tensor output.\n assert node_index == 0\n assert tensor_index == 0\n self._input_layers.append(layer)\n self._input_coordinates.append((layer, node_index, tensor_index))\n\n # Keep track of the network's nodes and layers.\n nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(\n self.inputs, self.outputs)\n self._network_nodes = nodes\n self._nodes_by_depth = nodes_by_depth\n self._layers = layers\n self._layers_by_depth = layers_by_depth\n\n self._track_layers(layers)\n\n # Create the node linking internal inputs to internal outputs.\n base_layer.Node(\n outbound_layer=self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=self.inputs,\n output_tensors=self.outputs)\n\n # Fill in the output mask cache.\n masks = []\n for x in self.inputs:\n mask = x._keras_mask if hasattr(x, '_keras_mask') else None # pylint: disable=protected-access\n masks.append(mask)\n mask_cache_key = (generic_utils.object_list_uid(self.inputs) + '_' +\n generic_utils.object_list_uid(masks))\n masks = []\n for x in self.outputs:\n mask = x._keras_mask if hasattr(x, '_keras_mask') else None # pylint: disable=protected-access\n masks.append(mask)\n if len(masks) == 1:\n mask = masks[0]\n else:\n mask = masks\n self._output_mask_cache[mask_cache_key] = mask\n\n # Build self.input_names and self.output_names.\n self.input_names = []\n self.output_names = []\n self._feed_input_names = []\n self._feed_inputs = []\n self._feed_input_shapes = []\n for i, layer in enumerate(self._input_layers):\n self.input_names.append(layer.name)\n if layer.is_placeholder:\n self._feed_input_names.append(layer.name)\n self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))\n # layer.input gives an error in eager mode\n if not context.executing_eagerly():\n self._feed_inputs.append(layer.input)\n for layer in self._output_layers:\n self.output_names.append(layer.name)\n\n def _init_subclassed_network(self, name=None):\n self._base_init(name=name)\n self._is_graph_network = False\n call_args = tf_inspect.getargspec(self.call).args\n if 'training' in call_args:\n self._expects_training_arg = True\n else:\n self._expects_training_arg = False\n if 'inputs' in call_args:\n self._uses_inputs_arg = True\n else:\n self._uses_inputs_arg = False\n self.outputs = None\n self.inputs = None\n self.built = False\n\n def _track_layers(self, layers):\n \"\"\"Add Checkpointable dependencies on a list of Layers.\"\"\"\n weight_layer_index = 0\n for layer_index, layer in enumerate(layers):\n if layer.weights:\n # Keep a separate index for layers which have weights. 
This allows users\n # to insert Layers without weights anywhere in the network without\n # breaking checkpoints.\n self._track_checkpointable(\n layer, name='layer_with_weights-%d' % weight_layer_index,\n overwrite=True)\n weight_layer_index += 1\n # Even if it doesn't have weights, we should still track everything in\n # case it has/will have Checkpointable dependencies.\n self._track_checkpointable(\n layer, name='layer-%d' % layer_index, overwrite=True)\n\n def __setattr__(self, name, value):\n no_dependency = isinstance(value, checkpointable.NoDependency)\n if no_dependency:\n value = value.value\n if isinstance(value, (base_layer.Layer, Network)):\n try:\n is_graph_network = self._is_graph_network\n except AttributeError:\n raise RuntimeError('It looks like you are subclassing `Model` and you '\n 'forgot to call `super(YourClass, self).__init__()`.'\n ' Always start with this line.')\n if not is_graph_network:\n if value not in self._layers:\n self._layers.append(value)\n if hasattr(value, '_use_resource_variables'):\n # In subclassed models, legacy layers (tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True\n if (not no_dependency\n and isinstance(value, checkpointable.CheckpointableBase)):\n # Layer (and therefore Network/Model) inherit from CheckpointableBase\n # rather than Checkpointable, which means there is no Checkpointable\n # __setattr__ override (it would be a performance issue for functional\n # layers). Therefore Model tracks Checkpointable objects itself.\n self._track_checkpointable(\n checkpointable=value, name=name, overwrite=True)\n super(Network, self).__setattr__(name, value)\n\n def add_variable(self, name, shape, dtype=None, initializer=None,\n regularizer=None, trainable=True, constraint=None):\n raise NotImplementedError('`add_variable` is not supported on Networks.')\n\n def add_loss(self, *args, **kwargs):\n if context.executing_eagerly():\n raise NotImplementedError('`add_loss` is not supported on Networks '\n 'when eager execution is enabled.')\n super(Network, self).add_loss(*args, **kwargs)\n\n @property\n def uses_learning_phase(self):\n return any(\n [getattr(x, '_uses_learning_phase', False) for x in self.outputs])\n\n @property\n def stateful(self):\n return any([(hasattr(layer, 'stateful') and layer.stateful)\n for layer in self.layers])\n\n def reset_states(self):\n for layer in self.layers:\n if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):\n layer.reset_states()\n\n @property\n def state_updates(self):\n \"\"\"Returns the `updates` from all layers that are stateful.\n\n This is useful for separating training updates and\n state updates, e.g. 
when we need to update a layer's internal state\n during prediction.\n\n Returns:\n A list of update ops.\n \"\"\"\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n Returns:\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return backend.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n Arguments:\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for layer in self.layers:\n num_param = len(layer.weights)\n layer_weights = weights[:num_param]\n for sw, w in zip(layer.weights, layer_weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n backend.batch_set_value(tuples)\n\n def compute_mask(self, inputs, mask):\n if not self._is_graph_network:\n return None\n\n inputs = generic_utils.to_list(inputs)\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = generic_utils.to_list(mask)\n cache_key = (generic_utils.object_list_uid(inputs)\n + '_' + generic_utils.object_list_uid(masks))\n if cache_key in self._output_mask_cache:\n return self._output_mask_cache[cache_key]\n else:\n _, output_masks = self._run_internal_graph(inputs, mask=masks)\n return output_masks\n\n @property\n def layers(self):\n return self._layers\n\n def get_layer(self, name=None, index=None):\n \"\"\"Retrieves a layer based on either its name (unique) or index.\n\n If `name` and `index` are both provided, `index` will take precedence.\n Indices are based on order of horizontal graph traversal (bottom-up).\n\n Arguments:\n name: String, name of layer.\n index: Integer, index of layer.\n\n Returns:\n A layer instance.\n\n Raises:\n ValueError: In case of invalid layer name or index.\n \"\"\"\n # TODO(fchollet): We could build a dictionary based on layer names\n # since they are constant, but we have not done that yet.\n if index is not None:\n if len(self.layers) <= index:\n raise ValueError('Was asked to retrieve layer at index ' + str(index) +\n ' but model only has ' + str(len(self.layers)) +\n ' layers.')\n else:\n return self.layers[index]\n else:\n if not name:\n raise ValueError('Provide either a layer name or layer index.')\n for layer in self.layers:\n if layer.name == name:\n return layer\n raise ValueError('No such layer: ' + name)\n\n @property\n def updates(self):\n \"\"\"Retrieves the network's updates.\n\n Will only include updates that are either\n unconditional, or conditional on inputs to this model\n (e.g. will not include updates that were created by layers of this model\n outside of the model).\n\n Effectively, `network.updates` behaves like `layer.updates`.\n\n Concrete example:\n\n ```python\n bn = keras.layers.BatchNormalization()\n x1 = keras.layers.Input(shape=(10,))\n _ = bn(x1) # This creates 2 updates.\n\n x2 = keras.layers.Input(shape=(10,))\n y2 = bn(x2) # This creates 2 more updates.\n\n # The BN layer has now 4 updates.\n self.assertEqual(len(bn.updates), 4)\n\n # Let's create a model from x2 to y2.\n model = keras.models.Model(x2, y2)\n\n # The model does not list all updates from its underlying layers,\n # but only the updates that are relevant to it. 
Updates created by layers\n # outside of the model are discarded.\n self.assertEqual(len(model.updates), 2)\n\n # If you keep calling the model, you append to its updates, just like\n # what happens for a layer.\n x3 = keras.layers.Input(shape=(10,))\n y3 = model(x3)\n self.assertEqual(len(model.updates), 4)\n\n # But if you call the inner BN layer independently, you don't affect\n # the model's updates.\n x4 = keras.layers.Input(shape=(10,))\n _ = bn(x4)\n self.assertEqual(len(model.updates), 4)\n ```\n\n Returns:\n A list of update ops.\n \"\"\"\n if context.executing_eagerly():\n return []\n\n if not self.trainable and not self.stateful:\n return []\n\n updates = []\n for layer in self.layers:\n updates += layer.updates\n\n # `updates` might contain irrelevant updates, so it needs to be filtered\n # with respect to inputs the model has been called on.\n if self.inputs:\n relevant_inputs = self.inputs[:]\n else:\n relevant_inputs = []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)\n relevant_conditional_updates = [x for x in updates if x in reachable]\n unconditional_updates = [\n x for x in updates if x._unconditional_update] # pylint: disable=protected-access\n # A layer could be used multiple times in a nested structure,\n # so the updates list must be de-duped.\n return list(set(\n relevant_conditional_updates + unconditional_updates + self._updates))\n\n @property\n def losses(self):\n \"\"\"Retrieves the network's losses.\n\n Will only include losses that are either\n unconditional, or conditional on inputs to this model\n (e.g. will not include losses that depend on tensors\n that aren't inputs to this model).\n\n Returns:\n A list of loss tensors.\n \"\"\"\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.executing_eagerly():\n return losses\n\n if self.inputs:\n relevant_inputs = self.inputs[:]\n else:\n relevant_inputs = []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for layer in self.layers:\n weights += layer.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for layer in self.layers:\n weights += layer.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for layer in self.layers:\n trainable_weights += layer.trainable_weights\n return trainable_weights + weights\n return weights\n\n @property\n def input_spec(self):\n \"\"\"Gets the network's input specs.\n\n Returns:\n A list of `InputSpec` instances (one per input to the model)\n or a single instance if the model has only one input.\n \"\"\"\n # If not a graph network, can't assume anything.\n if not self._is_graph_network:\n return None\n\n specs = []\n for layer in self._input_layers:\n if layer.input_spec is None:\n specs.append(None)\n else:\n if not 
isinstance(layer.input_spec, list):\n raise TypeError('Layer ' + layer.name +\n ' has an input_spec attribute that '\n 'is not a list. We expect a list. '\n 'Found input_spec = ' + str(layer.input_spec))\n specs += layer.input_spec\n if len(specs) == 1:\n return specs[0]\n return specs\n\n def call(self, inputs, training=None, mask=None):\n \"\"\"Calls the model on new inputs.\n\n In this case `call` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n Arguments:\n inputs: A tensor or list of tensors.\n training: Boolean or boolean scalar tensor, indicating whether to run\n the `Network` in training mode or inference mode.\n mask: A mask or list of masks. A mask can be\n either a tensor or None (no mask).\n\n Returns:\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n \"\"\"\n inputs = nest.flatten(inputs)\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = nest.flatten(mask)\n\n if not context.executing_eagerly():\n # Try to retrieve cached outputs if the layer has already been called\n # on these exact inputs.\n cache_key = (generic_utils.object_list_uid(inputs)\n + '_' + generic_utils.object_list_uid(masks))\n if cache_key in self._output_tensor_cache:\n # Cache hit.\n return self._output_tensor_cache[cache_key]\n # Actually apply the network graph to the new inputs.\n outputs, _ = self._run_internal_graph(inputs,\n training=training,\n mask=masks)\n return outputs\n\n def compute_output_shape(self, input_shape):\n if not self._is_graph_network:\n raise NotImplementedError\n\n if isinstance(input_shape, list):\n input_shapes = []\n for shape in input_shape:\n if shape is not None:\n input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))\n else:\n input_shapes.append(None)\n else:\n if input_shape is not None:\n input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]\n else:\n input_shapes = [None]\n\n if len(input_shapes) != len(self._input_layers):\n raise ValueError('Invalid input_shape argument ' + str(input_shape) +\n ': model has ' + str(len(self._input_layers)) +\n ' tensor inputs.')\n\n cache_key = generic_utils.object_list_uid(input_shapes)\n if cache_key not in self._output_shape_cache:\n # Cache miss. 
We have to run the network graph manually (recursive calls\n # to `compute_output_shape`).\n layers_to_output_shapes = {}\n for i in range(len(input_shapes)):\n layer = self._input_layers[i]\n input_shape = input_shapes[i]\n # It's an input layer: then `compute_output_shape` is identity,\n # and there is only one node and one tensor output.\n shape_key = layer.name + '_0_0'\n layers_to_output_shapes[shape_key] = input_shape\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Iterate over nodes, by depth level.\n if len(depth_keys) > 1:\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n if layer in self._input_layers:\n # We've already covered the input layers\n # a few lines above.\n continue\n # Potentially redundant list,\n # same size as node.input_tensors.\n input_shapes = []\n for j in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[j]\n node_index = node.node_indices[j]\n tensor_index = node.tensor_indices[j]\n shape_key = inbound_layer.name + '_%s_%s' % (node_index,\n tensor_index)\n input_shape = layers_to_output_shapes[shape_key]\n input_shapes.append(input_shape)\n\n if len(input_shapes) == 1:\n output_shape = layer.compute_output_shape(input_shapes[0])\n else:\n output_shape = layer.compute_output_shape(input_shapes)\n if isinstance(output_shape, list):\n output_shapes = [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in output_shape\n ]\n else:\n output_shapes = [\n tuple(tensor_shape.TensorShape(output_shape).as_list())\n ]\n\n node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access\n for j in range(len(output_shapes)):\n shape_key = layer.name + '_%s_%s' % (node_index, j)\n layers_to_output_shapes[shape_key] = output_shapes[j]\n\n # Read final output shapes from layers_to_output_shapes.\n output_shapes = []\n for i in range(len(self._output_layers)):\n layer, node_index, tensor_index = self._output_coordinates[i]\n shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)\n output_shapes.append(layers_to_output_shapes[shape_key])\n # Store in cache.\n self._output_shape_cache[cache_key] = output_shapes\n else:\n # Cache hit.\n output_shapes = self._output_shape_cache[cache_key]\n\n if isinstance(output_shapes, list):\n if len(output_shapes) == 1:\n return tensor_shape.TensorShape(output_shapes[0])\n else:\n return [tensor_shape.TensorShape(shape) for shape in output_shapes]\n else:\n return tensor_shape.TensorShape(output_shapes)\n\n def _run_internal_graph(self, inputs, training=None, mask=None):\n \"\"\"Computes output tensors for new inputs.\n\n # Note:\n - Expects `inputs` to be a list (potentially with 1 element).\n - Can be run on non-Keras tensors.\n\n Arguments:\n inputs: List of tensors\n training: Boolean learning phase.\n mask: List of masks (tensors or None).\n\n Returns:\n Three lists: output_tensors, output_masks, output_shapes\n \"\"\"\n # Note: masking support is relevant mainly for Keras.\n # It cannot be factored out without having the fully reimplement the network\n # calling logic on the Keras side. We choose to incorporate it in\n # Network because 1) it may be useful to fully support in tf.layers in\n # the future and 2) Keras is a major user of Network. 
If you don't\n # use masking, it does not interfere with regular behavior at all and you\n # can ignore it.\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = mask\n\n # Dictionary mapping reference tensors to tuples\n # (computed tensor, compute mask)\n # we assume a 1:1 mapping from tensor to mask\n # TODO(fchollet): raise exception when a `.compute_mask()` call\n # does not return a list the same size as `call`\n tensor_map = {}\n for x, y, mask in zip(self.inputs, inputs, masks):\n tensor_map[str(id(x))] = (y, mask)\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if str(id(x)) in tensor_map:\n computed_data.append(tensor_map[str(id(x))])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer (reapplying ops to new inputs).\n with ops.name_scope(layer.name):\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n # Ensure mask propagation if applicable.\n if 'mask' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('mask', computed_mask)\n if 'training' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('training', training)\n\n output_tensors = nest.flatten(\n layer.call(computed_tensor, **kwargs))\n if hasattr(layer, 'compute_mask'):\n output_masks = layer.compute_mask(computed_tensor,\n computed_mask)\n if output_masks is None:\n output_masks = [None for _ in output_tensors]\n else:\n output_masks = nest.flatten(output_masks)\n else:\n output_masks = [None for _ in output_tensors]\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if 'mask' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('mask', computed_masks)\n if 'training' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('training', training)\n\n output_tensors = nest.flatten(\n layer.call(computed_tensors, **kwargs))\n\n if hasattr(layer, 'compute_mask'):\n output_masks = layer.compute_mask(computed_tensors,\n computed_masks)\n if output_masks is None:\n output_masks = [None for _ in output_tensors]\n else:\n output_masks = nest.flatten(output_masks)\n else:\n output_masks = [None for _ in output_tensors]\n\n if not context.executing_eagerly():\n if layer.activity_regularizer is not None:\n regularization_losses = [\n layer.activity_regularizer(x) for x in output_tensors\n ]\n # Apply activity regularizer if any:\n layer.add_loss(regularization_losses, computed_tensors)\n\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors, output_tensors,\n output_masks):\n tensor_map[str(id(x))] = (y, mask)\n\n output_tensors = []\n output_masks = []\n output_shapes = []\n for x in self.outputs:\n assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)\n tensor, mask = tensor_map[str(id(x))]\n output_shapes.append(backend.int_shape(x))\n output_tensors.append(tensor)\n output_masks.append(mask)\n\n 
if len(output_tensors) == 1:\n output_tensors = output_tensors[0]\n if output_shapes is not None:\n output_shapes = output_shapes[0]\n if output_masks is not None:\n output_masks = output_masks[0]\n\n if not context.executing_eagerly():\n # Update cache;\n # keys are based on ids on input tensors and inputs masks.\n cache_key = (generic_utils.object_list_uid(inputs)\n + '_' + generic_utils.object_list_uid(masks))\n self._output_tensor_cache[cache_key] = output_tensors\n self._output_mask_cache[cache_key] = output_masks\n\n if output_shapes is not None:\n input_shapes = [backend.int_shape(x) for x in inputs]\n cache_key = generic_utils.object_list_uid(input_shapes)\n self._output_shape_cache[cache_key] = output_shapes\n\n return output_tensors, output_masks\n\n def get_config(self):\n if not self._is_graph_network:\n raise NotImplementedError\n\n config = {\n 'name': self.name,\n }\n node_conversion_map = {}\n for layer in self.layers:\n if issubclass(layer.__class__, Network):\n # Networks start with a pre-existing node\n # linking their input to output.\n kept_nodes = 1\n else:\n kept_nodes = 0\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in self._network_nodes:\n node_conversion_map[node_key] = kept_nodes\n kept_nodes += 1\n layer_configs = []\n for layer in self.layers: # From the earliest layers on.\n layer_class_name = layer.__class__.__name__\n layer_config = layer.get_config()\n filtered_inbound_nodes = []\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in self._network_nodes:\n # The node is relevant to the model:\n # add to filtered_inbound_nodes.\n if node.arguments:\n try:\n json.dumps(node.arguments)\n kwargs = node.arguments\n except TypeError:\n logging.warning(\n 'Layer ' + layer.name +\n ' was passed non-serializable keyword arguments: ' +\n str(node.arguments) + '. 
They will not be included '\n 'in the serialized model (and thus will be missing '\n 'at deserialization time).')\n kwargs = {}\n else:\n kwargs = {}\n if node.inbound_layers:\n node_data = []\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n tensor_index = node.tensor_indices[i]\n node_key = _make_node_key(inbound_layer.name, node_index)\n new_node_index = node_conversion_map.get(node_key, 0)\n node_data.append(\n [inbound_layer.name, new_node_index, tensor_index, kwargs])\n filtered_inbound_nodes.append(node_data)\n layer_configs.append({\n 'name': layer.name,\n 'class_name': layer_class_name,\n 'config': layer_config,\n 'inbound_nodes': filtered_inbound_nodes,\n })\n config['layers'] = layer_configs\n\n # Gather info about inputs and outputs.\n model_inputs = []\n for i in range(len(self._input_layers)):\n layer, node_index, tensor_index = self._input_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in self._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_inputs.append([layer.name, new_node_index, tensor_index])\n config['input_layers'] = model_inputs\n model_outputs = []\n for i in range(len(self._output_layers)):\n layer, node_index, tensor_index = self._output_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in self._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_outputs.append([layer.name, new_node_index, tensor_index])\n config['output_layers'] = model_outputs\n return copy.deepcopy(config)\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Instantiates a Model from its config (output of `get_config()`).\n\n Arguments:\n config: Model config dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A model instance.\n\n Raises:\n ValueError: In case of improperly formatted config dict.\n \"\"\"\n # Layer instances created during\n # the graph reconstruction process\n created_layers = {}\n\n # Dictionary mapping layer instances to\n # node data that specifies a layer call.\n # It acts as a queue that maintains any unprocessed\n # layer call until it becomes possible to process it\n # (i.e. 
until the input tensors to the call all exist).\n unprocessed_nodes = {}\n\n def add_unprocessed_node(layer, node_data):\n if layer not in unprocessed_nodes:\n unprocessed_nodes[layer] = [node_data]\n else:\n unprocessed_nodes[layer].append(node_data)\n\n def process_node(layer, node_data):\n \"\"\"Deserialize a node.\n\n Arguments:\n layer: layer instance.\n node_data: node config dict.\n\n Raises:\n ValueError: In case of improperly formatted `node_data` dict.\n \"\"\"\n input_tensors = []\n for input_data in node_data:\n inbound_layer_name = input_data[0]\n inbound_node_index = input_data[1]\n inbound_tensor_index = input_data[2]\n if len(input_data) == 3:\n kwargs = {}\n elif len(input_data) == 4:\n kwargs = input_data[3]\n else:\n raise ValueError('Improperly formatted model config.')\n if inbound_layer_name not in created_layers:\n add_unprocessed_node(layer, node_data)\n return\n inbound_layer = created_layers[inbound_layer_name]\n if len(inbound_layer._inbound_nodes) <= inbound_node_index:\n add_unprocessed_node(layer, node_data)\n return\n inbound_node = inbound_layer._inbound_nodes[inbound_node_index]\n input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])\n # Call layer on its inputs, thus creating the node\n # and building the layer if needed.\n if input_tensors:\n if len(input_tensors) == 1:\n layer(input_tensors[0], **kwargs)\n else:\n layer(input_tensors, **kwargs)\n\n def process_layer(layer_data):\n \"\"\"Deserializes a layer, then call it on appropriate inputs.\n\n Arguments:\n layer_data: layer config dict.\n\n Raises:\n ValueError: In case of improperly formatted `layer_data` dict.\n \"\"\"\n layer_name = layer_data['name']\n\n # Instantiate layer.\n from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n\n layer = deserialize_layer(layer_data, custom_objects=custom_objects)\n created_layers[layer_name] = layer\n\n # Gather layer inputs.\n inbound_nodes_data = layer_data['inbound_nodes']\n for node_data in inbound_nodes_data:\n # We don't process nodes (i.e. make layer calls)\n # on the fly because the inbound node may not yet exist,\n # in case of layer shared at different topological depths\n # (e.g. 
a model such as A(B(A(B(x)))))\n add_unprocessed_node(layer, node_data)\n\n # First, we create all layers and enqueue nodes to be processed\n for layer_data in config['layers']:\n process_layer(layer_data)\n # Then we process nodes in order of layer depth.\n # Nodes that cannot yet be processed (if the inbound node\n # does not yet exist) are re-enqueued, and the process\n # is repeated until all nodes are processed.\n while unprocessed_nodes:\n for layer_data in config['layers']:\n layer = created_layers[layer_data['name']]\n if layer in unprocessed_nodes:\n for node_data in unprocessed_nodes.pop(layer):\n process_node(layer, node_data)\n\n name = config.get('name')\n input_tensors = []\n output_tensors = []\n for layer_data in config['input_layers']:\n layer_name, node_index, tensor_index = layer_data\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n input_tensors.append(layer_output_tensors[tensor_index])\n for layer_data in config['output_layers']:\n layer_name, node_index, tensor_index = layer_data\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n output_tensors.append(layer_output_tensors[tensor_index])\n return cls(inputs=input_tensors, outputs=output_tensors, name=name)\n\n def save(self, filepath, overwrite=True, include_optimizer=True):\n \"\"\"Saves the model to a single HDF5 file.\n\n The savefile includes:\n - The model architecture, allowing to re-instantiate the model.\n - The model weights.\n - The state of the optimizer, allowing to resume training\n exactly where you left off.\n\n This allows you to save the entirety of the state of a model\n in a single file.\n\n Saved models can be reinstantiated via `keras.models.load_model`.\n The model returned by `load_model`\n is a compiled model ready to be used (unless the saved model\n was never compiled in the first place).\n\n Arguments:\n filepath: String, path to the file to save the weights to.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n Example:\n\n ```python\n from keras.models import load_model\n\n model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n del model # deletes the existing model\n\n # returns a compiled model\n # identical to the previous one\n model = load_model('my_model.h5')\n ```\n \"\"\"\n if not self._is_graph_network:\n raise NotImplementedError\n\n from tensorflow.python.keras._impl.keras.models import save_model # pylint: disable=g-import-not-at-top\n save_model(self, filepath, overwrite, include_optimizer)\n\n def save_weights(self, filepath, overwrite=True, save_format=None):\n \"\"\"Saves all layer weights.\n\n Either saves in HDF5 or in TensorFlow format based on the `save_format`\n argument.\n\n When saving in HDF5 format, the weight file has:\n - `layer_names` (attribute), a list of strings\n (ordered names of model layers).\n - For every layer, a `group` named `layer.name`\n - For every such layer group, a group attribute `weight_names`,\n a list of strings\n (ordered names of weights tensor of the layer).\n - For every weight in the layer, a dataset\n storing the weight value, named after the weight tensor.\n\n When saving in TensorFlow format, all objects referenced by the network are\n saved in the same format as `tf.train.Checkpoint`, 
including any `Layer`\n instances or `Optimizer` instances assigned to object attributes. For\n networks constructed from inputs and outputs using `tf.keras.Model(inputs,\n outputs)`, `Layer` instances used by the network are tracked/saved\n automatically. For user-defined classes which inherit from `tf.keras.Model`,\n `Layer` instances must be assigned to object attributes, typically in the\n constructor. See the documentation of `tf.train.Checkpoint` and\n `tf.keras.Model` for details.\n\n Arguments:\n filepath: String, path to the file to save the weights to. When saving\n in TensorFlow format, this is the prefix used for checkpoint files\n (multiple files are generated). Note that the '.h5' suffix causes\n weights to be saved in HDF5 format.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or\n '.keras' will default to HDF5 if `save_format` is `None`. Otherwise\n `None` defaults to 'tf'.\n\n Raises:\n ImportError: If h5py is not available when attempting to save in HDF5\n format.\n ValueError: For invalid/unknown format arguments.\n \"\"\"\n filepath_is_h5 = filepath.endswith('.h5') or filepath.endswith('.keras')\n if save_format is None:\n if filepath_is_h5:\n save_format = 'h5'\n else:\n save_format = 'tf'\n else:\n user_format = save_format.lower().strip()\n if user_format in ('tensorflow', 'tf'):\n save_format = 'tf'\n elif user_format in ('hdf5', 'h5', 'keras'):\n save_format = 'h5'\n else:\n raise ValueError(\n 'Unknown format \"%s\". Was expecting one of {\"tf\", \"h5\"}.' % (\n save_format,))\n if save_format == 'tf' and filepath_is_h5:\n raise ValueError(\n ('save_weights got save_format=\"tf\"/\"tensorflow\", but the '\n 'filepath (\"%s\") looks like an HDF5 file. Omit the \".h5\"/\".keras\" '\n 'when saving in TensorFlow format.')\n % filepath)\n\n if save_format == 'h5' and h5py is None:\n raise ImportError(\n '`save_weights` requires h5py when saving in hdf5.')\n if save_format == 'tf':\n check_filepath = filepath + '.index'\n else:\n check_filepath = filepath\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(check_filepath):\n proceed = ask_to_proceed_with_overwrite(check_filepath)\n if not proceed:\n return\n if save_format == 'h5':\n with h5py.File(filepath, 'w') as f:\n saving.save_weights_to_hdf5_group(f, self.layers)\n else:\n self._checkpointable_saver.save(filepath)\n\n def load_weights(self, filepath, by_name=False):\n \"\"\"Loads all layer weights, either from a TensorFlow or an HDF5 weight file.\n\n If `by_name` is False weights are loaded based on the network's\n topology. This means the architecture should be the same as when the weights\n were saved. Note that layers that don't have weights are not taken into\n account in the topological ordering, so adding or removing layers is fine as\n long as they don't have weights.\n\n If `by_name` is True, weights are loaded into layers only if they share the\n same name. This is useful for fine-tuning or transfer-learning models where\n some of the layers have changed.\n\n Only topological loading (`by_name=False`) is supported when loading weights\n from the TensorFlow format. 
Note that topological loading differs slightly\n between TensorFlow and HDF5 formats for user-defined classes inheriting from\n `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the\n TensorFlow format loads based on the object-local names of attributes to\n which layers are assigned in the `Model`'s constructor.\n\n Arguments:\n filepath: String, path to the weights file to load. For weight files in\n TensorFlow format, this is the file prefix (the same as was passed\n to `save_weights`).\n by_name: Boolean, whether to load weights by name or by topological\n order. Only topological loading is supported for weight files in\n TensorFlow format.\n\n Returns:\n When loading a weight file in TensorFlow format, returns the same status\n object as `tf.train.Checkpoint.restore`. When graph building, restore\n ops are run automatically as soon as the network is built (on first call\n for user-defined classes inheriting from `Model`, immediately if it is\n already built).\n\n When loading weights in HDF5 format, returns `None`.\n\n Raises:\n ImportError: If h5py is not available and the weight file is in HDF5\n format.\n \"\"\"\n try:\n pywrap_tensorflow.NewCheckpointReader(filepath)\n save_format = 'tf'\n except errors_impl.DataLossError:\n # The checkpoint is not readable in TensorFlow format. Try HDF5.\n save_format = 'h5'\n if save_format == 'tf':\n status = self._checkpointable_saver.restore(filepath)\n if by_name:\n raise NotImplementedError(\n 'Weights may only be loaded based on topology into Models when '\n 'loading TensorFlow-formatted weights (got by_name=True to '\n 'load_weights).')\n if not context.executing_eagerly():\n finalizer = status.run_restore_ops\n if self.built:\n finalizer()\n else:\n # Hold on to this status object until the network is built (for\n # subclassed Models). Then we'll run restore ops if necessary.\n self._in_progress_restore_finalizer = finalizer\n return status\n if h5py is None:\n raise ImportError(\n '`load_weights` requires h5py when loading weights from HDF5.')\n if self._is_graph_network and not self.built:\n raise NotImplementedError(\n 'Unable to load weights saved in HDF5 format into a subclassed '\n 'Model which has not created its variables yet. 
Call the Model '\n 'first, then load the weights.')\n with h5py.File(filepath, 'r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, self.layers)\n else:\n saving.load_weights_from_hdf5_group(f, self.layers)\n\n def _post_build_cleanup(self):\n super(Network, self)._post_build_cleanup()\n if self._in_progress_restore_finalizer is not None:\n # Runs queued restore operations left over from load_weights when graph\n # building.\n self._in_progress_restore_finalizer()\n self._in_progress_restore_finalizer = None\n\n def _updated_config(self):\n \"\"\"Util shared between different serialization methods.\n\n Returns:\n Model config with Keras version information added.\n \"\"\"\n from tensorflow.python.keras._impl.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top\n\n config = self.get_config()\n model_config = {\n 'class_name': self.__class__.__name__,\n 'config': config,\n 'keras_version': keras_version,\n 'backend': backend.backend()\n }\n return model_config\n\n def to_json(self, **kwargs):\n \"\"\"Returns a JSON string containing the network configuration.\n\n To load a network from a JSON save file, use\n `keras.models.model_from_json(json_string, custom_objects={})`.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `json.dumps()`.\n\n Returns:\n A JSON string.\n \"\"\"\n def get_json_type(obj):\n # If obj is any numpy type\n if type(obj).__module__ == np.__name__:\n return obj.item()\n\n # If obj is a python 'type'\n if type(obj).__name__ == type.__name__:\n return obj.__name__\n\n raise TypeError('Not JSON Serializable:', obj)\n\n model_config = self._updated_config()\n return json.dumps(model_config, default=get_json_type, **kwargs)\n\n def to_yaml(self, **kwargs):\n \"\"\"Returns a yaml string containing the network configuration.\n\n To load a network from a yaml save file, use\n `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n `custom_objects` should be a dictionary mapping\n the names of custom losses / layers / etc to the corresponding\n functions / classes.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `yaml.dump()`.\n\n Returns:\n A YAML string.\n\n Raises:\n ImportError: if yaml module is not found.\n \"\"\"\n if yaml is None:\n raise ImportError('Requires yaml module installed.')\n return yaml.dump(self._updated_config(), **kwargs)\n\n def summary(self, line_length=None, positions=None, print_fn=None):\n \"\"\"Prints a string summary of the network.\n\n Arguments:\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements\n in each line. If not provided,\n defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use. Defaults to `print`.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n \"\"\"\n print_layer_summary(self,\n line_length=line_length,\n positions=positions,\n print_fn=print_fn)\n\n\ndef get_source_inputs(tensor, layer=None, node_index=None):\n \"\"\"Returns the list of input tensors necessary to compute `tensor`.\n\n Output will always be a list of tensors\n (potentially with 1 element).\n\n Arguments:\n tensor: The tensor to start from.\n layer: Origin layer of the tensor. 
Will be\n determined via tensor._keras_history if not provided.\n node_index: Origin node index of the tensor.\n\n Returns:\n List of input tensors.\n \"\"\"\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if not node.inbound_layers:\n # Reached an Input layer, stop recursion.\n return node.input_tensors\n else:\n source_tensors = []\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n previous_sources = get_source_inputs(x, layer, node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if x not in source_tensors:\n source_tensors.append(x)\n return source_tensors\n\n\ndef _make_node_key(layer_name, node_index):\n return layer_name + '_ib-' + str(node_index)\n\n\ndef _map_graph_network(inputs, outputs):\n \"\"\"Validates a network's topology and gather its layers and nodes.\n\n Arguments:\n inputs: List of input tensors.\n outputs: List of outputs tensors.\n\n Returns:\n A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n - nodes: list of Node instances.\n - nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n - layers: list of Layer instances.\n - layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\n Raises:\n ValueError: In case the network is not valid (e.g. disconnected graph).\n \"\"\"\n # Network_nodes: set of nodes included in the graph of layers\n # (not all nodes included in the layers are relevant to the current graph).\n network_nodes = set() # ids of all nodes relevant to the Network\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n layer_indices = {} # dict {layer: index in traversal}\n nodes_in_decreasing_depth = []\n\n def build_map(tensor,\n finished_nodes,\n nodes_in_progress,\n layer,\n node_index,\n tensor_index):\n \"\"\"Builds a map of the graph of layers.\n\n This recursively updates the map `layer_indices`,\n the list `nodes_in_decreasing_depth` and the set `network_nodes`.\n\n Arguments:\n tensor: Some tensor in a graph.\n finished_nodes: Set of nodes whose subgraphs have been traversed\n completely. Useful to prevent duplicated work.\n nodes_in_progress: Set of nodes that are currently active on the\n recursion stack. Useful to detect cycles.\n layer: Layer from which `tensor` comes from. 
If not provided,\n will be obtained from `tensor._keras_history`.\n node_index: Node index from which `tensor` comes from.\n tensor_index: Tensor_index from which `tensor` comes from.\n\n Raises:\n ValueError: if a cycle is detected.\n \"\"\"\n node = layer._inbound_nodes[node_index] # pylint: disable=protected-access\n\n # Prevent cycles.\n if node in nodes_in_progress:\n raise ValueError('The tensor ' + str(tensor) + ' at layer \"' +\n layer.name + '\" is part of a cycle.')\n\n # Don't repeat work for shared subgraphs\n if node in finished_nodes:\n return\n\n node_key = _make_node_key(layer.name, node_index)\n # Update network_nodes.\n network_nodes.add(node_key)\n\n # Store the traversal order for layer sorting.\n if layer not in layer_indices:\n layer_indices[layer] = len(layer_indices)\n\n nodes_in_progress.add(node)\n\n # Propagate to all previous tensors connected to this node.\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n tensor_index = node.tensor_indices[i]\n build_map(x, finished_nodes, nodes_in_progress, layer,\n node_index, tensor_index)\n\n finished_nodes.add(node)\n nodes_in_progress.remove(node)\n nodes_in_decreasing_depth.append(node)\n\n finished_nodes = set()\n nodes_in_progress = set()\n for x in outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n build_map(x, finished_nodes, nodes_in_progress,\n layer=layer,\n node_index=node_index,\n tensor_index=tensor_index)\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.outbound_layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.outbound_layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all layers it is connected to.\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access\n previous_depth = nodes_depths.get(inbound_node, 0)\n nodes_depths[inbound_node] = max(depth + 1, previous_depth)\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = {}\n for node, depth in nodes_depths.items():\n if depth not in nodes_by_depth:\n nodes_by_depth[depth] = []\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = {}\n for layer, depth in layers_depths.items():\n if depth not in layers_by_depth:\n layers_by_depth[depth] = []\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers and self._layers_by_depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys 
= list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = []\n for x in inputs:\n computable_tensors.append(x)\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.outbound_layer\n if layer:\n for x in node.input_tensors:\n if x not in computable_tensors:\n raise ValueError('Graph disconnected: '\n 'cannot obtain value for tensor ' + str(x) +\n ' at layer \"' + layer.name + '\". '\n 'The following previous layers '\n 'were accessed without issue: ' +\n str(layers_with_complete_input))\n for x in node.output_tensors:\n computable_tensors.append(x)\n layers_with_complete_input.append(layer.name)\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise ValueError('The name \"' + name + '\" is used ' +\n str(all_names.count(name)) + ' times in the model. '\n 'All layer names should be unique.')\n return network_nodes, nodes_by_depth, layers, layers_by_depth\n" ]
[ [ "tensorflow.python.keras._impl.keras.backend.batch_get_value", "tensorflow.python.keras._impl.keras.utils.layer_utils.print_summary", "tensorflow.python.keras._impl.keras.backend.backend", "tensorflow.python.util.nest.flatten", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras._impl.keras.utils.tf_utils.get_reachable_from_inputs", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.keras._impl.keras.layers.deserialize", "tensorflow.python.keras._impl.keras.models.save_model", "tensorflow.python.keras._impl.keras.engine.saving.load_weights_from_hdf5_group", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.keras._impl.keras.utils.io_utils.ask_to_proceed_with_overwrite", "tensorflow.python.keras._impl.keras.backend.batch_set_value", "tensorflow.python.keras._impl.keras.engine.saving.load_weights_from_hdf5_group_by_name", "tensorflow.python.keras._impl.keras.utils.generic_utils.object_list_uid", "tensorflow.python.keras._impl.keras.engine.base_layer.Node", "tensorflow.python.keras._impl.keras.backend.int_shape", "tensorflow.python.keras._impl.keras.utils.generic_utils.to_list", "tensorflow.python.keras._impl.keras.engine.saving.save_weights_to_hdf5_group", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.python.framework.ops.get_default_graph" ] ]
os-climate/witness-core
[ "3ef9a44d86804c5ad57deec3c9916348cb3bfbb8", "3ef9a44d86804c5ad57deec3c9916348cb3bfbb8", "3ef9a44d86804c5ad57deec3c9916348cb3bfbb8", "3ef9a44d86804c5ad57deec3c9916348cb3bfbb8" ]
[ "climateeconomics/tests/l1_test_gradient_agriculture_discipline.py", "climateeconomics/tests/_l1_test_gradient_services_discipline.py", "climateeconomics/sos_wrapping/sos_wrapping_dice/carboncycle/carboncycle_discipline.py", "climateeconomics/core/core_emissions/ghg_emissions_model.py" ]
[ "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom os.path import join, dirname\nfrom pandas import read_csv\nfrom pathlib import Path\nfrom sos_trades_core.execution_engine.execution_engine import ExecutionEngine\nfrom sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest\nimport unittest\nimport pandas as pd\nimport numpy as np\n\n\nclass AgricultureJacobianDiscTest(AbstractJacobianUnittest):\n\n def setUp(self):\n\n self.name = 'Test'\n self.ee = ExecutionEngine(self.name)\n\n def analytic_grad_entry(self):\n return [\n self.test_agriculture_discipline_analytic_grad\n ]\n\n def test_agriculture_discipline_analytic_grad(self):\n\n self.model_name = 'agriculture'\n ns_dict = {'ns_witness': f'{self.name}',\n 'ns_functions': f'{self.name}',\n 'ns_public': f'{self.name}',\n 'ns_agriculture': f'{self.name}'\n }\n\n self.ee.ns_manager.add_ns_def(ns_dict)\n\n mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_agriculture.agriculture.agriculture_disc.AgricultureDiscipline'\n builder = self.ee.factory.get_builder_from_module(\n self.model_name, mod_path)\n\n self.ee.factory.set_builders_to_coupling_builder(builder)\n\n self.ee.configure()\n self.ee.display_treeview_nodes()\n\n self.year_start = 2020\n self.year_end = 2055\n self.time_step = 1\n years = np.arange(self.year_start, self.year_end + 1, 1)\n year_range = self.year_end - self.year_start + 1\n\n population = np.array(np.linspace(8000, 9000, year_range))\n self.population_df = pd.DataFrame(\n {\"years\": years, 'population': population})\n self.population_df.index = years\n\n temperature = np.array(np.linspace(1.05, 5, year_range))\n self.temperature_df = pd.DataFrame(\n {\"years\": years, \"temp_atmo\": temperature})\n self.temperature_df.index = years\n\n self.default_kg_to_m2 = {'red meat': 360,\n 'white meat': 16,\n 'milk': 8.9,\n 'eggs': 6.3,\n 'rice and maize': 2.9,\n 'potatoes': 0.9,\n 'fruits and vegetables': 0.8,\n }\n self.default_kg_to_kcal = {'red meat': 2566,\n 'white meat': 1860,\n 'milk': 550,\n 'eggs': 1500,\n 'rice and maize': 1150,\n 'potatoes': 670,\n 'fruits and vegetables': 624,\n }\n red_meat_percentage = np.linspace(6, 1, year_range)\n white_meat_percentage = np.linspace(14, 5, year_range)\n self.red_meat_percentage = pd.DataFrame({\n 'years': years,\n 'red_meat_percentage': red_meat_percentage})\n self.white_meat_percentage = pd.DataFrame({\n 'years': years,\n 'white_meat_percentage': white_meat_percentage})\n\n self.other = np.linspace(0.08, 0.08, year_range)\n\n self.diet_df = pd.DataFrame({'red meat': [11.02],\n 'white meat': [31.11],\n 'milk': [79.27],\n 'eggs': [9.68],\n 'rice and maize': [97.76],\n 'potatoes': [32.93],\n 'fruits and vegetables': [217.62],\n })\n\n self.param = {'year_start': self.year_start,\n 'year_end': self.year_end,\n 'time_step': self.time_step,\n 'diet_df': self.diet_df,\n 'kg_to_kcal_dict': self.default_kg_to_kcal,\n 'population_df': self.population_df,\n 'kg_to_m2_dict': self.default_kg_to_m2,\n 'red_meat_percentage': 
self.red_meat_percentage,\n 'white_meat_percentage': self.white_meat_percentage,\n 'other_use_agriculture': self.other\n }\n\n values_dict = {f'{self.name}.year_start': self.year_start,\n f'{self.name}.year_end': self.year_end,\n f'{self.name}.{self.model_name}.diet_df': self.diet_df,\n f'{self.name}.{self.model_name}.kg_to_kcal_dict': self.default_kg_to_kcal,\n f'{self.name}.{self.model_name}.kg_to_m2_dict': self.default_kg_to_m2,\n f'{self.name}.population_df': self.population_df,\n f'{self.name}.temperature_df': self.temperature_df,\n f'{self.name}.red_meat_percentage': self.red_meat_percentage,\n f'{self.name}.white_meat_percentage': self.white_meat_percentage,\n f'{self.name}.{self.model_name}.other_use_agriculture': self.other,\n }\n self.ee.dm.set_values_from_dict(values_dict)\n\n self.ee.execute()\n\n disc_techno = self.ee.root_process.sos_disciplines[0]\n #AbstractJacobianUnittest.DUMP_JACOBIAN = True\n self.check_jacobian(location=dirname(__file__), filename=f'jacobian_agriculture_discipline.pkl', discipline=disc_techno,\n step=1e-15, derr_approx='complex_step',\n inputs=[f'{self.name}.population_df', \n f'{self.name}.temperature_df',\n f'{self.name}.red_meat_percentage',\n f'{self.name}.white_meat_percentage',\n ],\n outputs=[f'{self.name}.total_food_land_surface'])\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport unittest\nimport numpy as np\nimport pandas as pd\nfrom os.path import join, dirname\nfrom pandas import DataFrame, read_csv\nfrom scipy.interpolate import interp1d\n\nfrom sos_trades_core.execution_engine.execution_engine import ExecutionEngine\nfrom sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest\n\n\nclass ServicesJacobianDiscTest(AbstractJacobianUnittest):\n #AbstractJacobianUnittest.DUMP_JACOBIAN = True\n\n def setUp(self):\n\n self.name = 'Test'\n self.ee = ExecutionEngine(self.name)\n self.year_start = 2020\n self.year_end = 2050\n self.time_step = 1\n self.years = np.arange(self.year_start, self.year_end + 1, self.time_step)\n self.nb_per = round((self.year_end - self.year_start) / self.time_step + 1)\n # -------------------------\n # input\n data_dir = join(dirname(__file__), 'data')\n global_data_dir = join(dirname(dirname(__file__)), 'data')\n\n total_workforce_df = read_csv(join(data_dir, 'workingage_population_df.csv'))\n total_workforce_df = total_workforce_df[total_workforce_df['years']<=self.year_end]\n #multiply ageworking pop by employment rate and by % in services\n workforce = total_workforce_df['population_1570']* 0.659 * 0.509\n self.workforce_df = pd.DataFrame({'years': self.years, 'workforce': workforce})\n\n #Energy_supply\n brut_net = 1/1.45\n share_indus = 0.37\n #prepare energy df \n energy_outlook = pd.DataFrame({\n 'year': [2010, 2017, 2018, 2025, 2030, 2035, 2040, 2050, 2060, 2100],\n 'energy': [149.483879, 162.7848774, 166.4685636, 180.7072889, 189.6932084, 197.8418842, 206.1201182, 220.000, 250.0, 300.0]})\n f2 = interp1d(energy_outlook['year'], 
energy_outlook['energy'])\n #Find values for 2020, 2050 and concat dfs \n energy_supply = f2(np.arange(self.year_start, self.year_end+1))\n energy_supply_values = energy_supply * brut_net * share_indus\n energy_supply_df = pd.DataFrame({'years': self.years, 'Total production': energy_supply_values})\n energy_supply_df.index = self.years\n self.energy_supply_df = energy_supply_df\n #energy_supply_df.loc[2020, 'Total production'] = 91.936\n\n #Investment growth at 2% \n init_value = 25\n invest_serie = []\n invest_serie.append(init_value)\n for year in np.arange(1, self.nb_per):\n invest_serie.append(invest_serie[year - 1] * 1.02)\n self.total_invest = pd.DataFrame({'years': self.years, 'investment': invest_serie})\n \n #damage\n self.damage_df = pd.DataFrame({'years': self.years, 'damages': np.zeros(self.nb_per), 'damage_frac_output': np.zeros(self.nb_per),\n 'base_carbon_price': np.zeros(self.nb_per)})\n self.damage_df.index = self.years\n self.damage_df['damage_frac_output'] = 1e-2 \n \n \n def analytic_grad_entry(self):\n return [\n self.test_services_analytic_grad,\n self.test_services_withotudamagetoproductivity\n ]\n\n def test_services_analytic_grad(self):\n\n self.model_name = 'Services'\n ns_dict = {'ns_witness': f'{self.name}',\n 'ns_energy_mix': f'{self.name}',\n 'ns_public': f'{self.name}',\n 'ns_functions': f'{self.name}',\n 'ns_ref':f'{self.name}' }\n \n self.ee.ns_manager.add_ns_def(ns_dict)\n\n mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_sectors.services.services_discipline.ServicesDiscipline'\n builder = self.ee.factory.get_builder_from_module(\n self.model_name, mod_path)\n\n self.ee.factory.set_builders_to_coupling_builder(builder)\n\n self.ee.configure()\n self.ee.display_treeview_nodes()\n\n inputs_dict = {f'{self.name}.year_start': self.year_start,\n f'{self.name}.year_end': self.year_end,\n f'{self.name}.time_step': self.time_step,\n f'{self.name}.damage_to_productivity': True,\n f'{self.name}.frac_damage_prod': 0.3,\n f'{self.name}.energy_production': self.energy_supply_df,\n f'{self.name}.damage_df': self.damage_df,\n f'{self.name}.workforce_df': self.workforce_df,\n f'{self.name}.sector_investment': self.total_invest,\n f'{self.name}.alpha': 0.5\n }\n\n self.ee.load_study_from_input_dict(inputs_dict)\n disc_techno = self.ee.root_process.sos_disciplines[0]\n self.check_jacobian(location=dirname(__file__), filename=f'jacobian_services_discipline.pkl',\n discipline=disc_techno, step=1e-15, derr_approx='complex_step',\n inputs=[f'{self.name}.energy_production',\n f'{self.name}.damage_df',\n f'{self.name}.workforce_df',\n f'{self.name}.sector_investment'],\n outputs=[f'{self.name}.production_df', \n f'{self.name}.capital_df',\n f'{self.name}.emax_enet_constraint'])\n \n def test_services_withotudamagetoproductivity(self):\n\n self.model_name = 'Services'\n ns_dict = {'ns_witness': f'{self.name}',\n 'ns_energy_mix': f'{self.name}',\n 'ns_public': f'{self.name}',\n 'ns_functions': f'{self.name}',\n 'ns_ref':f'{self.name}' }\n \n self.ee.ns_manager.add_ns_def(ns_dict)\n\n mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_sectors.services.services_discipline.ServicesDiscipline'\n builder = self.ee.factory.get_builder_from_module(\n self.model_name, mod_path)\n\n self.ee.factory.set_builders_to_coupling_builder(builder)\n\n self.ee.configure()\n self.ee.display_treeview_nodes()\n\n inputs_dict = {f'{self.name}.year_start': self.year_start,\n f'{self.name}.year_end': self.year_end,\n f'{self.name}.time_step': self.time_step,\n 
f'{self.name}.damage_to_productivity': False,\n f'{self.name}.frac_damage_prod': 0.3,\n f'{self.name}.energy_production': self.energy_supply_df,\n f'{self.name}.damage_df': self.damage_df,\n f'{self.name}.workforce_df': self.workforce_df,\n f'{self.name}.sector_investment': self.total_invest,\n f'{self.name}.alpha': 0.5\n }\n\n self.ee.load_study_from_input_dict(inputs_dict)\n disc_techno = self.ee.root_process.sos_disciplines[0]\n self.check_jacobian(location=dirname(__file__), filename=f'jacobian_services_discipline_withoutdamage.pkl',\n discipline=disc_techno, step=1e-15, derr_approx='complex_step',\n inputs=[f'{self.name}.energy_production',\n f'{self.name}.damage_df',\n f'{self.name}.workforce_df',\n f'{self.name}.sector_investment'],\n outputs=[f'{self.name}.production_df', \n f'{self.name}.capital_df',\n f'{self.name}.emax_enet_constraint'])\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n# coding: utf-8\nfrom sos_trades_core.execution_engine.sos_discipline import SoSDiscipline\nfrom climateeconomics.core.core_dice.geophysical_model import CarbonCycle\nfrom sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\nfrom sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\nimport pandas as pd\n\n\nclass CarbonCycleDiscipline(SoSDiscipline):\n\n # ontology information\n _ontology_data = {\n 'label': 'Carbon Cycle DICE Model',\n 'type': 'Research',\n 'source': 'SoSTrades Project',\n 'validated': '',\n 'validated_by': 'SoSTrades Project',\n 'last_modification_date': '',\n 'category': '',\n 'definition': '',\n 'icon': 'fas fa-recycle fa-fw',\n 'version': '',\n }\n _maturity = 'Research'\n\n DESC_IN = {\n\n 'year_start': {'type': 'int', 'default': 2015, 'unit': 'year', 'visibility': 'Shared', 'namespace': 'ns_dice'},\n 'year_end': {'type': 'int', 'default': 2100, 'unit': 'year', 'visibility': 'Shared', 'namespace': 'ns_dice'},\n 'time_step': {'type': 'int', 'default': 5, 'unit': 'year per period', 'visibility': 'Shared', 'namespace': 'ns_dice'},\n 'conc_lower_strata': {'type': 'int', 'default': 1720, 'unit': 'Gtc'},\n 'conc_upper_strata': {'type': 'int', 'default': 360, 'unit': 'Gtc'},\n 'conc_atmo': {'type': 'int', 'default': 588, 'unit': 'Gtc'},\n 'init_conc_atmo': {'type': 'int', 'default': 851, 'unit': 'Gtc'},\n 'init_upper_strata': {'type': 'int', 'default': 460, 'unit': 'Gtc'},\n 'init_lower_strata': {'type': 'int', 'default': 1740, 'unit': 'Gtc'},\n 'b_twelve': {'type': 'float', 'visibility': SoSDiscipline.INTERNAL_VISIBILITY, 'default': 0.12, 'unit': '[-]'},\n 'b_twentythree': {'type': 'float', 'visibility': SoSDiscipline.INTERNAL_VISIBILITY, 'default': 0.007, 'unit': '[-]'},\n 'lo_mat': {'type': 'float', 'default': 10},\n 'lo_mu': {'type': 'float', 'default': 100},\n 'lo_ml': {'type': 'float', 'default': 1000},\n 'emissions_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}}\n\n DESC_OUT = {'carboncycle_df': {'type': 'dataframe',\n 
'visibility': 'Shared', 'namespace': 'ns_scenario'}}\n\n def run(self):\n # get input of discipline\n param_in = self.get_sosdisc_inputs()\n\n # compute output\n carboncycle = CarbonCycle(param_in)\n carboncycle_df = carboncycle.compute(param_in)\n dict_values = {'carboncycle_df': carboncycle_df}\n\n # store data\n self.store_sos_outputs_values(dict_values)\n\n def get_chart_filter_list(self):\n\n # For the outputs, making a graph for tco vs year for each range and for specific\n # value of ToT with a shift of five year between then\n chart_filters = []\n\n chart_list = ['atmosphere concentration',\n 'Atmospheric concentrations parts per million']\n # First filter to deal with the view : program or actor\n chart_filters.append(ChartFilter(\n 'Charts', chart_list, chart_list, 'charts'))\n\n return chart_filters\n\n def get_post_processing_list(self, chart_filters=None):\n\n # For the outputs, making a graph for tco vs year for each range and for specific\n # value of ToT with a shift of five year between then\n\n instanciated_charts = []\n\n # Overload default value with chart filter\n if chart_filters is not None:\n for chart_filter in chart_filters:\n if chart_filter.filter_key == 'charts':\n chart_list = chart_filter.selected_values\n carboncycle_df = self.get_sosdisc_outputs('carboncycle_df')\n carboncycle_df = resize_df(carboncycle_df)\n\n if 'atmosphere concentration' in chart_list:\n\n #carboncycle_df = discipline.get_sosdisc_outputs('carboncycle_df')\n atmo_conc = carboncycle_df['atmo_conc']\n\n years = list(atmo_conc.index)\n\n year_start = years[0]\n year_end = years[len(years) - 1]\n\n max_value = atmo_conc.values.max()\n\n chart_name = 'atmosphere concentration of carbon'\n\n new_chart = TwoAxesInstanciatedChart('years', 'carbon concentration (Gtc)',\n [year_start - 5, year_end + 5], [\n 0, max_value * 1.1],\n chart_name)\n\n visible_line = True\n\n ordonate_data = list(atmo_conc)\n\n new_series = InstanciatedSeries(\n years, ordonate_data, 'atmosphere concentration', 'lines', visible_line)\n\n new_chart.series.append(new_series)\n\n instanciated_charts.append(new_chart)\n\n if 'Atmospheric concentrations parts per million' in chart_list:\n\n #carboncycle_df = discipline.get_sosdisc_outputs('carboncycle_df')\n ppm = carboncycle_df['ppm']\n\n years = list(ppm.index)\n\n year_start = years[0]\n year_end = years[len(years) - 1]\n\n max_value = ppm.values.max()\n\n chart_name = 'Atmospheric concentrations parts per million'\n\n new_chart = TwoAxesInstanciatedChart('years', 'Atmospheric concentrations parts per million',\n [year_start - 5, year_end + 5], [\n 0, max_value * 1.1],\n chart_name)\n\n visible_line = True\n\n ordonate_data = list(ppm)\n\n new_series = InstanciatedSeries(\n years, ordonate_data, 'ppm', 'lines', visible_line)\n\n new_chart.series.append(new_series)\n\n instanciated_charts.append(new_chart)\n\n return instanciated_charts\n\n\ndef resize_df(df):\n\n index = df.index\n i = len(index) - 1\n key = df.keys()\n to_check = df.loc[index[i], key[0]]\n\n while to_check == 0:\n i = i - 1\n to_check = df.loc[index[i], key[0]]\n\n size_diff = len(index) - i\n new_df = pd.DataFrame()\n\n if size_diff == 0:\n new_df = df\n else:\n for element in key:\n new_df[element] = df[element][0:i + 1]\n new_df.index = index[0: i + 1]\n\n return new_df\n\n\ndef resize_array(array):\n\n i = len(array) - 1\n to_check = array[i]\n\n while to_check == 0:\n i = i - 1\n to_check = to_check = array[i]\n\n size_diff = len(array) - i\n new_array = array[0:i]\n\n return new_array\n\n\ndef 
resize_index(index, array):\n\n l = len(array)\n new_index = index[0:l]\n return new_index\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport numpy as np\nimport pandas as pd\nfrom energy_models.core.stream_type.carbon_models.nitrous_oxide import N2O\n\n\nclass GHGEmissions():\n '''\n Used to compute ghg emissions from different sectors\n '''\n GHG_TYPE_LIST = [N2O.name, 'CO2', 'CH4']\n\n def __init__(self, param):\n '''\n Constructor\n '''\n self.param = param\n self.configure_parameters()\n self.create_dataframe()\n self.sector_list = ['energy', 'land', 'industry']\n\n def configure_parameters(self):\n self.year_start = self.param['year_start']\n self.year_end = self.param['year_end']\n self.time_step = self.param['time_step']\n\n self.CO2_land_emissions = self.param['CO2_land_emissions']\n self.CO2_indus_emissions_df = self.param['CO2_indus_emissions_df']\n self.GHG_total_energy_emissions = self.param['GHG_total_energy_emissions']\n # Conversion factor 1Gtc = 44/12 GT of CO2\n # Molar masses C02 (12+2*16=44) / C (12)\n self.gtco2_to_gtc = 44 / 12\n\n self.gwp_20 = self.param['GHG_global_warming_potential20']\n self.gwp_100 = self.param['GHG_global_warming_potential100']\n\n def configure_parameters_update(self, inputs_dict):\n\n self.CO2_land_emissions = inputs_dict['CO2_land_emissions']\n self.CO2_indus_emissions_df = inputs_dict['CO2_indus_emissions_df']\n self.GHG_total_energy_emissions = inputs_dict['GHG_total_energy_emissions']\n self.create_dataframe()\n\n def create_dataframe(self):\n '''\n Create the dataframe and fill it with values at year_start\n '''\n # declare class variable as local variable\n year_start = self.year_start\n year_end = self.year_end\n\n self.years_range = np.arange(\n year_start, year_end + 1, self.time_step)\n\n self.ghg_emissions_df = pd.DataFrame({'years': self.years_range})\n self.gwp_emissions = pd.DataFrame({'years': self.years_range})\n\n def compute_land_emissions(self):\n '''\n Compute emissions from land\n '''\n\n self.ghg_emissions_df['CO2 land_emissions'] = self.CO2_land_emissions.drop(\n 'years', axis=1).sum(axis=1).values\n self.ghg_emissions_df['CH4 land_emissions'] = 0.\n self.ghg_emissions_df['N2O land_emissions'] = 0.\n\n def compute_total_emissions(self):\n '''\n Total emissions taking energy emissions as inputs\n '''\n self.ghg_emissions_df['CO2 industry_emissions'] = self.CO2_indus_emissions_df['indus_emissions'].values\n self.ghg_emissions_df['CH4 industry_emissions'] = 0.\n self.ghg_emissions_df['N2O industry_emissions'] = 0.\n\n for ghg in self.GHG_TYPE_LIST:\n self.ghg_emissions_df[f'{ghg} energy_emissions'] = self.GHG_total_energy_emissions[f'Total {ghg} emissions'].values\n\n self.ghg_emissions_df[f'Total {ghg} emissions'] = self.ghg_emissions_df[f'{ghg} land_emissions'].values + \\\n self.ghg_emissions_df[f'{ghg} industry_emissions'].values + \\\n self.ghg_emissions_df[f'{ghg} energy_emissions'].values\n\n def compute_gwp(self):\n\n for ghg in self.GHG_TYPE_LIST:\n\n self.gwp_emissions[f'{ghg}_20'] = 
self.ghg_emissions_df[f'Total {ghg} emissions'] * self.gwp_20[ghg]\n self.gwp_emissions[f'{ghg}_100'] = self.ghg_emissions_df[f'Total {ghg} emissions'] * self.gwp_100[ghg]\n\n def compute_co2_emissions_for_carbon_cycle(self):\n co2_emissions_df = self.ghg_emissions_df[['years', 'Total CO2 emissions']].rename(\n {'Total CO2 emissions': 'total_emissions'}, axis=1)\n\n co2_emissions_df['cum_total_emissions'] = co2_emissions_df['total_emissions'].cumsum(\n )\n return co2_emissions_df\n\n def compute(self):\n \"\"\"\n Compute outputs of the model\n \"\"\"\n\n self.compute_land_emissions()\n self.compute_total_emissions()\n self.compute_gwp()\n" ]
[ [ "pandas.DataFrame", "numpy.linspace", "numpy.arange" ], [ "pandas.DataFrame", "scipy.interpolate.interp1d", "numpy.arange", "numpy.zeros" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame", "numpy.arange" ] ]
BYU-Hydroinformatics/pywaterml
[ "d4c88a0402dec61d466edb1fa5dbda4544f7a738" ]
[ "tests/test.py" ]
[ "# from pywaterml.waterML import WaterMLOperations\n\nimport sys\nsys.path.append(\"/Users/ElkiGio/pypack/pywaterml\")\nimport pywaterml\nfrom pywaterml.waterML import WaterMLOperations\nimport time\nimport pandas as pd\nurl_testing = [\n # # [\"http://gs-service-production.geodab.eu/gs-service/services/essi/view/whos-country/hiscentral.asmx/GetWaterOneFlowServiceInfo\",True],\n # # [\"http://gs-service-production.geodab.eu/gs-service/services/essi/view/whos-transboundary/hiscentral.asmx/GetWaterOneFlowServiceInfo\",True],\n [\"http://hydroportal.cuahsi.org/nevados/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/para_la_naturaleza/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/CALVIN_HHS/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/CCBEPDAP/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/glacialridge/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/KentState/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/czo_boulder/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://128.187.106.131/app/index.php/dr/services/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/Ramsar_atacama/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydrolite.ddns.net/italia/hsl-bol/index.php/default/services/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/czo_catalina/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://hydroportal.cuahsi.org/czo_catalina/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://gs-service-production.geodab.eu/gs-service/services/essi/view/gs-view-and(whos-country,gs-view-country(RUS))/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://gs-service-production.geodab.eu/gs-service/services/essi/view/whos-arctic/cuahsi_1_1.asmx?WSDL\", False],\n [\"http://gs-service-production.geodab.eu/gs-service/services/essi/view/whos-plata/cuahsi_1_1.asmx?WSDL\", False]\n]\n\n# url_testing = \"http://hydroportal.cuahsi.org/czo_catalina/cuahsi_1_1.asmx?WSDL\"\n# water = WaterMLOperations(url = url_testing)\n\ndef main():\n try:\n for url in url_testing:\n print(\"TESTING \", url)\n single_test_quick(url[0],url[1])\n print(\"Successful testing the different Endpoints\")\n except Exception as e:\n print(e)\n\n\ndef single_test_quick(url_testing,url_catalog_testing = False):\n start_time = time.time()\n try:\n if url_catalog_testing:\n print(\"***************WOF GetWaterOneFlowServicesInfo****************\")\n water = WaterMLOperations(url = url_testing)\n wateroneflowservices = water.GetWaterOneFlowServicesInfo()\n print(\"WaterOneFlow Services Available\",wateroneflowservices)\n\n print(\"*************WOF Available and Not available******************\")\n available_wof = water.AvailableServices()\n # print(available_wof)\n good_services = available_wof['available']\n bad_services = available_wof['broken']\n print(\"From Available Services\",\"Services Working: \", len(good_services), \"Services Not Working: \", len(bad_services))\n else:\n water = WaterMLOperations(url = url_testing)\n sites = water.GetSites()\n variables = water.GetVariables()\n print(\"************Passing: GETSITES***************\")\n print(len(sites))\n\n print(\"************Passing: GETVARIABLES***********\")\n print((variables['variables']))\n\n print(\"***********Passing: GETSITEINFO****************\")\n fullSiteCodeFirstSite = sites[0]['fullSiteCode']\n print(fullSiteCodeFirstSite)\n siteInfo = water.GetSiteInfo(fullSiteCodeFirstSite)\n print(len(siteInfo['siteInfo']))\n #\n # print(\"**********Passing: 
GETSITESBYBOUNDINGBOX***************\")\n #\n # \"\"\"\n # UNCOMMENT TO USE WITH THE epsg:3857\n # \"\"\"\n # # BoundsRearranged = [-7398417.229789019,2048546.619479188,-7368453.914701229,2080306.2047316788]\n # # BoundsRearranged = [-7401666.338691997, 2060618.8113541743, -7378996.124391947, 2071003.588530944]\n # # SitesByBoundingBox = water.GetSitesByBoxObject(BoundsRearranged,'epsg:3857')\n # \"\"\"\n # UNCOMMENT TO USE WITH THE epsg:4326\n # \"\"\"\n # BoundsRearranged = [-66.4903,18.19699,-66.28665,18.28559]\n # SitesByBoundingBox = water.GetSitesByBoxObject(BoundsRearranged,'epsg:4326')\n # print(\"The number of sites in the bounding box is: \",len(SitesByBoundingBox))\n\n\n print(\"***********Passing: GETVALUES****************\")\n if len(siteInfo['siteInfo']) > 0:\n\n fullVariableCodeFirstVariable = siteInfo['siteInfo'][0]['fullVariableCode']\n # methodID = siteInfo['siteInfo'][0]['methodID']\n start_date = siteInfo['siteInfo'][0]['beginDateTime'].split('T')[0]\n end_date = siteInfo['siteInfo'][0]['endDateTime'].split('T')[0]\n variableResponse = water.GetValues(fullSiteCodeFirstSite, fullVariableCodeFirstVariable, start_date, end_date)\n df_values = pd.DataFrame.from_dict(variableResponse['values'])\n print(list(df_values))\n print(\"The variable and site contains values \",len(variableResponse['values']))\n else:\n print(\"No values for the variable and site selected\")\n\n\n # return\n print(\"***********Passing: GETSITESBYVARIABLE****************\")\n\n variablesTest = [variables['variables'][0]['variableCode']]\n print(\"Variable for testing: \", variablesTest)\n\n \"\"\"\n USING A COOKIE CUTTER\n \"\"\"\n sitesFiltered = water.GetSitesByVariable(variablesTest,sites)\n print(\"Sites using the GetSitesByVariable With CookieCutter\", len(sitesFiltered))\n\n \"\"\"\n WITHOUT USING A COOKIE CUTTER\n \"\"\"\n sitesFiltered = water.GetSitesByVariable(variablesTest)\n print(\"Sites using the GetSitesByVariable No CookieCutter\", len(sitesFiltered))\n\n\n if len(siteInfo['siteInfo']) > 0:\n print(\"**********Passing: INTERPOLATIONS***************\")\n interpol_b = water.GetInterpolation(variableResponse, 'backward')\n interpol_f = water.GetInterpolation(variableResponse, 'forward')\n interpol_m = water.GetInterpolation(variableResponse, 'mean')\n print(\"The lenght of the interpolated values is \",len(interpol_f))\n print(\"The lenght of the interpolated values is\",len(interpol_b))\n print(\"The lenght of the interpolated values is\",len(interpol_m))\n\n print(\"**********Passing: GETMONTHLYAVERAGES***************\")\n\n m_avg = water.GetMonthlyAverage(None, fullSiteCodeFirstSite, fullVariableCodeFirstVariable, start_date, end_date)\n print(\"Monthly Averages:\",m_avg)\n\n print(\"**********Passing: GETCLUSTERSMONTHLYAVG***************\")\n y_pred = water.GetClustersMonthlyAvg(sites,siteInfo['siteInfo'][0]['variableCode'])\n print(\"Clusters\", len(y_pred))\n\n else:\n print(\"No values for the variable and site selected\")\n\n\n except Exception as e:\n print(e)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n print(\"Test was sucessful\")\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
jamartinh/d3rlpy
[ "87f478451674ef769eb8ce74e3663c4d3b1c325d" ]
[ "tests/preprocessing/test_action_scalers.py" ]
[ "import pytest\nimport torch\nimport numpy as np\nimport gym\n\nfrom d3rlpy.dataset import MDPDataset, Episode\nfrom d3rlpy.preprocessing import create_action_scaler\nfrom d3rlpy.preprocessing import MinMaxActionScaler\n\n\n@pytest.mark.parametrize(\"scaler_type\", [\"min_max\"])\ndef test_create_action_scaler(scaler_type):\n scaler = create_action_scaler(scaler_type)\n if scaler_type == \"min_max\":\n assert isinstance(scaler, MinMaxActionScaler)\n\n\n@pytest.mark.parametrize(\"action_size\", [10])\n@pytest.mark.parametrize(\"batch_size\", [32])\ndef test_min_max_action_scaler(action_size, batch_size):\n actions = np.random.random((batch_size, action_size)).astype(\"f4\")\n\n max = actions.max(axis=0)\n min = actions.min(axis=0)\n\n scaler = MinMaxActionScaler(maximum=max, minimum=min)\n\n # check range\n y = scaler.transform(torch.tensor(actions))\n assert np.all(y.numpy() >= -1.0)\n assert np.all(y.numpy() <= 1.0)\n\n x = torch.rand((batch_size, action_size))\n y = scaler.transform(x)\n ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))\n assert np.allclose(y.numpy(), ref_y * 2.0 - 1.0)\n\n assert scaler.get_type() == \"min_max\"\n params = scaler.get_params()\n assert np.all(params[\"minimum\"] == min)\n assert np.all(params[\"maximum\"] == max)\n assert torch.allclose(scaler.reverse_transform(y), x, atol=1e-6)\n\n\n@pytest.mark.parametrize(\"observation_shape\", [(100,)])\n@pytest.mark.parametrize(\"action_size\", [10])\n@pytest.mark.parametrize(\"batch_size\", [32])\ndef test_min_max_action_scaler_with_episode(\n observation_shape, action_size, batch_size\n):\n shape = (batch_size,) + observation_shape\n observations = np.random.random(shape)\n actions = np.random.random((batch_size, action_size)).astype(\"f4\")\n rewards = np.random.random(batch_size)\n terminals = np.random.randint(2, size=batch_size)\n terminals[-1] = 1.0\n\n dataset = MDPDataset(\n observations=observations,\n actions=actions,\n rewards=rewards,\n terminals=terminals,\n )\n\n max = actions.max(axis=0)\n min = actions.min(axis=0)\n\n scaler = MinMaxActionScaler()\n scaler.fit(dataset.episodes)\n\n x = torch.rand((batch_size, action_size))\n\n y = scaler.transform(x)\n ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))\n\n assert np.allclose(y.numpy(), ref_y * 2.0 - 1.0)\n params = scaler.get_params()\n assert np.all(params[\"minimum\"] == min)\n assert np.all(params[\"maximum\"] == max)\n\n\ndef test_min_max_action_scaler_with_env():\n env = gym.make(\"Pendulum-v0\")\n\n scaler = MinMaxActionScaler()\n scaler.fit_with_env(env)\n\n assert np.all(scaler._minimum == env.action_space.low)\n assert np.all(scaler._maximum == env.action_space.high)\n" ]
[ [ "torch.rand", "torch.tensor", "numpy.random.randint", "numpy.all", "numpy.random.random" ] ]
Ahsantw/cudf
[ "e099688d5ca7dd20104930485a829881a68c522a" ]
[ "python/cudf/cudf/tests/test_sparse_df.py" ]
[ "# Copyright (c) 2018, NVIDIA CORPORATION.\nimport os.path\n\nimport numpy as np\nimport pyarrow as pa\nimport pytest\nfrom numba import cuda\n\nfrom cudf import DataFrame, Series\nfrom cudf.comm.gpuarrow import GpuArrowReader\nfrom cudf.testing._utils import assert_eq\n\n\ndef read_data():\n import pandas as pd\n\n basedir = os.path.dirname(__file__)\n datapath = os.path.join(basedir, \"data\", \"ipums.pkl\")\n try:\n df = pd.read_pickle(datapath)\n except Exception as excpr:\n if type(excpr).__name__ == \"FileNotFoundError\":\n pytest.skip(\".pkl file is not found\")\n else:\n print(type(excpr).__name__)\n\n names = []\n arrays = []\n for k in df.columns:\n arrays.append(pa.Array.from_pandas(df[k]))\n names.append(k)\n batch = pa.RecordBatch.from_arrays(arrays, names)\n schema = batch.schema.serialize().to_pybytes()\n schema = np.ndarray(\n shape=len(schema), dtype=np.byte, buffer=bytearray(schema)\n )\n data = batch.serialize().to_pybytes()\n data = np.ndarray(shape=len(data), dtype=np.byte, buffer=bytearray(data))\n darr = cuda.to_device(data)\n return df, schema, darr\n\n\ndef test_fillna():\n _, schema, darr = read_data()\n gar = GpuArrowReader(schema, darr)\n masked_col = gar[8]\n sr = Series(data=masked_col.data)\n dense = sr.nans_to_nulls().fillna(123)\n np.testing.assert_equal(123, dense.to_numpy())\n assert len(dense) == len(sr)\n assert dense.null_count == 0\n\n\ndef test_to_dense_array():\n data = np.random.random(8)\n mask = np.asarray([0b11010110], dtype=np.byte)\n\n sr = Series.from_masked_array(data=data, mask=mask, null_count=3)\n assert sr.has_nulls\n assert sr.null_count != len(sr)\n filled = sr.to_numpy(na_value=np.nan)\n dense = sr.dropna().to_numpy()\n assert dense.size < filled.size\n assert filled.size == len(sr)\n\n\ndef test_reading_arrow_sparse_data():\n pdf, schema, darr = read_data()\n gar = GpuArrowReader(schema, darr)\n gdf = DataFrame(gar.to_dict())\n assert_eq(pdf, gdf)\n" ]
[ [ "pandas.read_pickle", "numpy.random.random", "numpy.asarray" ] ]
albimc/deep-reinforcement-learning
[ "e11a6c9d4c8991cf229e686b645ae22ec4cff4f5" ]
[ "p3_collab-compet/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport pdb\n\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. / np.sqrt(fan_in)\n return (-lim, lim)\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, state_size, hidden_in_dim, hidden_out_dim, action_size, seed):\n\n super(ActorNetwork, self).__init__()\n\n self.seed = torch.manual_seed(seed)\n self.bn = nn.BatchNorm1d(state_size)\n self.fc1 = nn.Linear(state_size, hidden_in_dim)\n self.fc2 = nn.Linear(hidden_in_dim, hidden_out_dim)\n self.fc3 = nn.Linear(hidden_out_dim, action_size)\n self.nonlin = F.leaky_relu # relu # leaky_relu\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-1e-3, 1e-3)\n\n def forward(self, states):\n\n if states.dim() == 1:\n x = self.bn(states.unsqueeze(0))\n else:\n x = self.bn(states)\n\n h1 = self.nonlin(self.fc1(x))\n h2 = self.nonlin(self.fc2(h1))\n h3 = (self.fc3(h2))\n return F.tanh(h3)\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, state_size, action_size, num_agents, hidden_in_dim, hidden_out_dim, seed):\n\n super(CriticNetwork, self).__init__()\n\n self.seed = torch.manual_seed(seed)\n\n self.bn = nn.BatchNorm1d(state_size*num_agents)\n self.fc1 = nn.Linear(state_size*num_agents, hidden_in_dim)\n # self.fc2 = nn.Linear(hidden_in_dim + action_size * num_agents, hidden_out_dim)\n self.fc2 = nn.Linear(hidden_in_dim + action_size*num_agents, hidden_out_dim)\n self.fc3 = nn.Linear(hidden_out_dim, 1)\n self.nonlin = F.leaky_relu # relu # leaky_relu\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-1e-3, 1e-3)\n\n def forward(self, states, actions):\n\n if states.dim() == 1:\n x = self.bn(states.unsqueeze(0))\n else:\n x = self.bn(states)\n\n hs = self.nonlin(self.fc1(x))\n h1 = torch.cat((hs, actions), dim=1)\n h2 = self.nonlin(self.fc2(h1))\n return self.fc3(h2)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.manual_seed", "torch.nn.BatchNorm1d", "numpy.sqrt", "torch.nn.functional.tanh" ] ]
ruchikachavhan/atari-dqn
[ "00369088e6af9ba3fd7961edf87834e51ec58be5" ]
[ "dqn.py" ]
[ "import math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom itertools import count\nfrom PIL import Image\nimport cv2\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nimport pygame\nimport imageio\n\n# set up matplotlib\nis_ipython = 'inline' in matplotlib.get_backend()\nif is_ipython:\n from IPython import display\n\nplt.ion()\n\n# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nTransition = namedtuple('Transition',\n ('state', 'action', 'next_state', 'reward'))\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass DQN(nn.Module):\n\n def __init__(self, h, w, outputs):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.bn2 = nn.BatchNorm2d(64)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n self.bn3 = nn.BatchNorm2d(64)\n\n self.fc1 = nn.Linear(12544, 512)\n self.fc2 = nn.Linear(512, 2)\n\n\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.relu(self.fc1(x.view(-1)))\n x = F.relu(self.fc2(x))\n return x\n\n\nresize = T.Compose([T.ToPILImage(),\n T.Resize(40, interpolation=Image.CUBIC),\n T.ToTensor()])\n\n\ndef get_image(screen):\n\tpil_string_image = pygame.image.tostring(screen, \"RGBA\",False)\n\tpil_image = Image.frombytes(\"RGBA\",(400,300),pil_string_image)\n\tpil_image = remove_transparency(pil_image)\n\tnp_image = np.array(pil_image)\n\treturn np_image\n\ndef get_next_state(curr_screen, action, x_coord, block_break, x1, y1, angle):\n\tscreen = pygame.display.set_mode((400,300))\n\tscreen = curr_screen\n\timage = np.zeros((4, 300,400,4))\n\treward = 0.0\n\tfor i in range(0, 4):\n\t\tif action == 0 : x_coord -= 20\n\t\tif action == 1 : x_coord += 20\n\t\tif(x_coord<=0):\n\t\t\tx_coord = 0\n\t\tif(x_coord>=370):\n\t\t\tx_coord = 370\n\t\tlife_end = False\n\t\tscreen.fill((0, 0, 0))\n\t\treward = 0.0\n\t\tfor bl in range(0, len(block_break)):\n\t\t\tpygame.draw.rect(screen, (255,255,255), block_break[bl])\n\t\tblock = pygame.draw.rect(screen, (0,255,120), pygame.Rect(x_coord, 290, 50,10))\n\t\tif(x1>=0 and x1<400 and y1>=0 and y1<300):\n\t\t\tx1 += math.sin(angle)*15.0\n\t\t\ty1 += math.cos(angle)*15.0\n\t\tif(x1>=400):\n\t\t\tx1 = 395\n\t\t\tangle = -angle \n\t\telif(x1<0):\n\t\t\tx1 = 0\n\t\t\tangle = -angle \n\t\telif(y1>=300):\n\t\t\tlife_end = True\n\t\t\treward += -1.0\n\t\t\tdone = True\n\t\telif(y1<0):\n\t\t\ty1 = 0\n\t\t\tangle = 3.14-angle \n\t\tball = pygame.draw.circle(screen, (0, 255,255), (int(x1), int(y1)), 10)\n\t\tif ball.colliderect(block):\n\t\t\treward += 0.5\n\t\t\tangle = 3.14-angle\n\t\tnew_blocks = []\n\t\tfor l in range(0, len(block_break)):\n\t\t\tif ball.colliderect(block_break[l]):\n\t\t\t\tangle = 3.14-angle\n\t\t\t\treward += 
1.0\n\t\t\telse:\n\t\t\t\tnew_blocks.append(block_break[l])\n\t\tblock_break = new_blocks\n\t\timage[i]= get_image(screen)\n\treturn image , screen, reward, x_coord, block_break, x1, y1, angle,life_end\n\n\n\ndef remove_transparency(im, bg_colour=(255, 255, 255)):\n\tif im.mode in ('RGBA') or (im.mode == 'P' and 'transparency' in im.info):\n\t\talpha = im.convert('RGBA').split()[-1]\n\t\tbg = Image.new(\"RGBA\", im.size, bg_colour + (255,))\n\t\tbg.paste(im, mask=alpha)\n\t\treturn bg\n\telse:\n\t\treturn im\n\ndef get_initial_state():\n\tinitial_history = 4\n\tblock_break=[]\n\tscreen = pygame.display.set_mode((400,300))\n\timage = np.zeros((4,300,400,4))\n\tx1 = 200\n\ty1 = 150\n\tstart_y = 100\n\tstart_x = 0\n\ty_coord = 290\n\tx_coord = 200\n\tangle = random.randint(-60, 60)\n\tangle = angle*3.14/180\n\tlife_end = False\n\tfor i in range(0, 2):\n\t\tstart_x = 0\n\t\tfor b in range(0, 10):\n\t\t\tblock_break.append(pygame.Rect(start_x, start_y, 40,10))\n\t\t\tstart_x +=40\n\t\tstart_y +=10\n\tfor bl in range(0, len(block_break)):\n\t\tpygame.draw.rect(screen, (225,0,0) , block_break[bl])\n\tblock = pygame.draw.rect(screen, (0,255,120), pygame.Rect(x_coord, y_coord, 50,10))\n\tball = pygame.draw.circle(screen, (0, 255,255), (int(x1), int(y1)), 10)\n\tfor it in range(0, initial_history):\n\t\tscreen.fill((0, 0, 0))\n\t\tfor bl in range(0, len(block_break)):\n\t\t\tpygame.draw.rect(screen, (255,255,255), block_break[bl])\n\t\tblock = pygame.draw.rect(screen, (0,255,120), pygame.Rect(x_coord, y_coord, 50,10))\n\t\tball = pygame.draw.circle(screen, (0, 255,255), (int(x1), int(y1)), 10)\n\t\tif(x1>=0 and x1<400 and y1>=0 and y1<300):\n\t\t\tx1 += math.sin(angle)*15.0\n\t\t\ty1 += math.cos(angle)*15.0\n\t\tif(x1>=400):\n\t\t\tx1 = 395\n\t\t\tangle = -angle \n\t\telif(x1<0):\n\t\t\tx1 = 0\n\t\t\tangle = -angle \n\t\telif(y1>=300):\n\t\t\tlife_end = True\n\t\t\tdone = True\n\t\telif(y1<0):\n\t\t\ty1 = 0\n\t\t\tangle = 3.14-angle \n\n\t\tif ball.colliderect(block):\n\t\t\tangle = 3.14-angle\n\t\t\n\t\tnew_blocks = []\n\t\tfor l in range(0, len(block_break)):\n\t\t\tif ball.colliderect(block_break[l]):\n\t\t\t\tangle = 3.14-angle\n\t\t\telse:\n\t\t\t\tnew_blocks.append(block_break[l])\n\t\tblock_break = new_blocks\n\t\timage[it]= get_image(screen)\n\treturn image, screen, x_coord, block_break, x1, y1, angle\n\n\nBATCH_SIZE = 1\nGAMMA = 0.999\nEPS_START = 0.9\nEPS_END = 0.05\nEPS_DECAY = 200\nTARGET_UPDATE = 10\n\n\nscreen_height = 300\nscreen_width = 400\nn_actions = 2\npolicy_net = DQN(screen_height, screen_width, n_actions).to(device)\ntarget_net = DQN(screen_height, screen_width, n_actions).to(device)\ntarget_net.load_state_dict(policy_net.state_dict())\ntarget_net.eval()\n\noptimizer = optim.RMSprop(policy_net.parameters())\nmemory = ReplayMemory(10000)\n\n\nsteps_done = 0\n\n\ndef select_action(state):\n global steps_done\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n math.exp(-1. 
* steps_done / EPS_DECAY)\n steps_done += 1\n if sample > eps_threshold:\n with torch.no_grad():\n return np.argmax(policy_net(state))\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)\n\n\nepisode_durations = []\n\n\ndef plot_durations():\n plt.figure(2)\n plt.clf()\n durations_t = torch.tensor(episode_durations, dtype=torch.float)\n plt.title('Training...')\n plt.xlabel('Episode')\n plt.ylabel('Duration')\n plt.plot(durations_t.numpy())\n # Take 100 episode averages and plot them too\n if len(durations_t) >= 100:\n means = durations_t.unfold(0, 100, 1).mean(1).view(-1)\n means = torch.cat((torch.zeros(99), means))\n plt.plot(means.numpy())\n\n plt.pause(0.001) # pause a bit so that plots are updated\n if is_ipython:\n display.clear_output(wait=True)\n display.display(plt.gcf())\n\n\ndef resize_batch(images):\n\tresized = np.zeros((4,84,84,4))\n\tfor i in range(0, images.shape[0]):\n\t\tresized[i][:][:][:] = cv2.resize(images[i], (84,84))\n\treturn resized\t\t\n\ndef optimize_model():\n if len(memory) < BATCH_SIZE:\n return\n transitions = memory.sample(BATCH_SIZE)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for\n # detailed explanation). This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation ended)\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n batch.next_state)), device=device, dtype=torch.uint8)\n non_final_next_states = torch.cat([s for s in batch.next_state\n if s is not None])\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n state_action_values = policy_net(state_batch)\n next_state_values = torch.zeros(BATCH_SIZE, device=device)\n next_state_values[non_final_mask] = max(target_net(non_final_next_states))\n # Compute the expected Q values\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n return loss\n\nnum_episodes = 5000000000000000000\nloss_list = []\nfor i_episode in range(num_episodes):\n # Initialize the environment and state\n\tscenes=[]\n\tdone = False\n\tinit_screen_image , game_screen, x_coord, block_break, x1, y1, angle = get_initial_state()\n\tfor i in range(0, init_screen_image.shape[0]):\n\t\tscenes.append(init_screen_image[i])\n\tloss = 0.0\n\tframe_count = 0\n\tfor t in range(0, 100):\n\t\tframe_count+=1\n\t\t# Select and perform an action\n\t\tinit_screen_image = resize_batch(init_screen_image)\n\t\tinit_screen_image_ = torch.tensor(init_screen_image).float()\n\t\tinit_screen_image_ = torch.reshape(init_screen_image_, [4, 4, 84, 84])\n\t\taction = select_action(init_screen_image_)\n\t\taction_ = torch.tensor(action).long()\n\t\taction_ = torch.reshape(action_, [1,1])\n\t\tnext_state, screen , reward,x_coord, block_break, x1, y1, angle, done = get_next_state(game_screen, action, x_coord, block_break, x1, y1, angle)\t\t\t\t\t\n\t\treward = torch.tensor([reward], device=device)\n\n\t\t# Store the transition in memory\n\t\tnext_state_arr = resize_batch(next_state)\n\t\tnext_state_ = 
torch.tensor(next_state_arr).float()\n\t\tnext_state_ = torch.reshape(next_state_, [4, 4, 84, 84])\n\t\tmemory.push(init_screen_image_, action_, next_state_, reward)\n\n\t\t# Move to the next state\n\t\tinit_screen_image = next_state\n\t\tfor i in range(0, next_state.shape[0]):\n\t\t\tscenes.append(next_state[i])\n\t\t# Perform one step of the optimization (on the target network)\n\t\tloss += optimize_model()\n\t\n\t\tif done:\n\t\t episode_durations.append(t + 1)\n\t\t break\n\t\t# Update the target network, copying all weights and biases in DQN\n\t\tif i_episode % TARGET_UPDATE == 0:\n\t\t\ttarget_net.load_state_dict(policy_net.state_dict())\n\n\t\t# print('Complete')\n\tloss = loss/frame_count\n\tprint(\"reward for this episode\", reward.item())\n\tprint(\"loss\", loss/frame_count)\n\tvid_name = 'vid' +str(i_episode)+'.gif'\n\timageio.mimsave(vid_name , scenes)\n\tprint(\"Completed one episode\")\n\tloss_list.append(loss)\n\tplt.ion()\n\tplt.figure(200)\n\tplt.plot(loss_list)\n\tplt.show()\n\tplt.pause(0.05)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "matplotlib.pyplot.gcf", "torch.reshape", "torch.tensor", "torch.zeros", "numpy.array", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "torch.nn.Conv2d", "matplotlib.pyplot.show", "matplotlib.pyplot.clf", "matplotlib.pyplot.ion", "matplotlib.pyplot.xlabel", "torch.no_grad", "matplotlib.pyplot.plot", "matplotlib.pyplot.pause", "matplotlib.pyplot.ylabel", "matplotlib.get_backend" ] ]
sean-frye/dask-cuda
[ "b24567d0cc3be7570f19b3bd5fe8d31d4591b623" ]
[ "dask_cuda/explicit_comms/dataframe_merge.py" ]
[ "import asyncio\n\nimport pandas\n\nfrom dask.dataframe.shuffle import partitioning_index, shuffle_group\nfrom distributed.protocol import to_serialize\n\nimport cudf\n\nfrom . import comms\n\n\nasync def send_df(ep, df):\n if df is None:\n return await ep.write(\"empty\")\n else:\n return await ep.write([to_serialize(df)])\n\n\nasync def recv_df(ep):\n ret = await ep.read()\n if ret == \"empty\":\n return None\n else:\n return ret[0]\n\n\nasync def barrier(rank, eps):\n if rank == 0:\n await asyncio.gather(*[ep.read() for ep in eps.values()])\n else:\n await eps[0].write(\"dummy\")\n\n\nasync def broadcast(rank, root_rank, eps, df=None):\n if rank == root_rank:\n await asyncio.gather(*[send_df(ep, df) for ep in eps.values()])\n return df\n else:\n return await recv_df(eps[root_rank])\n\n\nasync def send_bins(eps, bins):\n futures = []\n for rank, ep in eps.items():\n futures.append(send_df(ep, bins[rank]))\n await asyncio.gather(*futures)\n\n\nasync def recv_bins(eps, bins):\n futures = []\n for ep in eps.values():\n futures.append(recv_df(ep))\n bins.extend(await asyncio.gather(*futures))\n\n\nasync def exchange_and_concat_bins(rank, eps, bins):\n ret = [bins[rank]]\n await asyncio.gather(recv_bins(eps, ret), send_bins(eps, bins))\n return concat([df for df in ret if df is not None])\n\n\ndef concat(df_list):\n if len(df_list) == 0:\n return None\n elif isinstance(df_list[0], (cudf.DataFrame, cudf.Series)):\n return cudf.concat(df_list)\n else:\n return pandas.concat(df_list)\n\n\ndef partition_by_hash(df, columns, n_chunks, ignore_index=False):\n \"\"\" Splits dataframe into partitions\n\n The partitions is determined by the hash value of the rows in `columns`.\n\n Parameters\n ----------\n df: DataFrame\n columns: label or list\n Column names on which to split the dataframe\n npartition: int\n Number of partitions\n ignore_index : bool, default False\n Set True to ignore the index of `df`\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..npartition} to dataframes.\n \"\"\"\n if df is None:\n return [None] * n_chunks\n\n # Hashing `columns` in `df` and assing it to the \"_partitions\" column\n df[\"_partitions\"] = partitioning_index(df[columns], n_chunks)\n # Split `df` based on the hash values in the \"_partitions\" column\n try:\n # For Dask < 2.17 compatibility\n ret = shuffle_group(df, \"_partitions\", 0, n_chunks, n_chunks, ignore_index)\n except TypeError:\n ret = shuffle_group(\n df, \"_partitions\", 0, n_chunks, n_chunks, ignore_index, n_chunks\n )\n\n # Let's remove the partition column and return the partitions\n del df[\"_partitions\"]\n for df in ret.values():\n del df[\"_partitions\"]\n return ret\n\n\nasync def hash_join(n_chunks, rank, eps, left_table, right_table, left_on, right_on):\n left_bins = partition_by_hash(left_table, left_on, n_chunks, ignore_index=True)\n left_df = exchange_and_concat_bins(rank, eps, left_bins)\n right_bins = partition_by_hash(right_table, right_on, n_chunks, ignore_index=True)\n left_df = await left_df\n right_df = await exchange_and_concat_bins(rank, eps, right_bins)\n return left_df.merge(right_df, left_on=left_on, right_on=right_on)\n\n\nasync def single_partition_join(\n n_chunks,\n rank,\n eps,\n left_table,\n right_table,\n left_on,\n right_on,\n single_table,\n single_rank,\n):\n if single_table == \"left\":\n left_table = await broadcast(rank, single_rank, eps, left_table)\n else:\n assert single_table == \"right\"\n right_table = await broadcast(rank, single_rank, eps, right_table)\n\n return 
left_table.merge(right_table, left_on=left_on, right_on=right_on)\n\n\nasync def _dataframe_merge(s, workers, dfs_nparts, dfs_parts, left_on, right_on):\n \"\"\" Worker job that merge local DataFrames\n\n Parameters\n ----------\n s: dict\n Worker session state\n workers: set\n Set of ranks of all the participants\n dfs_nparts: list of dict\n List of dict that for each worker rank specifices the\n number of partitions that worker has. If the worker doesn't\n have any partitions, it is excluded from the dict.\n E.g. `dfs_nparts[0][1]` is how many partitions of the \"left\"\n dataframe worker 1 has.\n dfs_parts: list of lists of Dataframes\n List of inputs, which in this case are two dataframe lists.\n left_on : label or list, or array-like\n Column to join on in the left DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n right_on : label or list, or array-like\n Column to join on in the right DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n\n Returns\n -------\n merged_dataframe: DataFrame\n \"\"\"\n\n def df_concat(df_parts):\n \"\"\"Making sure df_parts is a single dataframe or None\"\"\"\n if len(df_parts) == 0:\n return None\n elif len(df_parts) == 1:\n return df_parts[0]\n else:\n return concat(df_parts)\n\n assert s[\"rank\"] in workers\n\n # Trimming such that all participanting workers get a rank within 0..len(workers)\n trim_map = {}\n for i in range(s[\"nworkers\"]):\n if i in workers:\n trim_map[i] = len(trim_map)\n\n rank = trim_map[s[\"rank\"]]\n eps = {trim_map[i]: s[\"eps\"][trim_map[i]] for i in workers if i != s[\"rank\"]}\n\n df1 = df_concat(dfs_parts[0])\n df2 = df_concat(dfs_parts[1])\n\n if len(dfs_nparts[0]) == 1 and len(dfs_nparts[1]) == 1:\n return df1.merge(df2, left_on=left_on, right_on=right_on)\n elif len(dfs_nparts[0]) == 1:\n return await single_partition_join(\n len(workers),\n rank,\n eps,\n df1,\n df2,\n left_on,\n right_on,\n \"left\",\n trim_map[\n next(iter(dfs_nparts[0]))\n ], # Extracting the only key in `dfs_nparts[0]`\n )\n elif len(dfs_nparts[1]) == 1:\n return await single_partition_join(\n len(workers),\n rank,\n eps,\n df1,\n df2,\n left_on,\n right_on,\n \"right\",\n trim_map[\n next(iter(dfs_nparts[1]))\n ], # Extracting the only key in `dfs_nparts[1]`\n )\n else:\n return await hash_join(len(workers), rank, eps, df1, df2, left_on, right_on)\n\n\ndef dataframe_merge(left, right, on=None, left_on=None, right_on=None, how=\"inner\"):\n \"\"\"Merge two Dask DataFrames\n\n This will merge the two datasets, either on the indices, a certain column\n in each dataset or the index in one dataset and the column in another.\n\n Requires an activate client.\n\n Parameters\n ----------\n left: dask.dataframe.DataFrame\n right: dask.dataframe.DataFrame\n how : {'left', 'right', 'outer', 'inner'}, default: 'inner'\n How to handle the operation of the two objects:\n\n - left: use calling frame's index (or column if on is specified)\n - right: use other frame's index\n - outer: form union of calling frame's index (or column if on is\n specified) with other frame's index, and sort it\n lexicographically\n - inner: form intersection of calling frame's index (or column if\n on is specified) with other frame's index, preserving the order\n of the calling's one\n\n on : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. 
If on is None and not merging on indexes then this\n defaults to the intersection of the columns in both DataFrames.\n left_on : label or list, or array-like\n Column to join on in the left DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n right_on : label or list, or array-like\n Column to join on in the right DataFrame. Other than in pandas\n arrays and lists are only support if their length is 1.\n left_index : boolean, default False\n Use the index from the left DataFrame as the join key.\n right_index : boolean, default False\n Use the index from the right DataFrame as the join key.\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and\n right side, respectively\n indicator : boolean or string, default False\n If True, adds a column to output DataFrame called \"_merge\" with\n information on the source of each row. If string, column with\n information on source of each row will be added to output DataFrame,\n and column will be named value of string. Information column is\n Categorical-type and takes on a value of \"left_only\" for observations\n whose merge key only appears in `left` DataFrame, \"right_only\" for\n observations whose merge key only appears in `right` DataFrame,\n and \"both\" if the observation’s merge key is found in both.\n\n Returns\n -------\n merged_dataframe: dask.dataframe.DataFrame\n\n Notes\n -----\n This function submits jobs the each available worker explicitly and the\n number of partitions of `left` and `right` might change (typically to the\n number of workers).\n \"\"\"\n\n # Making sure that the \"on\" arguments are list of column names\n if on:\n on = [on] if isinstance(on, str) else list(on)\n if left_on:\n left_on = [left_on] if isinstance(left_on, str) else list(left_on)\n if right_on:\n right_on = [right_on] if isinstance(right_on, str) else list(right_on)\n\n if left_on is None:\n left_on = on\n if right_on is None:\n right_on = on\n\n if not (left_on and right_on):\n raise ValueError(\n \"Some combination of the on, left_on, and right_on arguments must be set\"\n )\n\n if how != \"inner\":\n raise NotImplementedError('Only support `how=\"inner\"`')\n\n return comms.default_comms().dataframe_operation(\n _dataframe_merge, df_list=(left, right), extra_args=(left_on, right_on)\n )\n" ]
[ [ "pandas.concat" ] ]
Blitzman/CarND-Behavioral-Cloning-P3
[ "9ae7bd9ffe5b7db59c74132a30feff6c9412af2b" ]
[ "drive.py" ]
[ "import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\nimport cv2\n\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\n\nfrom keras.models import load_model\nimport h5py\nfrom keras import __version__ as keras_version\n\ndef preprocess_image(img):\n preprocessed_img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n return preprocessed_img\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\n\nclass SimplePIController:\n def __init__(self, Kp, Ki):\n self.Kp = Kp\n self.Ki = Ki\n self.set_point = 0.\n self.error = 0.\n self.integral = 0.\n\n def set_desired(self, desired):\n self.set_point = desired\n\n def update(self, measurement):\n # proportional error\n self.error = self.set_point - measurement\n\n # integral error\n self.integral += self.error\n\n return self.Kp * self.error + self.Ki * self.integral\n\n\ncontroller = SimplePIController(0.1, 0.002)\nset_speed = 16\ncontroller.set_desired(set_speed)\n\n\n@sio.on('telemetry')\ndef telemetry(sid, data):\n if data:\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n image_array = preprocess_image(image_array)\n steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))\n\n throttle = controller.update(float(speed))\n\n print(steering_angle, throttle)\n send_control(steering_angle, throttle)\n\n # save frame\n if args.image_folder != '':\n timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n image_filename = os.path.join(args.image_folder, timestamp)\n image.save('{}.jpg'.format(image_filename))\n else:\n # NOTE: DON'T EDIT THIS.\n sio.emit('manual', data={}, skip_sid=True)\n\n\n@sio.on('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n parser.add_argument(\n 'image_folder',\n type=str,\n nargs='?',\n default='',\n help='Path to image folder. 
This is where the images from the run will be saved.'\n )\n args = parser.parse_args()\n\n # check that model Keras version is same as local Keras version\n f = h5py.File(args.model, mode='r')\n model_version = f.attrs.get('keras_version')\n keras_version = str(keras_version).encode('utf8')\n\n if model_version != keras_version:\n print('You are using Keras version ', keras_version,\n ', but the model was built using ', model_version)\n\n model = load_model(args.model)\n\n if args.image_folder != '':\n print(\"Creating image folder at {}\".format(args.image_folder))\n if not os.path.exists(args.image_folder):\n os.makedirs(args.image_folder)\n else:\n shutil.rmtree(args.image_folder)\n os.makedirs(args.image_folder)\n print(\"RECORDING THIS RUN ...\")\n else:\n print(\"NOT RECORDING THIS RUN ...\")\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n" ]
[ [ "numpy.asarray" ] ]
netomap/MNIST-WGAN-GP
[ "6b531536e1d844b25720a7895e2b1537fb889f1a" ]
[ "criar_gif.py" ]
[ "import pathlib, re\nfrom pickletools import optimize\nfrom PIL import Image, ImageDraw\nfrom numpy import dtype\nimport numpy as np\n\ndef listar_imagens(img_dir):\n lista = [str(l) for l in list(pathlib.Path(img_dir).glob('*.png'))]\n aux = []\n for img_path in lista:\n epoch = int(re.findall(r'[0-9]{1,}', img_path)[0])\n aux.append([img_path, epoch])\n \n # Coloca as imagens em ordem crescente de época\n aux = sorted([[imgp, epo] for imgp, epo in aux], key=lambda item: item[1], reverse=False)\n aux = np.array(aux, dtype=np.object)\n return aux\n\nif (__name__ == '__main__'):\n lista = listar_imagens('./imgs')\n\n imgs_pil = [Image.open(img) for img in lista[:,0]]\n imgs_pil[0].save('video.gif', save_all=True, append_images=imgs_pil[1:], optimize=False, duration=120, loop=0)\n print ('ok')" ]
[ [ "numpy.array" ] ]
edwardoughton/itmlogic
[ "1e454e3b4b3c8e24c4bc74ec6c076a2f97d86d23" ]
[ "src/itmlogic/diffraction_attenuation/aknfe.py" ]
[ "import math\nimport numpy as np\n\ndef aknfe(v2):\n \"\"\"\n Returns the attenuation due to a single knife edge - the Fresnel integral (in decibels,\n Eqn 4.21 of \"The ITS Irregular Terrain Model, version 1.2.2: The Algorithm\" – see also\n Eqn 6.1) evaluated for nu equal to the square root of the input argument.\n\n Parameters\n ----------\n v2 : float\n Input for computing knife edge diffraction.\n\n Returns\n -------\n aknfe1 : float\n Attenuation due to a single knife edge.\n\n \"\"\"\n if v2 < 5.76:\n if v2 <= 0: ### addition to avoid logging v2 <= 0\n v2 = 0.00001 ### addition to avoid logging v2 <= 0\n aknfe1 = 6.02 + 9.11 * math.sqrt(v2) - 1.27 * v2\n\n else:\n\n aknfe1 = 12.953 + 4.343 * np.log(v2)\n\n return aknfe1\n" ]
[ [ "numpy.log" ] ]
Ralphyan/VectorCapsNet
[ "ea6911c44821bdf473d25edcc1b58248dad31f79" ]
[ "main.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#ac\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Simple, end-to-end, LeNet-5-like convolutional MNIST model example.\n\nThis should achieve a test error of 0.7%. Please keep this model as simple and\nlinear as possible, it is meant as a tutorial for simple convolutional models.\nRun with --self_test on the command line to execute a short self-test.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport gzip\nimport os\nimport sys\nimport time\n\nimport numpy as np\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom settings import *\n#import datasets.mnist as mnist\n# CVDF mirror of http://yann.lecun.com/exdb/mnist/\nSOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'\nWORK_DIRECTORY = 'data'\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nPIXEL_DEPTH = 255\nNUM_LABELS = 10\nVALIDATION_SIZE = 5000 # Size of the validation set.\nSEED = None # Set to None for random seed.\nBATCH_SIZE = 16\nNUM_EPOCHS =200\nEVAL_BATCH_SIZE = 16\nEVAL_FREQUENCY = 100 # Number of steps between evaluations.\n\n\nFLAGS = None\n\ndef _dense_to_one_hot(labels, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros(\n shape=(num_labels, num_classes), dtype=np.uint8\n )\n labels_one_hot.flat[index_offset + labels.ravel()] = 1\n return labels_one_hot\n\ndef data_type():\n \"\"\"Return the type of the activations, weights, and placeholder variables.\"\"\"\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32\n\n\ndef fake_data(num_images):\n \"\"\"Generate a fake dataset that matches the dimensions of MNIST.\"\"\"\n data = np.ndarray(\n shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),\n dtype=np.float32)\n labels = np.zeros(shape=(num_images,), dtype=np.int64)\n for image in xrange(num_images):\n label = image % 2\n data[image, :, :, 0] = label - 0.5\n labels[image] = label\n return data, labels\n\n\ndef error_rate(predictions, labels):\n \"\"\"Return the error rate based on dense predictions and sparse labels.\"\"\"\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels,1)) /\n predictions.shape[0])\n\n\ndef main(_):\n if FLAGS.self_test:\n print('Running self-test.')\n train_data, train_labels = fake_data(256)\n validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)\n test_data, test_labels = fake_data(EVAL_BATCH_SIZE)\n num_epochs = 1\n else:\n # Extract it into np arrays.\n (train_data,train_labels),(test_data,test_labels) = tf.keras.datasets.mnist.load_data(path='mnist.npz')\n # (train_data,train_labels),(test_data,test_labels) = tf.keras.datasets.cifar10.load_data()\n if 
np.rank(train_data)==3:\n train_data = np.expand_dims(train_data,axis=-1)\n test_data = np.expand_dims(test_data,axis=-1)\n #train_data = extract_data(train_data_filename, 60000)\n #train_labels = extract_labels(train_labels_filename, 60000)\n #test_data = extract_data(test_data_filename, 10000)\n #test_labels = extract_labels(test_labels_filename, 10000)\n\n # Generate a validation set.\n validation_data = train_data[:VALIDATION_SIZE, ...]\n validation_labels = train_labels[:VALIDATION_SIZE]\n train_data = train_data[VALIDATION_SIZE:, ...]\n train_labels = train_labels[VALIDATION_SIZE:]\n num_epochs = NUM_EPOCHS\n\n # convert labels to one-hot\n train_labels = _dense_to_one_hot(train_labels,NUM_LABELS)\n test_labels = _dense_to_one_hot(test_labels,NUM_LABELS)\n validation_labels = _dense_to_one_hot(validation_labels,NUM_LABELS)\n\n train_size = train_labels.shape[0]\n\n \n # This is where training samples and labels are fed to the graph.\n # These placeholder nodes will be fed a batch of training data at each\n # training step using the {feed_dict} argument to the Run() call below.\n train_data_node = tf.placeholder(\n data_type(),\n shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))\n train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,NUM_LABELS))\n\n eval_data = tf.placeholder(\n data_type(),\n shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))\n eval_label = tf.placeholder(tf.int64, shape=(EVAL_BATCH_SIZE,NUM_LABELS))\n # We will replicate the model structure for the training subgraph, as well\n # as the evaluation subgraphs, while sharing the trainable parameters.\n epsilon = 1e-9\n def model(data, label,train=False):\n \n with tf.variable_scope('model',reuse=tf.AUTO_REUSE):\n logits,recon=capsules.nets.capsules_v0(\n data, label,num_classes=10, iterations=1, name='capsulesEM-V0'\n )\n #activations = tf.nn.relu(activations)\n #logits_sum = tf.reduce_sum(activations,axis=1,keepdims=True)\n #logits_sum = tf.tile(logits_sum,[1,NUM_LABELS])+epsilon\n #logits = activations / logits_sum\n #logits = tf.log((logits+epsilon)/ (1-logits + epsilon)) + epsilon\n #logits = tf.reshape(activations,shape=[activations.shape[0],-1])\n #logits = tf.layers.dense(logits,units=10)\n #return tf.nn.softmax(activations+1e-9,axis=-1)\n return logits,recon\n #return logits\n # Training computation: logits + cross-entropy loss.\n logits,recon = model(train_data_node,train_labels_node, True)\n #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n # labels=train_labels_node, logits=logits))\n \n #loss = tf.reduce_mean(-tf.reduce_sum(logits * tf.log(tf.one_hot(train_labels_node,NUM_LABELS)+epsilon), reduction_indices=[1]))\n\n #loss = capsules.nets.spread_loss(\n # train_labels_node, logits, margin=1.0, name='spread_loss'\n # )\n \n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=train_labels_node, logits=logits, name='cross_entropy_loss'\n )\n )\n loss = loss #+tf.reduce_mean((recon-train_data_node)**2)*1e-2\n number_params=np.sum([np.product([xi.value for xi in x.get_shape()]) for x in tf.all_variables()])\n print(\"Number of parameters:{}\".format(number_params//1e3),\"K\")\n # Optimizer: set up a variable that's incremented once per batch and\n # controls the learning rate decay.\n batch = tf.Variable(0, dtype=data_type())\n # Decay once per epoch, using an exponential schedule starting at 0.01.\n learning_rate = tf.train.exponential_decay(\n 0.001, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n 
train_size, # Decay step.\n 0.95, # Decay rate.\n staircase=True)\n # Use simple momentum for the optimization.\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss,\n global_step=batch)\n\n # Predictions for the current training minibatch.\n train_prediction = tf.nn.softmax(logits)\n\n # Predictions for the test and validation, which we'll compute less often.\n eval_prediction = tf.nn.softmax(model(eval_data,eval_label)[0])\n\n # Small utility function to evaluate a dataset by feeding batches of data to\n # {eval_data} and pulling the results from {eval_predictions}.\n # Saves memory and enables this to run on smaller GPUs.\n def eval_in_batches(data, sess):\n \"\"\"Get all predictions for a dataset by running it in small batches.\"\"\"\n size = data.shape[0]\n if size < EVAL_BATCH_SIZE:\n raise ValueError(\"batch size for evals larger than dataset: %d\" % size)\n predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)\n for begin in range(0, size, EVAL_BATCH_SIZE):\n end = begin + EVAL_BATCH_SIZE\n if end <= size:\n predictions[begin:end, :] = sess.run(\n eval_prediction,\n feed_dict={eval_data: data[begin:end, ...]})\n else:\n batch_predictions = sess.run(\n eval_prediction,\n feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})\n predictions[begin:, :] = batch_predictions[begin - size:, :]\n return predictions\n\n # Create a local session to run the training.\n start_time = time.time()\n saver = tf.train.Saver()\n\n # create log files\n try:\n os.stat(\"./eval_log\")\n except:\n os.mkdir(\"eval_log\")\n minibatch_file = open(\"./eval_log/minibatch_error.txt\", 'w')\n validation_file = open(\"./eval_log/validation_error.txt\", 'w')\n test_file = open(\"./eval_log/test_error.txt\",\"w\")\n\n with tf.Session() as sess:\n # Run all the initializers to prepare the trainable parameters.\n tf.global_variables_initializer().run()\n print('Initialized!')\n # Loop through training steps.\n for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):\n # Compute the offset of the current minibatch in the data.\n # Note that we could use better randomization across epochs.\n offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)\n batch_data = train_data[offset:(offset + BATCH_SIZE), ...]\n batch_labels = train_labels[offset:(offset + BATCH_SIZE)]\n # This dictionary maps the batch data (as a np array) to the\n # node in the graph it schould be fed to.\n feed_dict = {train_data_node: batch_data,\n train_labels_node: batch_labels}\n # Run the optimizer to update weights.\n sess.run(optimizer, feed_dict=feed_dict)\n # print some extra information once reach the evaluation frequency\n if step % EVAL_FREQUENCY == 0:\n # tf.Print([batch_data[0,...]],logits)\n # fetch some extra nodes' data\n l, lr, predictions = sess.run([loss, learning_rate, train_prediction],\n feed_dict=feed_dict)\n elapsed_time = time.time() - start_time\n start_time = time.time()\n print('Step %d (epoch %.2f), %.1f ms' %\n (step, float(step) * BATCH_SIZE / train_size,\n 1000 * elapsed_time / EVAL_FREQUENCY))\n print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))\n\n minibatch_error = error_rate(predictions, batch_labels)\n print('Minibatch error: %.1f%%' % minibatch_error)\n\n validation_error = error_rate(\n eval_in_batches(validation_data, sess), validation_labels)\n print('Validation error: %.1f%%' % validation_error)\n\n # test_error = error_rate(eval_in_batches(test_data, sess), test_labels)\n # print('Test error: %.1f%%' % test_error)\n\n minibatch_file.write(str(minibatch_error) + 
'\\n')\n minibatch_file.flush()\n\n validation_file.write(str(validation_error) + '\\n')\n validation_file.flush()\n # test_file.write(str(test_error) + '\\n')\n\n sys.stdout.flush()\n\n if step%10000 ==0 :\n saver.save(sess,\"./my_train_log/step.{}.ckpt\".format(step))\n test_error = error_rate(eval_in_batches(test_data, sess), test_labels)\n test_file.write(str(test_error)+'\\n')\n test_file.flush()\n sys.stdout.flush()\n # Finally print the result!\n test_error = error_rate(eval_in_batches(test_data, sess), test_labels)\n print('Test error: %.1f%%' % test_error)\n if FLAGS.self_test:\n print('test_error', test_error)\n assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (\n test_error,)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--use_fp16',\n default=False,\n help='Use half floats instead of full floats if True.',\n action='store_true')\n parser.add_argument(\n '--self_test',\n default=False,\n action='store_true',\n help='True if running a self test.')\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n app = QApplication(sys.argv)\n app.aboutToQuit.connect(app.deleteLater)\n\n\n" ]
[ [ "tensorflow.keras.datasets.mnist.load_data", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "numpy.zeros", "numpy.rank", "tensorflow.Session", "tensorflow.train.Saver", "numpy.ndarray", "tensorflow.variable_scope", "tensorflow.placeholder", "numpy.arange", "numpy.argmax", "tensorflow.nn.softmax", "tensorflow.train.exponential_decay", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.all_variables", "numpy.expand_dims" ] ]
ijpulidos/pymdtools
[ "484160f57094bf701f5d1603baa5f40abee61d8c" ]
[ "scripts/preprocessing/complete_residues.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nScript that completes a PDB structure with missing residues or atoms.\n\nIt uses MODELLER as backend, it automatically builds a MODELLER align file and\nthen performs the completion with a MODELLER model and its tools.\n\nRequires BioPython>=1.74 and Modeller>=9.21.\n\"\"\"\n\nimport os\nimport argparse\nimport shutil\nimport warnings\n\n# Modules to read PDB and build align file\nfrom Bio.PDB import PDBParser\nfrom Bio.PDB.Selection import unfold_entities\nimport numpy as np\nfrom Bio.SeqUtils import seq1\nimport textwrap\n\n# Modules for completing missing residues\nfrom modeller import *\nfrom modeller.automodel import * # Load the automodel class\n\n\ndef insert_gap(aaindex, residue_list):\n \"\"\"\n Inserts a gap in the residue list in the position corresponding to the amino acid index.\n \"\"\"\n tmplist = residue_list.copy()\n for index, res in enumerate(tmplist):\n if aaindex < res[0]: # Given index less than current index in list\n residue_list.insert(index, (aaindex, \"-\"))\n return tmplist\n residue_list.append((aaindex, \"-\"))\n\n\ndef get_chain_missing_res(missing_residues, chainID):\n \"\"\"\n Function that returns a list of missing residues from a given chain identifier/letter.\n \"\"\"\n result = [residue for residue in missing_residues if residue[\"chain\"] == chainID]\n return result\n\n\ndef build_align_file(input_pdb, pdbcode, output_align_file=\"protein.ali\"):\n \"\"\"\n Function that takes a PDB filepath, detects missing residues and builds a MODELLER align file with this information,\n to be later used for completing residues.\n :param input_pdb: PDB filepath\n :param pdbcode: Code identifier for the PDB structure. Ex: 2y8d\n :param output_align_file: Filepath for output align file.\n :return:\n \"\"\"\n\n # Read structure and extract present and missing residues\n pdbparser = PDBParser()\n structure = pdbparser.get_structure(pdbcode, input_pdb)\n chains = unfold_entities(structure, \"C\") # Get chains\n\n missing_residues = structure.header[\n \"missing_residues\"\n ] # Get missing residues from whole structure\n\n # Remove alignment file if exists\n try:\n os.remove(output_align_file)\n except FileNotFoundError:\n pass\n\n # Where to store the sequences from structure separated by chains/index\n whole_gapped = []\n whole_full = []\n\n for chain in chains:\n chain_id = chain.get_id()\n residues = unfold_entities(chain, \"R\") # Get residues of chain\n missing_res_chain = get_chain_missing_res(missing_residues, chain_id)\n\n # Residues with empty id[0] are the 'real' residues, others are solvent or different.\n residues_list = [\n (residue.id[1], seq1(residue.resname))\n for residue in residues\n if residue.id[0] == \" \"\n ]\n for mis_res in missing_res_chain:\n insert_gap(mis_res[\"ssseq\"], residues_list)\n\n # Sequence with gaps\n try:\n gapped_seq = \"\".join(np.array(residues_list)[:, 1])\n except IndexError:\n # Warn the user if the residues list is empty (probably HETATOMS)\n msg = \"Residues list for chain {} is empty. Check PDB, probably chain is\" \\\n \"full of HETATOM type atoms. 
Leaving chain empty in align \" \\\n \"file.\".format(chain)\n warnings.warn(msg)\n gapped_seq = \"\" # Empty seq for chain full of HETATOM or non-standard\n # Make the line width the correct/expected one for modeller align file\n textwrap.wrap(gapped_seq, width=75, break_on_hyphens=False)\n\n # Full sequence without gaps by replacing gaps with the missing res\n full_seq = gapped_seq\n for mis_res in missing_residues:\n full_seq = full_seq.replace(\"-\", seq1(mis_res[\"res_name\"]), 1)\n\n whole_gapped.append(gapped_seq)\n whole_full.append(full_seq)\n\n # For checking full_seq\n # print(full_seq)\n\n # Building whole strings to write to file. \"/\" char separates chains.\n whole_gapped_str = \"/\".join(whole_gapped)\n whole_full_str = \"/\".join(whole_full)\n\n # Writing to file\n # Remember sequences have to end with the * character\n with open(output_align_file, \"a+\") as file:\n # Writing structure/gapped section\n file.write(\">P1;\" + structure.id + \"\\n\")\n file.write(\"structureX:\" + structure.id + \":FIRST:@ END:@\" + 5 * \":.\" + \"\\n\")\n for line in textwrap.wrap(\n whole_gapped_str + \"*\", width=75, break_on_hyphens=False\n ):\n file.write(\"%s\\n\" % line)\n # Writing full sequence section\n file.write(\">P1;\" + structure.id + \"_fill\\n\")\n file.write(\"sequence:\" + structure.id + \":FIRST:@ END:@\" + 5 * \":.\" + \"\\n\")\n for line in textwrap.wrap(\n whole_full_str + \"*\", width=75, break_on_hyphens=False\n ):\n file.write(\"%s\\n\" % line)\n\n\ndef complete_residues(pdbcode, align_file=\"protein.ali\", loop_ref=False):\n \"\"\"\n Function that completes residues based on an alignment file using MODELLER software.\n :param pdbcode: PDB code identifier of the structure with missing residues.\n :param align_file: Path to the align-formatted file with gaps as missing residues.\n :param loop_ref: (optional) Boolean for specifying loop refinement, doesn't always work.\n :return:\n \"\"\"\n # Get the sequence of the coded PDB file, and write to an alignment file\n e = environ()\n m = model(e, file=pdbcode)\n aln = alignment(e)\n aln.append_model(m, align_codes=pdbcode)\n aln.write(file=pdbcode + \".seq\")\n\n # Completing residues\n log.verbose()\n env = environ()\n\n # directories for input atom files (local dir)\n env.io.atom_files_directory = [\".\"]\n\n if loop_ref is True:\n # For loop refinement - Doesn't always work\n a = loopmodel(env, alnfile=align_file, knowns=pdbcode, sequence=code + \"_fill\")\n a.loop.starting_model = 1\n a.loop.ending_model = 2\n a.loop.md_level = refine.fast\n else:\n a = automodel(\n env, alnfile=align_file, knowns=pdbcode, sequence=pdbcode + \"_fill\"\n )\n\n a.starting_model = 1\n a.ending_model = 1\n\n a.make()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Builds alignment file for MODELLER.\")\n parser.add_argument(\n \"--input\",\n \"-i\",\n type=str,\n help=\"Input PDB file with missing residues.\",\n required=True,\n )\n parser.add_argument(\n \"--pdbcode\",\n type=str,\n help=\"PDB code identifier for the structure.\",\n required=True,\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n help=\"(Optional) Output align (.ali) file for completing residues.\",\n required=False,\n default=\"protein.ali\",\n )\n parser.add_argument(\n \"--loop_ref\",\n \"-lr\",\n dest=\"loop_ref\",\n help=\"(Optional) Enables loop refinement. 
\" \"Does not always work.\",\n required=False,\n action=\"store_true\",\n )\n parser.set_defaults(feature=True)\n\n args = parser.parse_args()\n\n # the PDB file has to be in the same directory, copying and using the code as name.\n pdb_path = args.input\n code = args.pdbcode\n print(\"input: \" + os.path.abspath(args.input))\n print(\"cwd: \" + os.getcwd() + code + \".pdb\")\n if os.path.abspath(args.input) == os.getcwd() + \"/\" + code + \".pdb\":\n raise ValueError(\n \"Input file comes from current working directory and cannot have \"\n + code\n + \".pdb already as a name. Please change the name (or location) of input PDB file.\"\n )\n else:\n shutil.copy(pdb_path, \"./\" + code + \".pdb\")\n\n build_align_file(args.input, args.pdbcode, output_align_file=args.output)\n complete_residues(code, align_file=args.output, loop_ref=args.loop_ref)\n" ]
[ [ "numpy.array" ] ]
pushkar-khetrapal/RealTimePanoptic-TensorRT
[ "c59d1eff81e75d06a0ecd288366f792e68ffe814" ]
[ "realtime_panoptic/models/panoptic_from_dense_box.py" ]
[ "# Copyright 2020 Toyota Research Institute. All rights reserved.\nimport torch\nimport torch.nn.functional as F\nfrom realtime_panoptic.utils.bounding_box import BoxList\nfrom realtime_panoptic.utils.boxlist_ops import (boxlist_nms, cat_boxlist, remove_small_boxes)\n\nclass PanopticFromDenseBox:\n \"\"\"Performs post-processing on the outputs of the RTPanonet.\n\n Parameters\n ----------\n pre_nms_thresh: float\n Acceptance class probability threshold for bounding box candidates before NMS.\n\n pre_nms_top_n: int\n Maximum number of accepted bounding box candidates before NMS.\n\n nms_thresh: float\n NMS threshold.\n\n fpn_post_nms_top_n: int\n Maximum number of detected object per image.\n\n min_size: int\n Minimum dimension of accepted detection.\n\n num_classes: int\n Number of total semantic classes (stuff and things).\n\n mask_thresh: float\n Bounding box IoU threshold to determined 'similar bounding box' in mask reconstruction.\n\n instance_id_range: list of int\n [min_id, max_id] defines the range of id in 1:num_classes that corresponding to thing classes.\n\n is_training: bool\n Whether the current process is during training process.\n \"\"\"\n\n def __init__(\n self,\n pre_nms_thresh,\n pre_nms_top_n,\n nms_thresh,\n fpn_post_nms_top_n,\n min_size,\n num_classes,\n mask_thresh,\n instance_id_range,\n is_training\n ):\n super(PanopticFromDenseBox, self).__init__()\n # assign parameters\n self.pre_nms_thresh = pre_nms_thresh\n self.pre_nms_top_n = pre_nms_top_n\n self.nms_thresh = nms_thresh\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\n self.min_size = min_size\n self.num_classes = num_classes\n self.mask_thresh = mask_thresh\n self.instance_id_range = instance_id_range\n self.is_training = is_training\n\n def process(\n self, locations, box_cls, box_regression, centerness, levelness_logits, semantic_logits, image_sizes\n ):\n \"\"\" Reconstruct panoptic segmentation result from raw predictions.\n\n This function conduct post processing of panoptic head raw prediction, including bounding box\n prediction, semantic segmentation and levelness to reconstruct instance segmentation results.\n\n Parameters\n ----------\n locations: list of torch.Tensor\n Corresponding pixel locations of each FPN predictions.\n\n box_cls: list of torch.Tensor\n Predicted bounding box class from each FPN layers.\n\n box_regression: list of torch.Tensor\n Predicted bounding box offsets from each FPN layers.\n\n centerness: list of torch.Tensor\n Predicted object centerness from each FPN layers.\n\n levelness_logits:\n Global prediction of best source FPN layer for each pixel location.\n\n semantic_logits:\n Global prediction of semantic segmentation.\n\n image_sizes: list of [int,int]\n Image sizes.\n\n Returns:\n --------\n boxlists: list of BoxList\n reconstructed instances with masks.\n \"\"\"\n num_locs_per_level = [len(loc_per_level) for loc_per_level in locations]\n\n sampled_boxes = []\n for i, (l, o, b, c) in enumerate(zip(locations[:-1], box_cls, box_regression, centerness)):\n if self.is_training:\n layer_boxes = self.forward_for_single_feature_map(l, o, b, c, image_sizes)\n for layer_box in layer_boxes:\n pred_indices = layer_box.get_field(\"indices\")\n pred_indices = pred_indices + sum(num_locs_per_level[:i])\n layer_box.add_field(\"indices\", pred_indices)\n sampled_boxes.append(layer_boxes)\n else:\n sampled_boxes.append(self.forward_for_single_feature_map(l, o, b, c, image_sizes))\n\n # sampled_boxes are a list of bbox_list per level\n # the following converts it to per image\n boxlists 
= list(zip(*sampled_boxes))\n # per image, concat bbox_list of different levels into one bbox_list\n # boxlists is a list of bboxlists of N images\n try:\n boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]\n boxlists = self.select_over_all_levels(boxlists)\n except Exception as e:\n print(e)\n for boxlist in boxlists:\n for box in boxlist:\n print(box, \"box shape\", box.bbox.shape)\n\n # Generate bounding box feature map at size of [H/4, W/4] with bounding box prediction as features.\n levelness_locations = locations[-1]\n _, c_semantic, _, _ = semantic_logits.shape\n N, _, h_map, w_map = levelness_logits.shape\n bounding_box_feature_map = self.generate_box_feature_map(levelness_locations, box_regression, levelness_logits)\n\n # process semantic raw prediction\n semantic_logits = F.interpolate(semantic_logits, size=(h_map, w_map), mode='bilinear')\n semantic_logits = semantic_logits.view(N, c_semantic, h_map, w_map).permute(0, 2, 3, 1)\n semantic_logits = semantic_logits.reshape(N, -1, c_semantic)\n\n # insert semantic prob into mask\n semantic_probability = F.softmax(semantic_logits, dim=2)\n semantic_probability = semantic_probability[:, :, self.instance_id_range[0]:]\n boxlists = self.mask_reconstruction(\n boxlists=boxlists,\n box_feature_map=bounding_box_feature_map,\n semantic_prob=semantic_probability,\n box_feature_map_location=levelness_locations,\n h_map=h_map,\n w_map=w_map\n )\n # resize instance masks to original image size\n if not self.is_training:\n for boxlist in boxlists:\n masks = boxlist.get_field(\"mask\")\n # NOTE: BoxList size is the image size without padding. MASK here is a mask with padding.\n # Mask need to be interpolated into padded image size and then crop to unpadded size.\n w, h = boxlist.size\n if len(masks.shape) == 3 and masks.shape[0] != 0:\n masks = F.interpolate(masks.unsqueeze(0), size=(h_map * 4, w_map * 4), mode='bilinear').squeeze()\n else:\n # handle 0 shape dummy mask.\n masks = masks.view([-1, h_map * 4, w_map * 4])\n masks = masks >= self.mask_thresh\n if len(masks.shape) < 3:\n masks = masks.unsqueeze(0)\n masks = masks[:, 0:h, 0:w].contiguous()\n boxlist.add_field(\"mask\", masks)\n return boxlists\n\n def forward_for_single_feature_map(self, locations, box_cls, box_regression, centerness, image_sizes):\n \"\"\"Recover dense bounding box detection results from raw predictions for each FPN layer.\n\n Parameters\n ----------\n locations: torch.Tensor\n Corresponding pixel location of FPN feature map with size of (N, H * W, 2).\n\n box_cls: torch.Tensor\n Predicted bounding box class probability with size of (N, C, H, W).\n\n box_regression: torch.Tensor\n Predicted bounding box offset centered at corresponding pixel with size of (N, 4, H, W).\n\n centerness: torch.Tensor\n Predicted centerness of corresponding pixel with size of (N, 1, H, W).\n\n Note: N is the number of FPN level.\n\n Returns\n -------\n results: List of BoxList\n A list of dense bounding boxes from each FPN layer.\n \"\"\"\n\n N, C, H, W = box_cls.shape\n # M = H x W is the total number of proposal for this single feature map\n\n # put in the same format as locations\n # from (N, C, H, W) to (N, H, W, C)\n box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)\n # from (N, H, W, C) to (N, M, C)\n # map class prob to (-1, +1)\n box_cls = box_cls.reshape(N, -1, C).sigmoid()\n # from (N, 4, H, W) to (N, H, W, 4) to (N, M, 4)\n box_regression = box_regression.view(N, 4, H, W).permute(0, 2, 3, 1)\n box_regression = box_regression.reshape(N, -1, 4)\n # from (N, 4, H, W) to 
(N, H, W, 1) to (N, M)\n # map centerness prob to (-1, +1)\n centerness = centerness.view(N, 1, H, W).permute(0, 2, 3, 1)\n centerness = centerness.reshape(N, -1).sigmoid()\n\n # before NMS, per level filter out low cls prob with threshold 0.05\n # after this candidate_inds of size (N, M, C) with values corresponding to\n # low prob predictions become 0, otherwise 1\n candidate_inds = box_cls > self.pre_nms_thresh\n\n # pre_nms_top_n of size (N, M * C) => (N, 1)\n # N -> batch index, 1 -> total number of bbox predictions per image\n pre_nms_top_n = candidate_inds.view(N, -1).sum(1)\n # total number of proposal before NMS\n # if have more than self.pre_nms_top_n (1000) clamp to 1000\n pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)\n\n # multiply the classification scores with centerness scores\n # (N, M, C) * (N, M, 1)\n box_cls = box_cls * centerness[:, :, None]\n\n results = []\n for i in range(N):\n # filer out low score candidates\n per_box_cls = box_cls[i] # (M, C)\n per_candidate_inds = candidate_inds[i] # (M, C)\n # per_box_cls of size P, P < M * C\n per_box_cls = per_box_cls[per_candidate_inds]\n\n # indices of seeds bounding boxes\n # 0-dim corresponding to M, location\n # 1-dim corresponding to C, class\n per_candidate_nonzeros = per_candidate_inds.nonzero()\n # Each of the following is of size P < M * C\n per_box_loc = per_candidate_nonzeros[:, 0]\n per_class = per_candidate_nonzeros[:, 1] + 1\n\n # per_box_regression of size (M, 4)\n per_box_regression = box_regression[i]\n # (M, 4) => (P, 4)\n # in P, there might be identical bbox prediction in M\n per_box_regression = per_box_regression[per_box_loc]\n # (M, 2) => (P, 2)\n # in P, there might be identical locations in M\n per_locations = locations[per_box_loc]\n\n\n # upperbound of the number of predictions for this image\n per_pre_nms_top_n = pre_nms_top_n[i]\n\n # if valid predictions is more than the upperbound\n # only select topK\n if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():\n per_box_cls, top_k_indices = \\\n per_box_cls.topk(per_pre_nms_top_n, sorted=False)\n per_class = per_class[top_k_indices]\n per_box_regression = per_box_regression[top_k_indices]\n per_locations = per_locations[top_k_indices]\n if self.is_training:\n per_box_loc = per_box_loc[top_k_indices]\n\n detections = torch.stack([\n per_locations[:, 0] - per_box_regression[:, 0],\n per_locations[:, 1] - per_box_regression[:, 1],\n per_locations[:, 0] + per_box_regression[:, 2],\n per_locations[:, 1] + per_box_regression[:, 3],\n ],\n dim=1)\n\n h, w = image_sizes[i]\n\n boxlist = BoxList(detections, (int(w), int(h)), mode=\"xyxy\")\n boxlist.add_field(\"labels\", per_class)\n boxlist.add_field(\"scores\", per_box_cls)\n if self.is_training:\n boxlist.add_field(\"indices\", per_box_loc)\n\n boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = remove_small_boxes(boxlist, self.min_size)\n results.append(boxlist)\n return results\n\n def generate_box_feature_map(self, location, box_regression, levelness_logits):\n \"\"\"Generate bounding box feature aggregating dense bounding box predictions.\n\n Parameters\n ----------\n location: torch.Tensor\n Pixel location of levelness.\n\n box_regression: list of torch.Tensor\n Bounding box offsets from each FPN.\n\n levelness_logits: torch.Tenor\n Global prediction of best source FPN layer for each pixel location.\n Predict at the resolution of (H/4, W/4).\n\n Returns\n -------\n bounding_box_feature_map: torch.Tensor\n Aggregated bounding box feature map.\n \"\"\"\n 
upscaled_box_reg = []\n N, _, h_map, w_map = levelness_logits.shape\n downsampled_shape = torch.Size((h_map, w_map))\n for box_reg in box_regression:\n upscaled_box_reg.append(F.interpolate(box_reg, size=downsampled_shape, mode='bilinear').unsqueeze(1))\n\n # N_level, 4, h_map, w_map\n upscaled_box_reg = torch.cat(upscaled_box_reg, 1)\n\n max_v, level = torch.max(levelness_logits[:, 1:, :, :], dim=1)\n\n box_feature_map = torch.gather(\n upscaled_box_reg, dim=1, index=level.unsqueeze(1).expand([N, 4, h_map, w_map]).unsqueeze(1)\n )\n\n box_feature_map = box_feature_map.view(N, 4, h_map, w_map).permute(0, 2, 3, 1)\n box_feature_map = box_feature_map.reshape(N, -1, 4)\n # generate all valid bboxes from feature map\n # shape (N, M, 4)\n levelness_locations_repeat = location.repeat(N, 1, 1)\n bounding_box_feature_map = torch.stack([\n levelness_locations_repeat[:, :, 0] - box_feature_map[:, :, 0],\n levelness_locations_repeat[:, :, 1] - box_feature_map[:, :, 1],\n levelness_locations_repeat[:, :, 0] + box_feature_map[:, :, 2],\n levelness_locations_repeat[:, :, 1] + box_feature_map[:, :, 3],\n ], dim=2)\n return bounding_box_feature_map\n\n def mask_reconstruction(self, boxlists, box_feature_map, semantic_prob, box_feature_map_location, h_map, w_map):\n \"\"\"Reconstruct instance mask from dense bounding box and semantic smoothing.\n\n Parameters\n ----------\n boxlists: List of Boxlist\n Object detection result after NMS.\n\n box_feature_map: torch.Tensor\n Aggregated bounding box feature map.\n\n semantic_prob: torch.Tensor\n Prediction semantic probability.\n\n box_feature_map_location: torch.Tensor\n Corresponding pixel location of bounding box feature map.\n\n h_map: int\n Height of bounding box feature map.\n\n w_map: int\n Width of bounding box feature map.\n \"\"\"\n for i, (boxlist, per_image_bounding_box_feature_map, per_image_semantic_prob,\n box_feature_map_loc) in enumerate(zip(boxlists, box_feature_map, semantic_prob, box_feature_map_location)):\n\n # decode mask from bbox embedding\n if len(boxlist) > 0:\n # query_boxes is of shape (P, 4)\n # dense_detections is of shape (P', 4)\n # P' is larger than P\n query_boxes = boxlist.bbox\n propose_cls = boxlist.get_field(\"labels\")\n # (P, 4) -> (P, 4, 1) -> (P, 4, P) -> (P, P', 4)\n propose_bbx = query_boxes.unsqueeze(2).repeat(1, 1,\n per_image_bounding_box_feature_map.shape[0]).permute(0, 2, 1)\n # (P',4) -> (4, P') -> (1, 4, P') -> (P, 4, P') -> (P, P', 4)\n voting_bbx = per_image_bounding_box_feature_map.permute(1, 0).unsqueeze(0).repeat(query_boxes.shape[0], 1,\n 1).permute(0, 2, 1)\n # implementation based on IOU for bbox_correlation_map\n # 0, 1, 2, 3 => left, top, right, bottom\n proposal_area = (propose_bbx[:, :, 2] - propose_bbx[:, :, 0]) * \\\n (propose_bbx[:, :, 3] - propose_bbx[:, :, 1])\n voting_area = (voting_bbx[:, :, 2] - voting_bbx[:, :, 0]) * \\\n (voting_bbx[:, :, 3] - voting_bbx[:, :, 1])\n w_intersect = torch.min(voting_bbx[:, :, 2], propose_bbx[:, :, 2]) - \\\n torch.max(voting_bbx[:, :, 0], propose_bbx[:, :, 0])\n h_intersect = torch.min(voting_bbx[:, :, 3], propose_bbx[:, :, 3]) - \\\n torch.max(voting_bbx[:, :, 1], propose_bbx[:, :, 1])\n w_intersect = w_intersect.clamp(min=0.0)\n h_intersect = h_intersect.clamp(min=0.0)\n w_general = torch.max(voting_bbx[:, :, 2], propose_bbx[:, :, 2]) - \\\n torch.min(voting_bbx[:, :, 0], propose_bbx[:, :, 0])\n h_general = torch.max(voting_bbx[:, :, 3], propose_bbx[:, :, 3]) - \\\n torch.min(voting_bbx[:, :, 1], propose_bbx[:, :, 1])\n # calculate IOU\n area_intersect = 
w_intersect * h_intersect\n area_union = proposal_area + voting_area - area_intersect\n torch.cuda.synchronize()\n\n area_general = w_general * h_general + 1e-7\n bbox_correlation_map = (area_intersect + 1.0) / (area_union + 1.0) - \\\n (area_general - area_union) / area_general\n\n per_image_cls_prob = per_image_semantic_prob[:, propose_cls - 1].permute(1, 0)\n # bbox_correlation_map is of size (P or per_pre_nms_top_n, P')\n bbox_correlation_map = bbox_correlation_map * per_image_cls_prob\n # query_boxes.shape[0] is the number of filtered boxes\n masks = bbox_correlation_map.view(query_boxes.shape[0], h_map, w_map)\n if len(masks.shape) < 3:\n masks = masks.unsqueeze(0)\n boxlist.add_field(\"mask\", masks)\n else:\n dummy_masks = torch.zeros(len(boxlist), h_map,\n w_map).float().to(boxlist.bbox.device).to(boxlist.bbox.dtype)\n boxlist.add_field(\"mask\", dummy_masks)\n return boxlists\n\n def select_over_all_levels(self, boxlists):\n \"\"\"NMS of bounding box candidates.\n\n Parameters\n ----------\n boxlists: list of Boxlist\n Pre-NMS bounding boxes.\n\n Returns\n -------\n results: list of Boxlist\n Final detection result.\n \"\"\"\n num_images = len(boxlists)\n results = []\n for i in range(num_images):\n boxlist = boxlists[i]\n scores = boxlist.get_field(\"scores\")\n labels = boxlist.get_field(\"labels\")\n if self.is_training:\n indices = boxlist.get_field(\"indices\")\n boxes = boxlist.bbox\n\n result = []\n w, h = boxlist.size\n # skip the background\n if boxes.shape[0] < 1:\n results.append(boxlist)\n continue\n for j in range(1, self.num_classes):\n inds = (labels == j).nonzero().view(-1)\n if len(inds) > 0:\n scores_j = scores[inds]\n boxes_j = boxes[inds, :].view(-1, 4)\n\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\n boxlist_for_class.add_field(\"scores\", scores_j)\n\n if self.is_training:\n indices_j = indices[inds]\n boxlist_for_class.add_field(\"indices\", indices_j)\n\n boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms_thresh, score_field=\"scores\")\n num_labels = len(boxlist_for_class)\n boxlist_for_class.add_field(\n \"labels\", torch.full((num_labels, ), j, dtype=torch.int64, device=scores.device)\n )\n result.append(boxlist_for_class)\n result = cat_boxlist(result)\n\n # global NMS\n result = boxlist_nms(result, 0.97, score_field=\"scores\")\n\n number_of_detections = len(result)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > self.fpn_post_nms_top_n > 0:\n cls_scores = result.get_field(\"scores\")\n image_thresh, _ = torch.kthvalue(cls_scores.cpu(), number_of_detections - self.fpn_post_nms_top_n + 1)\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep).squeeze(1)\n result = result[keep]\n results.append(result)\n return results" ]
[ [ "torch.Size", "torch.nonzero", "torch.cat", "torch.stack", "torch.cuda.synchronize", "torch.min", "torch.max", "torch.nn.functional.interpolate", "torch.full", "torch.nn.functional.softmax" ] ]
Mutanne/hiworld
[ "d4c536775ecdd948b6fa205cd43fb5f92c7496c5" ]
[ "python/Ta4_csv.py" ]
[ "import pandas as pd\n\ndf = pd.read_csv(\"hiworld\\\\python\\\\Arquivo.csv\", encoding = \"UTF-8\", sep = \",\")\n#dfj = pd.read_json(\"hiworld\\python\\Arquivo.json\") #deu pau\n\nprint(df)\n" ]
[ [ "pandas.read_csv" ] ]
18F/10x-MLaaS
[ "3e1df3bbd88037c20e916fab2c07117a63e3c639" ]
[ "HSM/utils/qualtrics.py" ]
[ "import requests\nimport zipfile\nimport json\nimport os\nimport sys\nimport pandas as pd\nfrom time import sleep\nfrom utils.config import qualtrics_sitewide_creds\nfrom utils import db, db_utils\n\n\nclass QualtricsApi:\n \"\"\"Query Qualtrics API for new survey responses and then write to database.\n\n Attributes:\n apiToken (str): a Qualtrics API token.\n surveyId (str): the survey id.\n fileFormat (str): the preferred file format. Only 'json' is possible now.\n dataCenter (str): the datacenter from the hostname of the qualtrics\n account url\n \"\"\"\n\n def __init__(self, last_response_id, apiToken=None, surveyId=None, fileFormat='json',\n dataCenter='cemgsa'):\n print(\"Getting data from Qualtrics...\")\n if not apiToken and not surveyId:\n apiToken = qualtrics_sitewide_creds['apiToken']\n surveyId = qualtrics_sitewide_creds['surveyId']\n\n self.apiToken = apiToken\n self.surveyId = surveyId\n self.fileFormat = fileFormat\n self.dataCenter = dataCenter\n if not last_response_id:\n db_utils.create_postgres_db()\n db.dal.connect()\n session = db.dal.Session()\n last_response_id = db_utils.fetch_last_RespondentID(session)\n self.lastResponseId = last_response_id\n\n def download_responses(self):\n \"\"\"\n Void function that gets and writes survey responses within the working\n directory.\n\n The process of getting survey responses requires four steps:\n 1. Request the responses with the CreateResponseExport API.\n 2. Request the export status with the GetResponseExportProgress API.\n 3. Once the export progress is 100, make a GET request to retrieve\n the response file, which will be a zipped file.\n 4. Unzip the file to find the survey responses in the format you\n requested (csv, csv2013, xml, json, or spss).\n\n Returns:\n None\n \"\"\"\n\n # Setting static parameters\n requestCheckProgress = 0\n baseUrl = \"https://{0}.gov1.qualtrics.com/API/v3/responseexports/\".format(self.dataCenter)\n headers = {\n \"content-type\": \"application/json\",\n \"x-api-token\": self.apiToken,\n }\n # Step 1: Creating Data Export\n downloadRequestUrl = baseUrl\n downloadRequestPayload = {\n \"format\": self.fileFormat,\n \"surveyId\": self.surveyId,\n \"useLabels\": True\n }\n # Include lastResponseId in payload if provided during init\n if self.lastResponseId:\n downloadRequestPayload['lastResponseId'] = self.lastResponseId\n\n downloadRequestResponse = requests.request(\"POST\", downloadRequestUrl,\n data=json.dumps(downloadRequestPayload),\n headers=headers)\n\n status_code = downloadRequestResponse.json()['meta']['httpStatus']\n if '200' in status_code:\n print('Post Request to Qualtrics was a success!')\n else:\n print(status_code)\n # TODO: log errors, including 500 status codes (see GH37)\n sys.exit(0)\n progressId = downloadRequestResponse.json()[\"result\"][\"id\"]\n\n # Step 2: Checking on Data Export Progress and waiting until export is ready\n while requestCheckProgress < 100:\n sleep(2)\n requestCheckUrl = baseUrl + progressId\n print(requestCheckUrl)\n requestCheckResponse = requests.request(\"GET\", requestCheckUrl, headers=headers)\n requestCheckProgress = requestCheckResponse.json()[\"result\"][\"percentComplete\"]\n print(\"Download is \" + str(requestCheckProgress) + \" complete\")\n\n # Step 3: Downloading file\n requestDownloadUrl = baseUrl + progressId + '/file'\n print(requestDownloadUrl)\n requestDownload = requests.request(\"GET\", requestDownloadUrl,\n headers=headers, stream=True)\n\n # Step 4: Unzipping the file\n with open(\"RequestFile.zip\", \"wb\") as f:\n for chunk 
in requestDownload.iter_content(chunk_size=1024):\n f.write(chunk)\n zipfile.ZipFile(\"RequestFile.zip\").extractall(\"temp\")\n os.remove(\"RequestFile.zip\")\n\n def get_data(self):\n \"\"\"\n Convert the json into a pandas dataframe\n \"\"\"\n file_name = os.path.join(os.getcwd(), 'temp', qualtrics_sitewide_creds['filename'])\n with open(file_name, encoding='utf8') as f:\n data = json.load(f)\n df = pd.DataFrame(data['responses'])\n # replace np.nan with None so sql insertions don't insert 'nan' strings\n df = df.where(pd.notnull(df), None)\n os.remove(file_name)\n df_n_rows = df.shape[0]\n # if number of rows more than zero\n if df_n_rows > 0:\n return df\n else:\n print(\"No new survey responses to download. Exiting\")\n sys.exit(0)\n" ]
[ [ "pandas.DataFrame", "pandas.notnull" ] ]
namanshrimali/doepd.ai
[ "fc57af2e131965d9d6c89e39a3eeab41c8dff40b" ]
[ "models/doepd_net.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nfrom .midas.midas_net import MidasNet\r\n\r\nclass DoepdNet(torch.nn.Module):\r\n \"\"\"\r\n There are 3 run modes available for this model.\r\n 1. Yolo : Trains/Inferences only yolo layer, while ignoring midas and planeRCNN\r\n 2. PlaneRCNN : Trains/Inferences only PlaneRCNN layer, while ignoring midas and yolo\r\n 3. All : Trains/Inferences every layer\r\n \"\"\"\r\n midas_encoder_layered_output = []\r\n \r\n def __init__(self, run_mode, midas_weights = \"weights/model-f6b98070.pt\", image_size=384):\r\n super(DoepdNet, self).__init__()\r\n self.run_mode = run_mode\r\n\r\n self.midas_net = MidasNet(midas_weights) \r\n midas_encoder_filters = [256, 256, 512, 512, 1024] # output filters from each layer of resnext 101\r\n if self.run_mode == 'yolo' or self.run_mode == 'all':\r\n from .yolo.yolo_decoder import YoloDecoder\r\n # Each of the three layers in yolo takes input from last 3 layers of midas\r\n self.yolo_decoder = YoloDecoder(midas_encoder_filters, (image_size, image_size))\r\n self.yolo_layers = self.yolo_decoder.yolo_layers\r\n self.midas_layer_2_to_yolo_small_obj = nn.Conv2d(in_channels= 512, out_channels = 256, kernel_size = 1, padding = 0)\r\n self.midas_layer_3_to_yolo_med_obj = nn.Conv2d(in_channels= 1024, out_channels = 512, kernel_size = 1, padding = 0)\r\n self.midas_layer_4_to_yolo_med_obj = nn.Conv2d(in_channels= 2048, out_channels = 512, kernel_size = 1, padding = 0)\r\n self.midas_layer_4_to_yolo_large_obj = nn.Conv2d(in_channels= 2048, out_channels = 1024, kernel_size = 1, padding = 0)\r\n \r\n # if self.run_mode == 'planercnn' or self.run_mode == 'all':\r\n # from .planercnn.planercnn_decoder import MaskRCNN\r\n # from utils.config import PlaneConfig\r\n\r\n # import sys\r\n \r\n # sys.argv=['']\r\n # del sys\r\n \r\n # from utils.options import parse_args\r\n # args = parse_args()\r\n # config = PlaneConfig(args)\r\n # self.plane_rcnn_decoder = MaskRCNN(config)\r\n \r\n # Freeze training for midas (encoder)\r\n for param in self.midas_net.pretrained.parameters():\r\n param.requires_grad = False\r\n \r\n def forward(self, x, plane_rcnn_image_meta = None, augment=False, mode='inference_detection', use_nms=2, use_refinement=True, return_feature_map=False):\r\n doepd_forward_output = [None, None, None]\r\n encoder_layered_outputs = self.midas_net.forward_encoder(x)\r\n \r\n if self.run_mode == 'yolo' or self.run_mode == 'all':\r\n yolo_small = self.midas_layer_2_to_yolo_small_obj(encoder_layered_outputs[1]) # midas resnext 101 layer 2\r\n yolo_med = self.midas_layer_3_to_yolo_med_obj(encoder_layered_outputs[2]) # midas resnext 101 layer 3\r\n yolo_med_before_upsample = self.midas_layer_4_to_yolo_med_obj(encoder_layered_outputs[3]) # midas resnext 101 layer 4\r\n yolo_large = self.midas_layer_4_to_yolo_large_obj(encoder_layered_outputs[3]) # midas resnext 101 layer 4\r\n \r\n doepd_forward_output[0] = self.yolo_decoder.forward([yolo_small, yolo_med_before_upsample, yolo_med, yolo_large], augment=augment)\r\n \r\n if self.run_mode == 'midas' or self.run_mode == 'all':\r\n doepd_forward_output[1] = self.midas_net.forward_decoder(encoder_layered_outputs)\r\n \r\n # if self.run_mode == 'planercnn' or self.run_mode == 'all':\r\n # doepd_forward_output[2] = self.plane_rcnn_decoder.predict(x, plane_rcnn_image_meta, mode, encoder_layered_outputs = encoder_layered_outputs, return_feature_map= return_feature_map)\r\n return doepd_forward_output\r\n \r\ndef load_doepd_weights(self, device='cpu', scratch=False, train_mode = False, 
load_mode='all'):\r\n yolo_weights = []\r\n chkpt = [None, None]\r\n from .yolo.yolo_decoder import load_yolo_decoder_weights\r\n if not scratch:\r\n # loading yolo weights \r\n if self.run_mode == 'yolo' or self.run_mode == 'all':\r\n yolo_weight_file = None\r\n # loading yolo weights from last/best based on train_mode. Will update to add planercnn weights\r\n if train_mode:\r\n yolo_weight_file = 'weights/doepd_yolo_last.pt'\r\n else:\r\n yolo_weight_file = 'weights/doepd_yolo_best.pt'\r\n \r\n chkpt[0] = torch.load(yolo_weight_file, map_location = \"cpu\") \r\n num_items = 0\r\n \r\n for k, v in chkpt[0]['model'].items():\r\n if num_items>=666 and num_items<756:\r\n if not k.endswith('num_batches_tracked'):\r\n yolo_weights.append(v.detach().numpy())\r\n num_items = num_items + 1 \r\n \r\n load_yolo_decoder_weights(self.yolo_decoder, yolo_weights)\r\n \r\n self.midas_layer_2_to_yolo_small_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_2_to_yolo_small_obj.weight'])\r\n self.midas_layer_2_to_yolo_small_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_2_to_yolo_small_obj.bias'])\r\n self.midas_layer_3_to_yolo_med_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_3_to_yolo_med_obj.weight'])\r\n self.midas_layer_3_to_yolo_med_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_3_to_yolo_med_obj.bias'])\r\n self.midas_layer_4_to_yolo_med_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_med_obj.weight'])\r\n self.midas_layer_4_to_yolo_med_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_med_obj.bias'])\r\n self.midas_layer_4_to_yolo_large_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_large_obj.weight'])\r\n self.midas_layer_4_to_yolo_large_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_large_obj.bias'])\r\n \r\n # elif self.run_mode == 'planercnn' or self.run_mode == 'all':\r\n # planer_cnn_file = 'weights/planer_checkpoint.pth'\r\n # chkpt[1] = torch.load(planer_cnn_file, map_location = \"cpu\") \r\n # num_items = 0\r\n \r\n # for k, v in chkpt[1].items():\r\n # if num_items>=728:\r\n # # eg k = depth.deconv1.2.running_var\r\n # # we need plane_rcnn_decoder.depth.deconv1.2.running_var\r\n # self.plane_rcnn_decoder.state_dict()[f'plane_rcnn_decoder.{k}'] = v.data\r\n # num_items = num_items + 1 \r\n \r\n else:\r\n # loading yolo_best weights : got from 300 epochs trained in Assignment 13\r\n \r\n yolo_weight_file='weights/yolo_old_300.pt'\r\n \r\n chkpt[0] = torch.load(yolo_weight_file, map_location = device)\r\n \r\n num_items=0\r\n for k, v in chkpt[0]['model'].items():\r\n if num_items >= 354:\r\n if not k.endswith('num_batches_tracked'):\r\n if v.shape[0]!=255:\r\n yolo_weights.append(v.detach().numpy())\r\n num_items = num_items + 1\r\n \r\n load_yolo_decoder_weights(self.yolo_decoder, yolo_weights)\r\n \r\n return chkpt\r\n" ]
[ [ "torch.nn.Conv2d", "torch.load", "torch.nn.Parameter" ] ]
cjporteo/ml-NBA-asg-predictor
[ "2a0f8b9660def980a910b839152b0b2f9418844e" ]
[ "Data Collection/bballref_ASG_scrape.py" ]
[ "from bs4 import BeautifulSoup\nfrom collections import defaultdict\nimport pandas as pd\nimport pickle\nimport requests\nfrom unidecode import unidecode\n\n# this dictionary will map players to a set containing all the years in which they were selected for an all-star game, either initially or as a replacement\nall_star_appearances = defaultdict(set)\n\n# rows to ignore when iterating the roster tables\nignore_fields = set(['Team Totals', 'Reserves'])\n\nSTART_YEAR, END_YEAR = 1970, 2020\n\n # unidecode doesn't catch the accented c in Peja's last name (Stojakovic), fix it\n # also overwrite any instance of Metta World Peace to Ron Artest\ndef fix_name(full_name):\n\tfirst_name = full_name.split(' ')[0]\n\tif first_name == 'Peja':\n\t\treturn 'Peja Stojakovic'\n\telif first_name == 'Metta':\n\t\treturn 'Ron Artest'\n\telse:\n\t\treturn unidecode(full_name)\n\nfor year in range(START_YEAR, END_YEAR):\n\n\t# no ASG played in 1999 because of the lockout\n\tif year == 1999:\n\t\tcontinue\n\n\tprint('Scraping ASG {} data...'.format(year))\n\n\t# will store all the all-stars for this year\n\tall_stars = set([])\n\n\thtml = requests.get('https://www.basketball-reference.com/allstar/NBA_{}.html'.format(year)).content\n\tsoup = BeautifulSoup(html, 'html.parser')\n\n\t# this part was annoying - back when ASG was always East vs. West, the tables were encoded with id=\"East\"/id=\"West\" so they could be extracted more easily/reliably\n\t# but now, you have games like Giannis vs. LeBron and the table id's are different, so I had to extract them by index, which is unreliable in the event that the \n\t# site's design changes in the future\n\n\t# gets rosters for team 1 and team 2\n\ts1, s2 = soup.findAll('table')[1:3]\n\n\tdf1 = pd.read_html(str(s1))[0]\n\tdf2 = pd.read_html(str(s2))[0]\n\n\t# get the all-stars from teams 1 and 2\n\tfor df in [df1, df2]:\n\t\tfor i, row in df.iterrows():\n\t\t\tif pd.notnull(row[0]) and row[0] not in ignore_fields:\n\t\t\t\tplayer = row[0]\n\t\t\t\tall_stars.add(fix_name(player))\n\n\t# gets all li elements in the page\n\ts3 = soup.findAll('li') \n\n\t# finds the li element that contains the data pertaining to injury related selections - players who were selected but couldn't participate due to injury,\n\t# and their respective replacements\n\t#\n\t# since all_stars is a hashset, we don't need to worry about accidentally double counting an all-star\n\tfor s in s3:\n\t\tif 'Did not play' in str(s):\n\t\t\tfor player in [name.get_text() for name in s.findAll('a')]: # all the injured players and their replacements\n\t\t\t\tall_stars.add(fix_name(player))\n\t\t\tbreak\n\n\t# update the appearances dictionary\n\tfor player in all_stars:\n\t\tall_star_appearances[player].add(year)\n\nsorted_all_star_appearances = sorted([(player, sorted(list(appearances))) for player, appearances in all_star_appearances.items()], key = lambda x : -len(x[1]))\n\nprint('\\nAll all-star appearances since 1970 (sorted by number of appearances):\\n')\n\nfor player, appearances in sorted_all_star_appearances:\n\tprint('{}: {}'.format(player, appearances))\n\n# export the dictionary to local disk for future recall in statsnba_fullscrape.py\nout = open('all_star_appearances.pickle', 'wb')\npickle.dump(all_star_appearances, out)\nout.close" ]
[ [ "pandas.notnull" ] ]
RMeli/gnina-torch
[ "eb57e2a62628d39f2a66e7fa1748e80705366761" ]
[ "tests/conftest.py" ]
[ "import os\n\nimport molgrid\nimport pytest\nimport torch\n\n\ndef pytest_addoption(parser):\n # Allows user to force tests to run on the CPU (useful to get performance on CI)\n parser.addoption(\n \"--nogpu\",\n action=\"store_false\",\n help=\"Force tests to run on CPU\",\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef device(pytestconfig):\n \"\"\"\n Configure device.\n\n Notes\n -----\n Tests run automatically on the GPU if available, unless the user forces tests to run\n ion the CPU by passing the :code:`--nogpu` option.\n \"\"\"\n\n gpu = pytestconfig.getoption(\"--nogpu\")\n\n if gpu:\n device_index = 0\n device = torch.device(\n f\"cuda:{device_index}\" if torch.cuda.is_available() else \"cpu\"\n )\n molgrid.set_gpu_device(device_index)\n else:\n device = torch.device(\"cpu\")\n\n return device\n\n\n@pytest.fixture(scope=\"session\")\ndef trainfile() -> str:\n \"\"\"\n Path to small training file.\n \"\"\"\n path = os.path.dirname(__file__)\n return os.path.join(path, \"data\", \"test.types\")\n\n\n@pytest.fixture\ndef testfile() -> str:\n \"\"\"\n Path to small test file.\n \"\"\"\n path = os.path.dirname(__file__)\n return os.path.join(path, \"data\", \"test.types\")\n\n\n@pytest.fixture(scope=\"session\")\ndef dataroot() -> str:\n \"\"\"\n Path to test directory.\n \"\"\"\n path = os.path.dirname(__file__)\n return os.path.join(path, \"data\", \"mols\")\n" ]
[ [ "torch.device", "torch.cuda.is_available" ] ]
SergiR1996/PELEAnalysis-Processing
[ "ad844f14487998eb963186d3b8f314cc9307df28", "ad844f14487998eb963186d3b8f314cc9307df28" ]
[ "PELEAnalysis-Processing/Protein_Mutator/MutateScore.py", "PELEAnalysis-Processing/PELE_scripts/PELEPlot3.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Global imports\nimport sys,time\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.spatial.distance import cdist\nimport itertools\nimport argparse as ap\nimport multiprocessing as mp\n\n# Script information\n__author__ = \"Sergi Rodà\"\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n__maintainer__ = \"Sergi Rodà\"\n__email__ = \"sergi.rodallordes@bsc.es\"\n\n\nclass MutateScore():\n\n\tdef __init__(self):\n\t\t\n\t\tself.__ref_file, self.__filename, self.__ref_residues_indices, self.__Atom_types, self.__output = self.parseArgs()\n\n\tdef parseArgs(self):\n\t\t\"\"\"\n\t\tParse arguments from command-line\n\t\t\"\"\"\n\n\t\tparser = ap.ArgumentParser(description='Script used to compute the local RMSD of the specified \\\n\t\t\tresidues of a reference PDB file against all the residues of a target PDB file')\n\t\toptional = parser._action_groups.pop()\n\t\trequired = parser.add_argument_group('required arguments')\n\t\tparser.add_argument(\"reference\", metavar=\"FILE\",type=str, help=\"path to reference PDB file\")\n\t\tparser.add_argument(\"input\", metavar=\"FILE\",type=str, help=\"path to input PDB file\")\n\t\trequired.add_argument(\"-r\",\"--residues\",required=True,metavar=\"STRING\",\n\t\t\t\t\t\t\t\ttype=int,nargs='*',help=\"reference residue indices\")\n\t\toptional.add_argument(\"-AT\",\"--Atom_types\",metavar=\"STRING\",type=str,nargs='*',\n\t\t\thelp=\"Atom types for the RMSD calculation. They must be indicated \\\n\t\t\twith underscore referring to spaces\",default=[\"_CA_\",\"_N__\",\"_O__\"])\n\t\toptional.add_argument(\"-O\",\"--output\",metavar=\"STRING\",type=str,\n\t\t\thelp=\"Output filename\",default=\"scores.txt\")\n\t\tparser._action_groups.append(optional)\n\t\targs = parser.parse_args()\n\n\t\tself.__ref_file = args.reference\n\t\tself.__filename =args.input\n\t\tself.__ref_residues_indices = args.residues\n\t\tself.__Atom_types = args.Atom_types\n\t\tself.__output = args.output\n\n\t\treturn self.__ref_file, self.__filename, self.__ref_residues_indices, self.__Atom_types, self.__output\n\n\t@property\n\tdef filename(self):\n\t\treturn self.__filename\n\n\t@property\n\tdef ref_residues_indices(self):\n\t\treturn self.__ref_residues_indices\n\t\n\n\tdef GetCoordinates(self,file,ref_coord = False):\n\t\t\"\"\"\n\t\tThis method returns the coordinates of a PDB file and\n\t\treturns them. 
The reference coordinates are stored or \n\t\tnot according to a boolean value.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tfile : string\n\t\t\t\tPDB filename\n\t\tref_coord: bool\n\t\t\t\tBoolean value to indicate whether they are\n\t\t\t\treference residues or not\n\n\t\tRETURNS\n\t\t------\n\t\tres_coord: list of floats\n\t\t\"\"\"\n\n\t\tPDB = open(file)\n\t\tcoordinates,res_coord,aux_coord,counter = [],[],[],0\n\t\tfor line in PDB:\n\t\t\tif (line[0:4] == \"ATOM\" or line[0:6] == \"HETATM\") and (line[12:16].replace(\" \",\"_\") in self.__Atom_types):\n\t\t\t\tif ref_coord:\n\t\t\t\t\tif int(line[22:26].strip()) in self.__ref_residues_indices:\n\t\t\t\t\t\tx = float(line[30:38].strip())\n\t\t\t\t\t\ty = float(line[38:46].strip())\n\t\t\t\t\t\tz = float(line[46:54].strip())\n\t\t\t\t\t\tres_coord.append([x,y,z])\n\t\t\t\telse:\n\t\t\t\t\tx = float(line[30:38].strip())\n\t\t\t\t\ty = float(line[38:46].strip())\n\t\t\t\t\tz = float(line[46:54].strip())\n\t\t\t\t\taux_coord.append([x,y,z])\n\t\t\t\t\tcounter +=1\n\t\t\t\t\tif counter%len(self.__Atom_types) == 0:\n\t\t\t\t\t\tres_coord.append(aux_coord)\n\t\t\t\t\t\taux_coord = []\n\n\t\treturn res_coord\n\n\tdef RMSD(self,ref_coordinates,coordinates):\n\t\t\"\"\"\n\t\tThis method computes the root-mean-square deviation (RMSD) for\n\t\tthe target coordinates against the reference coordinates.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tref_coordinates : array of floats\n\t\t\t\tCoordinates of the reference atoms\n\t\tcoordinates: array of floats\n\t\t\t\tCoordinates of the target atoms\n\n\t\tRETURNS\n\t\t------\n\t\trmsd: float\n\t\t\t\tValue of the RMSD\n\t\t\"\"\"\n\n\t\tD,N,RMS = len(ref_coordinates[0]),len(ref_coordinates),0.0\n\t\tfor v, w in zip(ref_coordinates, coordinates):\n\t\t\t\tRMS += sum([(v[i] - w[i])**2.0 for i in range(D)])\n\t\trmsd = np.sqrt((RMS/N))\n\n\t\treturn rmsd\n\n\tdef Translate(self,coordinates):\n\t\t\"\"\"\n\t\tThis method computes the centroid of some coordinates and\n\t\tsubstracts it from them, translating to the center of \n\t\tcoordinates.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tcoordinates: array of floats\n\t\t\t\tCoordinates of the atoms\n\n\t\tRETURNS\n\t\t------\n\t\tcentered_coordinates: array of floats\n\t\t\t\tCentered coordinates of the atoms\n\t\t\"\"\"\n\n\t\tcentered_coordinates = []\n\t\tC = np.mean(coordinates,axis=0)\n\t\tcentered_coordinates = (coordinates-C)\n\n\t\treturn centered_coordinates\n\n\tdef Kabsch(self,coordinates,ref_coordinates):\n\t\t\"\"\"\n\t\thttps://en.wikipedia.org/wiki/Kabsch_algorithm\n\t\t\"\"\"\n\n\t\t# Computation of the covariance matrix\n\t\tC = np.dot(np.transpose(coordinates), ref_coordinates)\n\n\t\t# Computation of the optimal rotation matrix\n\t\tV, S, W = np.linalg.svd(C)\n\t\td = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n\n\t\tif d:\n\t\t\tS[-1] = -S[-1]\n\t\t\tV[:, -1] = -V[:, -1]\n\n\t\t# Create Rotation matrix U\n\t\tU = np.dot(V, W)\n\n\t\treturn U\n\n\tdef Rotate(self,coordinates,ref_coordinates):\n\t\t\"\"\"\n\t\tThis method computes the optimal rotate matrix\n\t\tto rotate the coordinates according to the \n\t\treference coordinates.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tcoordinates: array of floats\n\t\t\t\tCoordinates of the target atoms\n\t\tref_coordinates: array of floats\n\t\t\t\tCoordinates of the reference atoms\n\n\t\tRETURNS\n\t\t------\n\t\tcoordinates: array of floats\n\t\t\t\tRotated coordinates of the atoms\n\t\t\"\"\"\n\n\t\tU = self.Kabsch(coordinates,ref_coordinates)\n\n\t\t# Rotate P\n\t\tcoordinates = 
np.dot(coordinates,U)\n\n\t\treturn coordinates\n\n\tdef DecompressList(self,coordinates):\n\t\t\"\"\"\n\t\tThis method decompress the coordinates saved \n\t\tas a list of lists into a list\n\n\t\tPARAMETERS\n\t\t----------\n\t\tcoordinates: array of floats\n\t\t\t\tCoordinates of the atoms\n\n\t\tRETURNS\n\t\t------\n\t\tnew_coordinates: array of floats\n\t\t\t\tRotated coordinates of the atoms\t\n\t\t\"\"\"\n\n\t\tnew_coordinates = []\n\t\tfor sublist in coordinates:\n\t\t\tfor item in sublist:\n\t\t\t\tnew_coordinates.append(item)\n\n\t\treturn new_coordinates\n\n\tdef FindResidues(self,file,coordinates):\n\t\t\"\"\"\n\t\tThis method finds the residues that are contained in \n\t\tthe coordinates of the specified atoms.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tfile: string\n\t\t\t\tPDB filename\n\t\tcoordinates: array of floats\n\t\t\t\tCoordinates of the atoms\n\n\t\tRETURNS\n\t\t------\n\t\t\"\".join(Residues): string\n\t\t\t\tThe name of the residues that contain the coordinates\t\n\t\t\"\"\"\n\n\t\tdef RoundFloat(number):\n\t\t\t\"\"\"\n\t\t\tThis method appends up to 3 decimals to\n\t\t\tall coordinates in order to be converted\n\t\t\tto float in the FindResidues method.\n\n\t\t\tPARAMETERS\n\t\t\t----------\n\t\t\tnumber: float\n\t\t\t\t\tcoordinate in one of the axis\n\n\t\t\tRETURNS\n\t\t\t------\n\t\t\tA: string\n\t\t\t\t\tThe string of the coodinates with 3 decimals\t\n\t\t\t\"\"\"\n\t\t\tA = str(round(number,3))\n\t\t\twhile len(A.split(\".\")[1]) < 3:\n\t\t\t\tA+=\"0\"\n\n\t\t\treturn A\n\n\t\tResidues,res_index = [],[]\n\t\tPDB = open(file)\n\t\tlines = PDB.readlines();PDB.close()\n\t\tfor i in range(len(coordinates)):\n\t\t\tfor line in lines:\n\t\t\t\tif (line[0:4] == \"ATOM\" or line[0:6] == \"HETATM\") and (line[12:16].replace(\" \",\"_\") in self.__Atom_types):\n\t\t\t\t\tx,y,z = RoundFloat(coordinates[i][0][0]),RoundFloat(coordinates[i][0][1]),RoundFloat(coordinates[i][0][2])\n\t\t\t\t\tif (line[30:38].strip() == x) and (line[38:46].strip() == y) and (line[46:54].strip() == z):\n\t\t\t\t\t\tif line[22:26].strip() not in res_index:\n\t\t\t\t\t\t\tResidues.append(line[17:20].strip()+\"_\"+line[22:26].strip()+\" \")\n\n\t\treturn \"\".join(Residues)\n\n\tdef ComputeScore(self,combination):\n\t\t\"\"\"\n\t\tThis method computes the RMSD for all the permutations \n\t\tin a combination of coordinates of some target \n\t\tresidues.\n\n\t\tPARAMETERS\n\t\t----------\n\t\tcombination: list of lists of floats\n\t\t\t\tCoordinates of the atoms of the target residues\n\n\t\tRETURNS\n\t\t------\n\t\tresults: list of lists of a integer and a string\n\t\t\t\tThe RMSD and the residues of the combination\t\n\t\t\"\"\"\n\n\t\tref_coordinates = self.GetCoordinates(self.__ref_file, True)\n\t\tref_coordinates = self.Translate(ref_coordinates)\n\t\tref_coordinates = np.array(ref_coordinates)\n\t\tresults = []\n\n\t\tfor permutation in itertools.permutations(combination,len(self.__ref_residues_indices)):\n\t\t\tcombination_cent = self.Translate(self.DecompressList(permutation))\n\t\t\tfinal_coordinates = self.Rotate(combination_cent,ref_coordinates)\n\t\t\tRMSD = self.RMSD(ref_coordinates,final_coordinates)\n\t\t\tAux = [RMSD,self.FindResidues(self.__filename,permutation)]\n\t\t\tif Aux[0] < 1.0:\n\t\t\t \tprint(Aux)\n\t\t\tresults.append(Aux)\n\n\t\treturn results\n\n\tdef main(self):\n\t\t\"\"\"\n\t\tMain function\n\n\t\tIt is called when this script is the main program called by the interpreter\n\t\t\"\"\"\n\n\t\toutput = open(self.__output,\"w\")\n\t\tresults = []\n\n\t\tstart = 
time.time()\n\t\tprint(\"\\nRMSD of all combinations is starting to be computed \\n\")\n\n\t\tpool = mp.Pool(6)\n\t\tresults.append(pool.map(\n\t\t\tself.ComputeScore,itertools.combinations(self.GetCoordinates(self.filename),len(self.ref_residues_indices))))\n\t\tpool.terminate()\n\n\t\tresults = self.DecompressList(self.DecompressList((results)))\n\t\t\n\t\tresults.sort(key= lambda x : x[0])\n\n\t\tfor elem in results:\n\t\t\toutput.write(\"\\nRMSD: {}\".format(elem[0])+\" // Residues: {} \\n\".format(elem[1]))\n\n\t\tend = time.time()\n\t\tprint(\"\\nThe main code needed {} seconds to compute all scores for all the combinations \\n\".format(end-start))\n\n\nif __name__==\"__main__\":\n\t\"\"\"Call the main function\"\"\"\n\tA = MutateScore()\n\tA.main()\n", "# -*- coding: utf-8 -*-\n\n\n# Global imports\nfrom __future__ import unicode_literals\nimport os\nimport glob\nimport argparse as ap\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"tkagg\")\nimport seaborn as sns\nfrom math import isnan\n\n# Local imports\nfrom PELEParseReports import *\n\n\n# Script information\n__author__ = [\"Marti Municoy\", \"Sergi Rodà\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n__maintainer__ = [\"Marti Municoy\", \"Sergi Rodà\"]\n__email__ = [\"marti.municoy@bsc.es\",\"sergi.rodallordes@bsc.es\"]\n\n# Possible font of the axis and the title\nDict_of_fonts={\"title\" : {'family': 'serif',\n 'weight': 'bold',\n 'size': 16,},\n\"axis\" : {'family': 'serif',\n 'weight': 'normal',\n 'size': 14,}}\n\n\n# Functions\ndef parseArgs():\n \"\"\"Parse arguments from command-line\n\n RETURNS\n -------\n reports : string\n list of report files to look for data\n x_data : string\n data to parse and assign to the X axis\n y_data : string\n data to parse and assign to the Y axis\n z_data : string\n data to parse and assign to the colorbar or the 3rd axis\n z_data : string\n data to parse and assign to the colorbar of the 3D scatter plot\n z_max : float\n it sets the maximum range value of the colorbar\n z_min : float\n it sets the minimum range value of the colorbar\n output_path : string\n output directory where the resulting plot will be saved\n title : string\n title of the generated figure\n font: list of strings\n\t\t key representing the font style in the font dictionary\n color : string\n optional color to use for the figure\n size: integer\n The font size of all the labels in the plot\n scatterplot: boolean\n Perform the 2D scatter plot of two PELE metrics and a 3rd metric as colorbar (hover function)\n twodensityplot: boolean\n Perform the 2D density plot of two PELE metrics (with the points represented)\n densityplot: boolean\n Perform the density plot of one PELE metric\n pointplot: boolean\n Perform the 2D scatter plot of two PELE metrics and a 3rd metric as colorbar\n threeDscatterplot: boolean\n Perform the 3D scatter plot of three PELE metrics and a 4th metric as colorbar (hover function)\n bestquantile : float\n it sets the best quantile out of the data to be plotted\n \"\"\"\n\n parser = ap.ArgumentParser(description='Script used to generate plots from the metrics \\\n of the reports files from a PELE simulation')\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n required.add_argument(\"-i\", \"--input\", required=True, metavar=\"FILE\",\n type=str, nargs='*', help=\"path to report files\")\n optional.add_argument(\"-X\", \"--xaxis\", 
metavar=\"INTEGER [METRIC]\",\n type=str, nargs='*', help=\"column numbers and \" +\n \"metric to plot on the X axis\", default=None)\n optional.add_argument(\"-Y\", \"--yaxis\", metavar=\"INTEGER [METRIC]\",\n type=str, nargs='*', help=\"column numbers and \" +\n \"metric to plot on the Y axis\", default=None)\n optional.add_argument(\"-Z\", \"--zaxis\", metavar=\"INTEGER [METRIC]\",\n type=str, nargs='*', help=\"column numbers and \" +\n \"metric to represent in the colorbar\", default=None)\n optional.add_argument(\"-Z2\",\"--z2axis\", metavar=\"INTEGER [METRIC]\",\n type=str, nargs='*', help=\"column numbers and \" +\n \"metric to represent in the colorbar of the 3D scatter plot\", default=None)\n optional.add_argument(\"-o\", \"--output\", metavar=\"PATH\", type=str,\n help=\"output path to save figure\", default=None)\n optional.add_argument(\"-r\", \"--Zmin\", metavar=\"FLOAT\", type=float,\n help=\"minimum Z value for the colorbar (or Z2 value in the 3D scatter plot)\",\n default=None)\n optional.add_argument(\"-R\", \"--Zmax\", metavar=\"FLOAT\", type=float,\n help=\"maximum Z value for the colorbar (or Z2 value in the 3D scatter plot)\",\n default=None)\n optional.add_argument(\"-T\",\"--title\", metavar=\"STRING\", type=str,\n\t\t\t help = \"title of the figure\", default=\"\")\n optional.add_argument(\"-F\",\"--font\", metavar=\"STRING [STRING]\", type=str,nargs='*',\n help = \"a list of the name of the font of the axis and the title\", default=\"\")\n optional.add_argument(\"-CO\",\"--color\", metavar=\"STRING\", type=str,\n help = \"The color that you want to apply to the plot (only use when no colorbar is used)\", default=\"\")\n optional.add_argument(\"-CM\",\"--colormap\", metavar=\"STRING\", type=str,\n help = \"The color that you want to apply to the colorbar (only for SP and TP and must be matplotlib valid)\", default=\"\")\n optional.add_argument(\"-S\",\"--size\", metavar=\"INTEGER\", type=int,\n help = \"the size of the font for all the plot (only for SP, DP, PP, and TP)\", default=12)\n optional.add_argument(\"-SP\",\"--scatterplot\"\n ,help = \"Perform the archetypical PELEPlot\", action = \"store_true\")\n optional.add_argument(\"-DDP\",\"--twodensityplot\"\n ,help = \"Perform a 2D density plot of the two selected variables\", action = \"store_true\")\n optional.add_argument(\"-DP\",\"--densityplot\"\n ,help = \"Perform the densityPlot of the metric specified in X\", action = \"store_true\")\n optional.add_argument(\"-PP\",\"--pointplot\"\n ,help = \"Perform the pointPlot of the metric specified in X and Y\", action = \"store_true\")\n optional.add_argument(\"-TP\",\"--threeDscatterplot\"\n ,help = \"Perform the 3D scatter plot of the metric specified in X, Y, and Z [and Z 2]\", action = \"store_true\")\n optional.add_argument(\"-BQ\",\"--bestquantile\", metavar=\"FLOAT\", type=float\n ,help = \"Take only the best quantile to plot (in DDP, DP, and PP)\", default=None)\n parser._action_groups.append(optional)\n args = parser.parse_args()\n\n reports = parseReports(args.input, parser)\n\n x_data = args.xaxis\n y_data = args.yaxis\n z_data = args.zaxis\n z2_data = args.z2axis\n\n if z_data is None:\n if args.Zmin is not None or args.Zmax is not None:\n print(\"No data to represent in the colorbar (Z axis). 
\" +\n \"Custom ranges are ignored.\")\n z_min = None\n z_max = None\n else:\n z_min = args.Zmin\n z_max = args.Zmax\n\n output_path = args.output\n title = args.title\n font=args.font\n if len(font)==0:\n font=[\"\",\"\"]\n color = args.color; colormap = args.colormap\n size = args.size\n SP = args.scatterplot\n DDP = args.twodensityplot\n DP = args.densityplot\n PP = args.pointplot\n TP = args.threeDscatterplot\n BQ = args.bestquantile\n\n return reports, x_data, y_data, z_data, z2_data, z_min, z_max, output_path, title, font, color, colormap, size, SP, DDP, DP, PP, TP, BQ\n\n\ndef addUnits(metric_name):\n \"\"\"Add units according to the input metric\n\n PARAMETERS\n ----------\n metric_name : string\n name of the metric to plot\n\n RETURNS\n -------\n label : string\n name of the metric to plot with the units that were added to it\n \"\"\"\n\n if \"energy\" in metric_name.lower():\n label = metric_name + \" ($kcal/mol$)\"\n elif \"energies\" in metric_name.lower():\n label = metric_name + \" ($kcal/mol$)\"\n elif \"distance\" in metric_name.lower():\n label = metric_name + \" ($\\AA$)\"\n elif \"rmsd\" in metric_name.lower():\n label = metric_name + \" ($\\AA$)\"\n else:\n label = metric_name\n return label\n \ndef parseAxisData(axis_data):\n \"\"\"It sets the columns and label of the data that wants to be plotted\n\n PARAMETERS\n ----------\n axis_data : string\n axis data to parse\n\n RETURNS\n -------\n parsed_data : tuple of a list and a string\n the list specifies the report columns that want to be plotted\n in the axis and the string sets the name of the axis\n \"\"\"\n\n if axis_data is None:\n return ([None, ], None)\n else:\n try:\n rows = [int(axis_data[0]), ]\n except ValueError:\n print(\"Warning: axis data not recognized: {}\".format(axis_data))\n return ([None, ], None)\n if len(axis_data) == 1:\n return (rows, None)\n elif len(axis_data) > 1:\n label_index = 1\n while axis_data[label_index] == \"+\":\n label_index += 2\n try:\n rows.append(int(axis_data[2]))\n except (ValueError, IndexError):\n print(\"Warning: axis data not recognized: \" +\n \"{}\".format(axis_data))\n return ([None, ], None)\n if len(axis_data) == label_index:\n return (rows, '?')\n label = addUnits(axis_data[label_index])\n return (rows, label)\n\n print(\"Warning: axis data not recognized: {}\".format(axis_data))\n return ([None, ], None)\n\n\ndef scatterPlot(reports,\n x_rows = [None, ], y_rows = [None, ], z_rows = [None, ],\n x_name = None, y_name = None, z_name = None,\n output_path = None, z_max = None, z_min = None, title = \"\", font = [\"\",\"\"], color = \"\", colormap = \"\", size=12):\n \"\"\"Represent the scatter plot\n\n PARAMETERS\n ----------\n reports : string\n list of report files to look for data\n x_rows : list of integers\n integers which specify the report columns to represent in the X\n axis\n y_rows : list of integers\n integers which specify the report columns to represent in the Y\n axis\n z_rows : list of integers\n integers which specify the report columns to represent in the\n colorbar\n x_name : string\n label of the X axis\n y_name : string\n label of the Y axis\n z_name : string\n label of the colorbar\n output_path : string\n output directory where the resulting plot will be saved\n z_max : float\n it sets the maximum range value of the colorbar\n z_min : float\n it sets the minimum range value of the colorbar\n title: string\n\t it sets the title name of the plot\n color : string\n color to be used for the figure (when only 2 variables are used)\n size: integer\n The 
font size of all the labels in the plot\n \"\"\"\n\n # The different variables are created and the size of the labels is set\n x_values, y_values, z_values, labels, annotations = [], [], [], [], []\n plt.rcParams.update({'font.size': size})\n\n # Set the rows and their labels to perform the scatter plot if they are not specified\n with open(reports[0], 'r') as report_file:\n line = report_file.readline()\n if None in x_rows:\n x_rows = [7, ]\n x_name = \"RMSD ($\\AA$)\"\n if None in y_rows:\n y_rows = [5, ]\n y_name = \"Energy ($kcal/mol$)\"\n if x_name is None:\n x_name = str(line.split(\" \")[x_rows[0] - 1])\n if y_name is None:\n y_name = str(line.split(\" \")[y_rows[0] - 1])\n if (None not in z_rows) and (z_name is None):\n z_name = str(line.split(\" \")[z_rows[0] - 1])\n z_name = addUnits(z_name)\n\n # Get the report files and save the directory where the report is saved and their number\n for report in reports:\n report_directory = os.path.dirname(report)\n report_number = os.path.basename(report).split('_')[-1].split('.')[0]\n \n # Open the report file and save the valeus that will be represented in the 2D scatter plot\n with open(report, 'r') as report_file:\n next(report_file)\n for i, line in enumerate(report_file):\n x_total = 0.\n y_total = 0.\n z_total = 0.\n\n for x_row in x_rows:\n x_total += float(line.split()[x_row - 1])\n\n for y_row in y_rows:\n y_total += float(line.split()[y_row - 1])\n\n if None not in z_rows:\n for z_row in z_rows:\n z_total += float(line.split()[z_row - 1])\n\n if isnan(x_total) or isnan(y_total) or isnan(z_total):\n continue\n\n x_values.append(x_total)\n y_values.append(y_total)\n z_values.append(z_total)\n\n epoch = report_directory.split('/')[-1]\n if not epoch.isdigit():\n epoch = '0'\n\n annotations.append(\"Epoch: \" + epoch + \"\\n\" +\n \"Trajectory: \" + report_number + \"\\n\" +\n \"Model: \" + str(i + 1))\n\n labels.append(0)\n\n if z_max is None:\n z_max = max(z_values)\n\n if z_min is None:\n z_min = min(z_values)\n\n if z_min == z_max:\n cmap = plt.cm.autumn\n else:\n if colormap!=\"\":\n cmap = plt.cm.get_cmap(\"{}\".format(colormap))\n else:\n cmap = plt.cm.plasma\n\n norm = plt.Normalize(z_min, z_max)\n\n fig, ax = plt.subplots()\n\n if output_path is not None:\n s = 20\n else:\n s = None\n\n if color!=\"\" and (None in z_rows):\n sc = plt.scatter(x_values, y_values, c=color, s=s)\n else:\n sc = plt.scatter(x_values, y_values, c=z_values, cmap=cmap, s=s,\n norm=norm)\n\n ax.margins(0.05)\n ax.set_facecolor('white')\n if font[0]!=\"\":\n plt.ylabel(y_name,Dict_of_fonts[font[0]])\n plt.xlabel(x_name,Dict_of_fonts[font[0]])\n else:\n plt.ylabel(y_name)\n plt.xlabel(x_name)\n if font[1]!=\"\":\n plt.title(title,Dict_of_fonts[font[1]])\n else:\n plt.title(title)\n\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"))\n annot.set_visible(False)\n\n # Activate the colorbar only if the Z axis contains data to plot\n if None not in z_rows:\n cbar = plt.colorbar(sc, drawedges=False)\n if font[0]!=\"\":\n cbar.set_label(z_name,Dict_of_fonts[font[0]])\n else:\n cbar.set_label(z_name)\n\n def update_annot(ind):\n \"\"\"Update the information box of the selected point\"\"\"\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n annot.set_text(annotations[int(ind[\"ind\"][0])])\n if color!=\"\" and (None in z_rows):\n annot.get_bbox_patch().set_facecolor(color)\n else:\n annot.get_bbox_patch().set_facecolor(cmap(norm(\n 
z_values[ind[\"ind\"][0]])))\n \n\n def hover(event):\n \"\"\"Action to perform when hovering the mouse on a point\"\"\"\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n # Respond to mouse motion\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n # Save or display the plot depending on whether an output path was set or not\n if output_path is not None:\n plt.savefig(output_path)\n else:\n plt.show()\n\ndef TwoDensityPlot(reports,\n x_rows = [None, ], y_rows = [None, ],\n x_name = None, y_name = None,\n output_path = None, title = \"\", font = [\"\",\"\"], color = \"\", size=12, bestquantile = None):\n \"\"\"Represent the density plot between two variables\n\n PARAMETERS\n ----------\n reports : string\n list of report files to look for data\n x_rows : list of integers\n integers which specify the report columns to represent in the X\n axis\n y_rows : list of integers\n integers which specify the report columns to represent in the Y\n axis\n z_rows : list of integers\n integers which specify the report columns to represent in the\n colorbar\n x_name : string\n label of the X axis\n y_name : string\n label of the Y axis\n z_name : string\n label of the colorbar\n output_path : string\n output directory where the resulting plot will be saved\n z_max : float\n it sets the maximum range value of the colorbar\n z_min : float\n it sets the minimum range value of the colorbar\n title: string\n it sets the title name of the plot\n color : string\n color to be used for the figure\n size: integer\n The font size of all the labels in the plot\n \"\"\"\n\n # The different variables are created and the size of the labels is \n x_values, y_values = [], []\n plt.rcParams.update({'font.size': size})\n # Set the rows and their labels to perform the scatter plot if they \n with open(reports[0], 'r') as report_file:\n line = report_file.readline()\n if None in x_rows:\n x_rows = [7, ]\n x_name = \"RMSD ($\\AA$)\"\n if None in y_rows:\n y_rows = [5, ]\n y_name = \"Energy ($kcal/mol$)\"\n if x_name is None:\n x_name = str(line.split(\" \")[x_rows[0] - 1])\n if y_name is None:\n y_name = str(line.split(\" \")[y_rows[0] - 1])\n # Get the report files and save the directory where the report is saved and their number\n for report in reports:\n report_directory = os.path.dirname(report)\n report_number = os.path.basename(report).split('_')[-1].split('.')[0]\n \n # Open the report file and save the valeus that will be represented in the 2D scatter plot\n with open(report, 'r') as report_file:\n next(report_file)\n for i, line in enumerate(report_file):\n x_total = 0.\n y_total = 0.\n for x_row in x_rows:\n x_total += float(line.split()[x_row - 1])\n for y_row in y_rows:\n y_total += float(line.split()[y_row - 1])\n if isnan(x_total) or isnan(y_total):\n continue\n x_values.append(x_total)\n y_values.append(y_total)\n\n if color==\"\":\n color = \"blue\"\n\n # Get the best quantile out of the data according to the X axis to be plotted\n if bestquantile is not None:\n df = pd.DataFrame(list(zip(x_values,y_values)),columns=[\"X axis\",\"Y axis\"])\n quantile_df = df[df[\"X axis\"] < df[\"X axis\"].quantile(bestquantile)]\n x_values, y_values = quantile_df[\"X axis\"].values,quantile_df[\"Y axis\"].values\n\n sns.set(rc={'figure.figsize':(11.7,8.27),\"font.size\":28,\"axes.titlesize\":20,\"axes.labelsize\":28, 
\"xtick.labelsize\":24,\"ytick.labelsize\":24},style=\"white\")\n plot = sns.JointGrid(x_values, y_values,space=0)\n plot = plot.plot_joint(plt.scatter, edgecolor=\"k\", c=color)\n sns.scatterplot(x_values, y_values, color=color) \n sns.distplot(y_values, kde=True, ax=plot.ax_marg_y, vertical=True, color=color,axlabel=False)\n sns.distplot(x_values, kde=True, ax=plot.ax_marg_x, color=color,axlabel=False)\n\n if font[0]!=\"\":\n plt.ylabel(y_name,Dict_of_fonts[font[0]])\n plt.xlabel(x_name,Dict_of_fonts[font[0]])\n else:\n plt.ylabel(y_name)\n plt.xlabel(x_name)\n if font[1]!=\"\":\n plt.title(title,Dict_of_fonts[font[1]])\n else:\n plt.title(title)\n\n # Save or display the plot depending on whether an output path was set or not\n if output_path is not None:\n plt.savefig(output_path)\n else:\n plt.show()\n\ndef densityPlot(reports, x_rows = [None, ], x_name = None, title = \"\", color = \"\", size = 12, bestquantile = None):\n \"\"\"Represent the density plot\n\n PARAMETERS\n ----------\n reports : string\n list of report files to look for data\n x_rows : list of integers\n integers which specify the report columns to represent in the X\n axis\n x_name : string\n label of the X axis\n title: string\n it sets the title name of the plot\n size: integer\n The font size of all the labels in the plot\n color : string\n color to be used for the figure\n \"\"\"\n \n x_values = []\n plt.rcParams.update({'font.size': size})\n\n for report in reports:\n report_file = open(report,'r')\n i=0\n for line in report_file:\n if i!=0:\n x_values.append(float(line.split()[x_rows[0] - 1]))\n else:\n pass\n i+=1\n\n if color==\"\":\n color = \"blue\"\n\n # Get the best quantile out of the data according to the X axis to be plotted\n if bestquantile is not None:\n df = pd.DataFrame(list(x_values),columns=[\"X axis\"])\n quantile_df = df[df[\"X axis\"] < df[\"X axis\"].quantile(bestquantile)]\n x_values = quantile_df[\"X axis\"].values\n\n sns.distplot(x_values, color=color)\n plt.title(title)\n plt.xlabel(x_name)\n if x_name !=None:\n plt.ylabel(\"Density (1/{})\".format(x_name.split(\"(\")[1][0:-1]))\n plt.show()\n\ndef pointPlot(reports, x_rows = [None, ], x_name = None, y_rows = [None, ], y_name = None, title = \"\", color = \"\", size = 12, bestquantile = None):\n \"\"\"Represent the scatter point plot\n\n PARAMETERS\n ----------\n reports : string\n list of report files to look for data\n x_rows : list of integers\n integers which specify the report columns to represent in the X\n axis\n x_name : string\n label of the X axis\n y_rows : list of integers\n integers which specify the report columns to represent in the Y\n axis\n y_name : string\n label of the Y axis\n title: string\n it sets the title name of the plot\n color : string\n color to be used for the figure\n size: integer\n The font size of all the labels in the plot\n \"\"\" \n\n x_values,y_values = [],[]\n plt.rcParams.update({'font.size': size})\n\n for report in reports:\n report_file = open(report,'r')\n i=0\n for line in report_file:\n if i!=0:\n x_values.append(float(line.split()[x_rows[0] - 1]))\n y_values.append(float(line.split()[y_rows[0] - 1]))\n else:\n pass\n i+=1\n\n # Get the best quantile out of the data according to the X axis to be plotted\n if bestquantile is not None:\n df = pd.DataFrame(list(zip(x_values,y_values)),columns=[\"X axis\",\"Y axis\"])\n quantile_df = df[df[\"X axis\"] < df[\"X axis\"].quantile(bestquantile)]\n x_values, y_values = quantile_df[\"X axis\"].values,quantile_df[\"Y axis\"].values \n\n if 
color==\"\":\n color=\"blue\"\n\n plt.scatter(x_values,y_values,c=color)\n plt.title(title)\n plt.xlabel(x_name)\n plt.ylabel(y_name)\n plt.show()\n\ndef ThreeDPlot(reports, x_rows = [None, ], x_name = None, y_rows = [None, ], y_name = None,\n z_rows = [None, ], z_name = None, z2_rows = [None, ], z2_name = None,\n z_max = None, z_min = None, output_path = None, title = \"\", font = [\"\",\"\"], color = \"\", colormap = \"\", size = 12):\n \"\"\"Represent the 3D scatter plot of some PELE metrics\n\n PARAMETERS\n ----------\n reports : string\n list of report files to look for data\n x_rows : list of integers\n integers which specify the report columns to represent in the X\n axis\n x_name : string\n label of the X axis\n y_rows : list of integers\n integers which specify the report columns to represent in the Y\n axis\n y_name : string\n label of the Y axis\n z_rows : list of integers\n integers which specify the report columns to represent in the Z\n axis\n z_name : string\n label of the Z axis\n z2_rows : list of integers\n integers which specify the report columns to represent in the colorbar\n z2_name : string\n label of the colorbar\n output_path : string\n output directory where the resulting plot will be saved\n z_max : float\n it sets the maximum range value of the colorbar\n z_min : float\n it sets the minimum range value of the colorbar\n title: string\n it sets the title name of the plot\n color : string\n color to be used for the figure (when only 3 variables are used)\n size: integer\n The font size of all the labels in the plot\n \"\"\"\n\n # The different variables are created and the size of the labels is set\n x_values, y_values, z_values, z2_values, labels, annotations = [], [], [], [], [], []\n plt.rcParams.update({'font.size': size})\n\n # Set the rows and their labels to perform the scatter plot if they are not specified\n with open(reports[0], 'r') as report_file:\n line = report_file.readline()\n if None in x_rows:\n x_rows = [7, ]\n x_name = \"RMSD ($\\AA$)\"\n if None in y_rows:\n y_rows = [5, ]\n y_name = \"Energy ($kcal/mol$)\"\n if None in z_rows:\n z_rows = [6, ]\n z_name = \"SASA of the ligand\"\n if x_name is None:\n x_name = str(line.split(\" \")[x_rows[0] - 1])\n if y_name is None:\n y_name = str(line.split(\" \")[y_rows[0] - 1])\n if z_name is None:\n z_name = str(line.split(\" \")[z_rows[0] - 1])\n if (None not in z2_rows) and (z2_name is None):\n z2_name = str(line.split(\" \")[z2_rows[0] - 1])\n z2_name = addUnits(z2_name)\n\n # Get the report files and save the directory where the report is saved and their number\n for report in reports:\n report_directory = os.path.dirname(report)\n report_number = os.path.basename(report).split('_')[-1].split('.')[0]\n\n # Open the report file and save the valeus that will be represented in the 3D scatter plot\n with open(report, 'r') as report_file:\n next(report_file)\n for i, line in enumerate(report_file):\n x_total = 0.\n y_total = 0.\n z_total = 0.\n z2_total = 0.\n\n for x_row in x_rows:\n x_total += float(line.split()[x_row - 1])\n\n for y_row in y_rows:\n y_total += float(line.split()[y_row - 1])\n\n for z_row in z_rows:\n z_total += float(line.split()[z_row - 1])\n\n if None not in z2_rows:\n for z2_row in z2_rows:\n z2_total += float(line.split()[z2_row - 1])\n\n if isnan(x_total) or isnan(y_total) or isnan(z_total) or isnan(z2_total):\n continue\n\n x_values.append(x_total)\n y_values.append(y_total)\n z_values.append(z_total)\n z2_values.append(z2_total)\n\n epoch = report_directory.split('/')[-1]\n if 
not epoch.isdigit():\n epoch = '0'\n\n annotations.append(\"Epoch: \" + epoch + \"\\n\" +\n \"Trajectory: \" + report_number + \"\\n\" +\n \"Model: \" + str(i + 1))\n\n labels.append(0)\n\n if z_max is None:\n z_max = max(z2_values)\n\n if z_min is None:\n z_min = min(z2_values)\n\n if z_min == z_max:\n cmap = plt.cm.autumn\n else:\n if colormap!=\"\":\n cmap = plt.cm.get_cmap(\"{}\".format(colormap))\n else:\n cmap = plt.cm.plasma\n\n norm = plt.Normalize(z_min, z_max)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n if color!=\"\" and (None in z2_rows):\n sc = ax.scatter(x_values, y_values, z_values, c=color)\n else:\n sc = ax.scatter(x_values, y_values, z_values, c=z2_values, cmap=cmap,\n norm=norm)\n\n ax.margins(0.05)\n ax.set_facecolor('white')\n if font[0]!=\"\":\n ax.set_ylabel(y_name,Dict_of_fonts[font[0]])\n ax.set_xlabel(x_name,Dict_of_fonts[font[0]])\n ax.set_zlabel(z_name,Dict_of_fonts[font[0]])\n else:\n ax.set_ylabel(y_name)\n ax.set_xlabel(x_name)\n ax.set_zlabel(z_name)\n if font[1]!=\"\":\n ax.set_title(title,Dict_of_fonts[font[1]])\n else:\n ax.set_title(title)\n\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"))\n annot.set_visible(False)\n\n # Activate the colorbar only if the Z2 axis contains data to plot\n if None not in z2_rows:\n cbar = plt.colorbar(sc, drawedges=False)\n if font[0]!=\"\":\n cbar.set_label(z2_name,Dict_of_fonts[font[0]])\n else:\n cbar.set_label(z2_name)\n\n def update_annot(ind):\n \"\"\"Update the information box of the selected point\"\"\"\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n annot.set_text(annotations[int(ind[\"ind\"][0])])\n if color!=\"\" and (None in z2_rows):\n annot.get_bbox_patch().set_facecolor(color)\n else:\n annot.get_bbox_patch().set_facecolor(cmap(norm(\n z2_values[ind[\"ind\"][0]])))\n\n def hover(event):\n \"\"\"Action to perform when hovering the mouse on a point\"\"\"\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n # Respond to mouse motion\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n # Save or display the plot depending on whether an output path was set or not\n if output_path is not None:\n plt.savefig(output_path)\n else:\n plt.show()\n\ndef main():\n \"\"\"Main function\n\n It is called when this script is the main program called by the interpreter\n \"\"\"\n\n # Parse command-line arguments\n reports, x_data, y_data, z_data, z2_data, z_min, z_max, output_path, title, font, color, colormap, size, SP, DDP, DP, PP, TP, BQ = parseArgs()\n\n # Parse axis data to label it properly\n x_rows, x_name = parseAxisData(x_data)\n y_rows, y_name = parseAxisData(y_data)\n z_rows, z_name = parseAxisData(z_data)\n z2_rows, z2_name = parseAxisData(z2_data)\n\n # Generate the plot\n if SP:\n scatterPlot(reports,\n x_rows=x_rows, y_rows=y_rows, z_rows=z_rows,\n x_name=x_name, y_name=y_name, z_name=z_name,\n z_min=z_min, z_max=z_max,\n output_path=output_path,title=title,font=font,color=color,colormap=colormap,size=size)\n if DDP:\n TwoDensityPlot(reports,\n x_rows=x_rows, y_rows=y_rows,\n x_name=x_name, y_name=y_name,\n output_path=output_path,title=title,font=font,color=color,size=size,bestquantile=BQ)\n if DP:\n densityPlot(reports,\n 
x_rows=x_rows,x_name=x_name,title=title,color=color,size=size,bestquantile=BQ)\n if PP:\n pointPlot(reports,\n x_rows=x_rows, y_rows=y_rows,x_name=x_name,y_name=y_name,title=title,color=color,size=size,bestquantile=BQ)\n if TP:\n ThreeDPlot(reports,\n x_rows=x_rows, y_rows=y_rows, z_rows=z_rows, z2_rows=z2_rows,\n x_name=x_name, y_name=y_name, z_name=z_name, z2_name=z2_name,\n z_min=z_min, z_max=z_max,\n output_path=output_path,title=title,font=font,color=color,colormap=colormap,size=size)\n\n\nif __name__ == \"__main__\":\n \"\"\"Call the main function\"\"\"\n main()\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.linalg.det", "numpy.mean", "numpy.transpose", "numpy.linalg.svd", "numpy.sqrt" ], [ "matplotlib.use", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.Normalize", "matplotlib.pyplot.show" ] ]
h2oai/driverlessai-recipes
[ "508663ed6bd1961925fcb29fde268493fe40e9e8" ]
[ "transformers/image/image_url_transformer.py" ]
[ "\"\"\"Convert a path to an image (JPG/JPEG/PNG) to a vector of class probabilities created by a pretrained ImageNet deeplearning model (Keras, TensorFlow).\"\"\"\nimport importlib\nfrom h2oaicore.transformer_utils import CustomTransformer\nfrom h2oaicore.models import TensorFlowModel\nimport datatable as dt\nimport numpy as np\nfrom h2oaicore.systemutils import small_job_pool, user_dir, dummypool, print_debug, remove\nimport requests\nimport shutil\nimport uuid\nimport os\n\n\nclass MyImgTransformer(TensorFlowModel, CustomTransformer):\n # Need Pillow before nlp imports keras, else when here too late.\n # I.e. wasn't enough to put keras imports inside fit/transform to delay after Pillow installed\n _modules_needed_by_name = ['pillow==8.3.2']\n _tensorflow = True\n _mojo = False\n _parallel_task = True # assumes will use n_jobs in params_base\n _can_use_gpu = True\n _can_use_multi_gpu = True\n _testing_can_skip_failure = False # ensure tested as if shouldn't fail\n\n @staticmethod\n def is_enabled():\n return True\n\n @staticmethod\n def do_acceptance_test():\n return True # False\n\n @staticmethod\n def get_default_properties():\n return dict(col_type=\"image\", min_cols=1, max_cols=1, relative_importance=1)\n\n @staticmethod\n def enabled_setting():\n return 'on'\n\n def __init__(self, batch_size=32, **kwargs):\n CustomTransformer.__init__(self, **kwargs)\n TensorFlowModel.__init__(self, **kwargs)\n self.uuid_tmp = str(uuid.uuid4())[:6]\n self.experiment_id = self.__class__.__name__ + self.uuid_tmp\n #super().__init__(**kwargs)\n self.batch_size = batch_size\n self.model_name = \"resnet_keras.h5p\"\n self.uuid = \"%s-img-data-\" % self.__class__.__name__ + self.model_name # + str(uuid.uuid4())[:6] # no, keeps changing and re-loadeing every init\n self.model_path = os.path.join(user_dir(), self.uuid + \".model\")\n self.model_tmp_path = self.model_path + \"_\" + self.uuid_tmp + \".tmp\"\n if not os.path.exists(self.model_path):\n self.download(\n url=\"http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/recipes/transformers/img/%s\" % self.model_name,\n dest=self.model_path)\n with open(self.model_path, 'rb') as f:\n self.model_bytes = f.read()\n # remove(self.model_path) # avoid re-downloads\n\n def atomic_move(self, src, dst):\n try:\n shutil.move(src, dst)\n except FileExistsError:\n pass\n remove(src)\n\n def download(self, url, dest):\n if os.path.exists(dest):\n print(\"already downloaded %s -> %s\" % (url, dest))\n return\n print(\"downloading %s to %s\" % (url, dest))\n url_data = requests.get(url, stream=True)\n if url_data.status_code != requests.codes.ok:\n msg = \"Cannot get url %s, code: %s, reason: %s\" % (\n str(url), str(url_data.status_code), str(url_data.reason))\n raise requests.exceptions.RequestException(msg)\n url_data.raw.decode_content = True\n if not os.path.isdir(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n uuid_tmp = str(uuid.uuid4())[:6]\n dest_tmp = dest + \"_\" + uuid_tmp + \".tmp\"\n with open(dest_tmp, 'wb') as f:\n shutil.copyfileobj(url_data.raw, f)\n self.atomic_move(dest_tmp, dest)\n\n @property\n def display_name(self):\n return \"MyImgTransformerBatchSize%d\" % self.batch_size\n\n @staticmethod\n def get_parameter_choices():\n return dict(batch_size=[16, 32, 64])\n\n @staticmethod\n def get_default_properties():\n return dict(col_type=\"categorical\", min_cols=1, max_cols=1, relative_importance=1)\n\n def preprocess_image(self, source_img_path, check_only=False):\n try:\n final_img_path = 
os.path.join(user_dir(), self.uuid, os.path.basename(source_img_path))\n except: # we are sometimes getting np.float32, why?\n return None\n delete = False\n if not os.path.exists(final_img_path):\n if not os.path.exists(source_img_path):\n try:\n self.download(source_img_path, final_img_path)\n except requests.RequestException as e:\n # print_debug(\"Error: %s for source_img_path: %s\" % (str(e), str(source_img_path)))\n return None\n delete = False # True to avoid re-download or a race condition between multiple procs\n else:\n final_img_path = source_img_path\n if not check_only:\n import h2oaicore.keras as keras\n importlib.reload(keras)\n img = keras.preprocessing.image.load_img(final_img_path, target_size=(224, 224))\n if delete:\n remove(final_img_path)\n x = keras.preprocessing.image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = keras.applications.resnet50.preprocess_input(x)\n return x\n else:\n return True\n\n def fit_transform(self, X: dt.Frame, y: np.array = None, **kwargs):\n return self.transform(X, **kwargs)\n\n def transform(self, X: dt.Frame, **kwargs):\n if not os.path.exists(self.model_path):\n os.makedirs(os.path.dirname(self.model_path), exist_ok=True)\n with open(self.model_path, 'wb') as f:\n f.write(self.model_bytes)\n\n # remove(self.model_path) # can't remove, used by other procs or later\n self.col_name = self.input_feature_names[0]\n values = X[:, self.col_name].to_numpy().ravel()\n self.batch_size = min(len(values), self.batch_size)\n values_ = np.array_split(values, int(len(values) / self.batch_size) + 1)\n print(values_)\n\n # check if data is image related\n results = []\n for v in values_:\n images = []\n for x in v:\n if True or x[-4:] in [\".jpg\", \".png\", \".jpeg\"]:\n image = self.preprocess_image(x, check_only=True)\n images.append(image)\n else:\n raise NotImplementedError\n # deal with missing images (None in images)\n images = [x for x in images if x is not None]\n results.extend(images)\n\n if len(results) > 0:\n # don't use GPU memory unless actually found relevant data\n import h2oaicore.keras as keras\n # self.tf_config = self.set_tf_config(kwargs)\n self.tf_config = self.ConfigProto()\n # self.tf_config.gpu_options.allow_growth = True\n self.tf_config.gpu_options.per_process_gpu_memory_fraction = 0.3\n keras.backend.set_session(session=TensorFlowModel.make_sess(self.tf_config))\n # importlib.reload(keras)\n self.model = keras.models.load_model(self.model_path)\n\n results = []\n for v in values_:\n images = []\n for x in v:\n if True or x[-4:] in [\".jpg\", \".png\", \".jpeg\"]:\n image = self.preprocess_image(x)\n images.append(image)\n else:\n raise NotImplementedError\n # deal with missing images (None in images)\n good_imagei = None\n for imagei, image in enumerate(images):\n if image is not None:\n good_imagei = imagei\n break\n if len(images) > 0:\n msg = \"no good images out of %d images\" % len(images)\n if False:\n assert good_imagei is not None, msg\n elif good_imagei is None:\n pass\n # print_debug(msg)\n if good_imagei is not None:\n for imagei, image in enumerate(images):\n if image is None:\n images[imagei] = images[good_imagei] * 0 # impute 0 for missing images\n images = np.vstack(images)\n results.append(self.model.predict(images))\n if len(results) > 0:\n return dt.Frame(np.vstack(results))\n else:\n return dt.Frame([0] * X.shape[0])\n" ]
[ [ "numpy.vstack", "numpy.expand_dims" ] ]
ArkhamWJZ/SummerProject
[ "bae2aa3e859766097d5ae6efd7063adf58a56ac8" ]
[ "back_end/imgProcess/end2end_model/model/my_dataset.py" ]
[ "# -*- coding: UTF-8 -*-\nimport sys\nimport os\nfrom torch.utils.data import DataLoader,Dataset\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\nsys.path.append('../')\n# print(type(sys.path), '\\r\\n'.join(sys.path))\n\nimport torch.nn as nn\nfrom end2end_model.training_set_gen import gen_config\nfrom end2end_model.training_set_gen import one_hot_encoding as ohe\n\nclass mydataset(Dataset):\n\n def __init__(self, folder, transform=None):\n self.train_image_file_paths = [os.path.join(folder, image_file) for image_file in os.listdir(folder)]\n self.transform = transform\n\n def __len__(self):\n return len(self.train_image_file_paths)\n\n def __getitem__(self, idx):\n image_root = self.train_image_file_paths[idx]\n image_name = image_root.split(os.path.sep)[-1]\n image = Image.open(image_root)\n if self.transform is not None:\n image = self.transform(image)\n print('DUBUG IN MY_DATASET:',image_name.split('_')[0])\n label = ohe.encode(image_name.split('_')[0]) # 为了方便,在生成图片的时候,图片文件的命名格式 \"4个数字或者数字_时间戳.PNG\", 4个字母或者即是图片的验证码的值,字母大写,同时对该值做 one-hot 处理\n return image, label\n\ntransform = transforms.Compose([\n # transforms.ColorJitter(),\n transforms.Grayscale(),\n transforms.ToTensor(),\n # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n])\ndef get_train_data_loader(BATCH_SIZE=64):\n\n dataset = mydataset(gen_config.TRAIN_DATASET_PATH, transform=transform)\n return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n\ndef get_test_data_loader():\n dataset = mydataset(gen_config.TEST_DATASET_PATH, transform=transform)\n return DataLoader(dataset, batch_size=1, shuffle=True)\n \n\ndef get_predict_data_loader():\n dataset = mydataset(gen_config.PREDICT_DATASET_PATH, transform=transform)\n return DataLoader(dataset, batch_size=1, shuffle=True)" ]
[ [ "torch.utils.data.DataLoader" ] ]
anirudhacharya/aesara
[ "cbf91122296b68ee2ad592b2312d56f6ff65ba53", "cbf91122296b68ee2ad592b2312d56f6ff65ba53" ]
[ "aesara/tensor/elemwise.py", "tests/link/test_jax.py" ]
[ "from copy import copy\nfrom typing import Tuple, Union\n\nimport numpy as np\n\nimport aesara.tensor.basic\nfrom aesara.configdefaults import config\nfrom aesara.gradient import DisconnectedType\nfrom aesara.graph.basic import Apply\nfrom aesara.graph.null_type import NullType\nfrom aesara.graph.op import COp, ExternalCOp, OpenMPOp\nfrom aesara.graph.params_type import ParamsType\nfrom aesara.graph.utils import MethodNotDefined\nfrom aesara.link.c.basic import failure_code\nfrom aesara.misc.frozendict import frozendict\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.printing import FunctionPrinter, pprint\nfrom aesara.scalar import get_scalar_type\nfrom aesara.scalar.basic import Scalar\nfrom aesara.scalar.basic import bool as scalar_bool\nfrom aesara.scalar.basic import identity as scalar_identity\nfrom aesara.scalar.basic import transfer_type, upcast\nfrom aesara.tensor import _get_vector_length, as_tensor_variable\nfrom aesara.tensor import elemwise_cgen as cgen\nfrom aesara.tensor import get_vector_length\nfrom aesara.tensor.type import (\n TensorType,\n continuous_dtypes,\n discrete_dtypes,\n float_dtypes,\n lvector,\n)\nfrom aesara.utils import uniq\n\n\n_numpy_ver = [int(n) for n in np.__version__.split(\".\")[:2]]\n\n\nclass DimShuffle(ExternalCOp):\n \"\"\"\n Allows to reorder the dimensions of a tensor or insert or remove\n broadcastable dimensions.\n\n In the following examples, 'x' means that we insert a broadcastable\n dimension and a numerical index represents the dimension of the same\n rank in the tensor passed to perform.\n\n Parameters\n ----------\n input_broadcastable\n The expected broadcastable pattern of the input\n new_order\n A list representing the relationship between the input's\n dimensions and the output's dimensions. Each element of the\n list can either be an index or 'x'. Indices must be encoded\n as python integers, not aesara symbolic integers.\n inplace : bool, optional\n If True (default), the output will be a view of the input.\n\n Notes\n -----\n If `j = new_order[i]` is an index, the output's ith dimension\n will be the input's jth dimension.\n If `new_order[i]` is `x`, the output's ith dimension will\n be 1 and Broadcast operations will be allowed to do broadcasting\n over that dimension.\n\n If `input.broadcastable[i] == False` then `i` must be found in new_order.\n Broadcastable dimensions, on the other hand, can be discarded.\n\n .. code-block:: python\n\n DimShuffle((False, False, False), ['x', 2, 'x', 0, 1])\n\n This op will only work on 3d tensors with no broadcastable\n dimensions. The first dimension will be broadcastable,\n then we will have the third dimension of the input tensor as\n the second of the resulting tensor, etc. If the tensor has\n shape (20, 30, 40), the resulting tensor will have dimensions\n (1, 40, 1, 20, 30). (AxBxC tensor is mapped to 1xCx1xAxB tensor)\n\n .. code-block:: python\n\n DimShuffle((True, False), [1])\n\n This op will only work on 2d tensors with the first dimension\n broadcastable.\n The second dimension of the input tensor will be the first dimension of\n the resulting tensor.\n If the tensor has shape (1, 20), the resulting tensor will have shape\n (20, ).\n\n Examples\n --------\n .. 
code-block:: python\n\n DimShuffle((), ['x']) # make a 0d (scalar) into a 1d vector\n DimShuffle((False, False), [0, 1]) # identity\n DimShuffle((False, False), [1, 0]) # inverts the 1st and 2nd dimensions\n DimShuffle((False,), ['x', 0]) # make a row out of a 1d vector\n # (N to 1xN)\n DimShuffle((False,), [0, 'x']) # make a column out of a 1d vector\n # (N to Nx1)\n DimShuffle((False, False, False), [2, 0, 1]) # AxBxC to CxAxB\n DimShuffle((False, False), [0, 'x', 1]) # AxB to Ax1xB\n DimShuffle((False, False), [1, 'x', 0]) # AxB to Bx1xA\n\n The reordering of the dimensions can be done with the numpy.transpose\n function.\n Adding, subtracting dimensions can be done with reshape.\n\n \"\"\"\n\n _f16_ok = True\n check_input = False\n __props__ = (\"input_broadcastable\", \"new_order\", \"inplace\")\n c_func_file = \"c_code/dimshuffle.c\"\n c_func_name = \"APPLY_SPECIFIC(cpu_dimshuffle)\"\n\n @property\n def params_type(self):\n # We can't directly create `params_type` as class attribute\n # because of importation issues related to TensorType.\n return ParamsType(\n input_broadcastable=TensorType(dtype=\"bool\", broadcastable=(False,)),\n _new_order=lvector,\n transposition=TensorType(dtype=\"uint32\", broadcastable=(False,)),\n inplace=scalar_bool,\n )\n\n @property\n def _new_order(self):\n # Param for C code.\n # self.new_order may contain 'x', which is not a valid integer value.\n # We replace it with -1.\n return [(-1 if x == \"x\" else x) for x in self.new_order]\n\n @property\n def transposition(self):\n return self.shuffle + self.drop\n\n def __init__(self, input_broadcastable, new_order, inplace=True):\n super().__init__([self.c_func_file], self.c_func_name)\n self.input_broadcastable = tuple(input_broadcastable)\n self.new_order = tuple(new_order)\n if inplace is True:\n self.inplace = inplace\n else:\n raise ValueError(\n \"DimShuffle is inplace by default and hence the inplace for DimShuffle must be true\"\n )\n\n for i, j in enumerate(new_order):\n if j != \"x\":\n # There is a bug in numpy that results in\n # isinstance(x, integer_types) returning False for\n # numpy integers. See\n # <http://projects.scipy.org/numpy/ticket/2235>.\n if not isinstance(j, (int, np.integer)):\n raise TypeError(\n \"DimShuffle indices must be python ints. 
\"\n f\"Got: '{j}' of type '{type(j)}'.\"\n )\n if j >= len(input_broadcastable):\n raise ValueError(\n f\"new_order[{i}] is {j}, but the input only has \"\n f\"{len(input_broadcastable)} axes.\"\n )\n if j in new_order[(i + 1) :]:\n raise ValueError(\n \"The same input dimension may not appear \"\n \"twice in the list of output dimensions\",\n new_order,\n )\n\n # list of dimensions of the input to drop\n self.drop = []\n for i, b in enumerate(input_broadcastable):\n if i not in new_order:\n # we want to drop this dimension because it's not a value in\n # new_order\n if b == 1: # 1 aka True\n self.drop.append(i)\n else:\n # we cannot drop non-broadcastable dimensions\n raise ValueError(\n \"You cannot drop a non-broadcastable dimension:\",\n f\" {input_broadcastable}, {new_order}\",\n )\n\n # this is the list of the original dimensions that we keep\n self.shuffle = [x for x in new_order if x != \"x\"]\n\n # list of dimensions of the output that are broadcastable and were not\n # in the original input\n self.augment = [i for i, x in enumerate(new_order) if x == \"x\"]\n\n if self.inplace:\n self.view_map = {0: [0]}\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n if not hasattr(self, \"func_files\"):\n # Perhaps we are loading an old `Op` version of DimShuffle.\n # Let's just build the ExternalCOp.\n super().__init__([self.c_func_file], self.c_func_name)\n\n def make_node(self, _input):\n input = as_tensor_variable(_input)\n ib = tuple(input.type.broadcastable)\n if not ib == self.input_broadcastable:\n if len(ib) != len(self.input_broadcastable):\n raise TypeError(\n \"The number of dimensions of the \"\n f\"input is incorrect for this op. Expected {self.input_broadcastable}, got {ib}.\"\n )\n for expected, b in zip(self.input_broadcastable, ib):\n if expected is True and b is False:\n raise TypeError(\n \"The broadcastable pattern of the \"\n f\"input is incorrect for this op. 
Expected {self.input_broadcastable}, got {ib}.\"\n )\n # else, expected == b or expected is False and b is True\n # Both case are good.\n\n ob = []\n for value in self.new_order:\n if value == \"x\":\n ob.append(True)\n else:\n ob.append(ib[value])\n\n output = TensorType(dtype=input.type.dtype, broadcastable=ob)()\n\n return Apply(self, [input], [output])\n\n def __str__(self):\n if self.inplace:\n return \"InplaceDimShuffle{%s}\" % \",\".join(str(x) for x in self.new_order)\n else:\n return \"DimShuffle{%s}\" % \",\".join(str(x) for x in self.new_order)\n\n def perform(self, node, inp, out, params):\n (input,) = inp\n (storage,) = out\n # drop\n res = input\n if type(res) != np.ndarray and type(res) != np.memmap:\n raise TypeError(res)\n\n # transpose\n res = res.transpose(self.shuffle + self.drop)\n\n # augment\n shape = list(res.shape[: len(self.shuffle)])\n for augm in self.augment:\n shape.insert(augm, 1)\n res = res.reshape(shape)\n\n # copy (if not inplace)\n if not self.inplace:\n res = np.copy(res)\n\n storage[0] = np.asarray(res) # asarray puts scalars back into array\n\n def infer_shape(self, fgraph, node, shapes):\n (ishp,) = shapes\n # transpose\n rval = [ishp[i] for i in self.shuffle]\n\n # augment\n for augm in self.augment:\n rval.insert(augm, 1)\n return [rval]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self(*eval_points, return_list=True)\n\n def grad(self, inp, grads):\n\n (x,) = inp\n (gz,) = grads\n gz = as_tensor_variable(gz)\n grad_order = [\"x\"] * len(x.type.broadcastable)\n for i, v in enumerate(self.new_order):\n if v != \"x\":\n grad_order[v] = i\n # Do not make the DimShuffle inplace as an optimization at the\n # canonicalization optimization phase will remove the inplace.\n # The inplace will be reintroduced automatically later in the graph.\n if inp[0].dtype in discrete_dtypes:\n return [inp[0].zeros_like(dtype=config.floatX)]\n else:\n return [\n DimShuffle(gz.type.broadcastable, grad_order)(\n Elemwise(scalar_identity)(gz)\n )\n ]\n\n\nclass DimShufflePrinter:\n def __p(self, new_order, pstate, r):\n if new_order != () and new_order[0] == \"x\":\n return f\"{self.__p(new_order[1:], pstate, r)}\"\n # return \"[%s]\" % self.__p(new_order[1:], pstate, r)\n if list(new_order) == list(range(r.type.ndim)):\n return pstate.pprinter.process(r)\n if list(new_order) == list(reversed(range(r.type.ndim))):\n return f\"{pstate.pprinter.process(r)}.T\"\n return \"DimShuffle{{{}}}({})\".format(\n \", \".join(map(str, new_order)),\n pstate.pprinter.process(r),\n )\n\n def process(self, r, pstate):\n if r.owner is None:\n raise TypeError(\"Can only print DimShuffle.\")\n elif isinstance(r.owner.op, DimShuffle):\n ord = r.owner.op.new_order\n return self.__p(ord, pstate, r.owner.inputs[0])\n else:\n raise TypeError(\"Can only print DimShuffle.\")\n\n\npprint.assign(DimShuffle, DimShufflePrinter())\n\n\nclass Elemwise(OpenMPOp):\n \"\"\"\n Generalizes a scalar op to tensors.\n\n All the inputs must have the same number of dimensions. When the\n Op is performed, for each dimension, each input's size for that\n dimension must be the same. As a special case, it can also be 1\n but only if the input's broadcastable flag is True for that\n dimension. In that case, the tensor is (virtually) replicated\n along that dimension to match the size of the others.\n\n The dtypes of the outputs mirror those of the scalar Op that is\n being generalized to tensors. 
In particular, if the calculations\n for an output are done inplace on an input, the output type must\n be the same as the corresponding input type (see the doc of\n `ScalarOp` to get help about controlling the output type)\n\n Parameters\n ----------\n scalar_op\n An instance of a subclass of `ScalarOp` which works uniquely\n on scalars.\n inplace_pattern\n A dictionary that maps the index of an output to the\n index of an input so the output is calculated inplace using\n the input's storage. (Just like destroymap, but without the lists.)\n nfunc_spec\n Either None or a tuple of three elements,\n (nfunc_name, nin, nout) such that getattr(numpy, nfunc_name)\n implements this operation, takes nin inputs and nout outputs.\n Note that nin cannot always be inferred from the scalar op's\n own nin field because that value is sometimes 0 (meaning a\n variable number of inputs), whereas the numpy function may\n not have varargs.\n\n Notes\n -----\n | Elemwise(add) represents + on tensors (x + y)\n | Elemwise(add, {0 : 0}) represents the += operation (x += y)\n | Elemwise(add, {0 : 1}) represents += on the second argument (y += x)\n | Elemwise(mul)(rand(10, 5), rand(1, 5)) the second input is completed \\\nalong the first dimension to match the first input\n | Elemwise(true_div)(rand(10, 5), rand(10, 1)) same but along the \\\nsecond dimension\n | Elemwise(int_div)(rand(1, 5), rand(10, 1)) the output has size (10, 5)\n | Elemwise(log)(rand(3, 4, 5))\n\n \"\"\"\n\n __props__ = (\"scalar_op\", \"inplace_pattern\")\n\n def __init__(\n self, scalar_op, inplace_pattern=None, name=None, nfunc_spec=None, openmp=None\n ):\n assert not isinstance(scalar_op, type(self))\n if inplace_pattern is None:\n inplace_pattern = frozendict({})\n self.name = name\n self.scalar_op = scalar_op\n self.inplace_pattern = inplace_pattern\n self.destroy_map = {o: [i] for o, i in self.inplace_pattern.items()}\n\n if nfunc_spec is None:\n nfunc_spec = getattr(scalar_op, \"nfunc_spec\", None)\n self.nfunc_spec = nfunc_spec\n self.__setstate__(self.__dict__)\n super().__init__(openmp=openmp)\n\n def __getstate__(self):\n d = copy(self.__dict__)\n d.pop(\"ufunc\")\n d.pop(\"nfunc\")\n d.pop(\"__epydoc_asRoutine\", None)\n return d\n\n def __setstate__(self, d):\n super().__setstate__(d)\n self.ufunc = None\n self.nfunc = None\n self.inplace_pattern = frozendict(self.inplace_pattern)\n\n def get_output_info(self, dim_shuffle, *inputs):\n \"\"\"Return the outputs dtype and broadcastable pattern and the\n dimshuffled inputs.\n\n \"\"\"\n shadow = self.scalar_op.make_node(\n *[get_scalar_type(dtype=i.type.dtype).make_variable() for i in inputs]\n )\n\n target_length = max([input.type.ndim for input in inputs])\n\n args = []\n for input in inputs:\n length = input.type.ndim\n difference = target_length - length\n if not difference:\n args.append(input)\n else:\n # TODO: use LComplete instead\n args.append(\n dim_shuffle(\n input.type.broadcastable,\n [\"x\"] * difference + list(range(length)),\n )(input)\n )\n inputs = args\n\n # HERE: all the broadcast dims have the same length now\n\n # cleverness: we iterate over the first, second, third broadcast flag\n # of all inputs in parallel... 
the all() gives us each output\n # broadcastable bit in turn.\n\n # it is multiplied by nout because Elemwise supports multiple outputs\n # (nout of them)\n out_broadcastables = [\n [\n all(bcast)\n for bcast in zip(*[input.type.broadcastable for input in inputs])\n ]\n ] * shadow.nout\n\n # inplace_pattern maps output idx -> input idx\n inplace_pattern = self.inplace_pattern\n if inplace_pattern:\n for overwriter, overwritten in inplace_pattern.items():\n for ob, ib in zip(\n out_broadcastables[overwriter],\n inputs[overwritten].type.broadcastable,\n ):\n if ib and not ob:\n raise ValueError(\n \"Operation cannot be done inplace on an input \"\n \"with broadcasted dimensions.\"\n )\n\n out_dtypes = [o.type.dtype for o in shadow.outputs]\n if any(\n inputs[i].type.dtype != out_dtypes[o] for o, i in inplace_pattern.items()\n ):\n raise TypeError(\n (\n \"Cannot do an inplace operation on incompatible data types.\",\n ([i.type.dtype for i in inputs], out_dtypes, inplace_pattern),\n )\n )\n assert len(out_dtypes) == len(out_broadcastables)\n return out_dtypes, out_broadcastables, inputs\n\n def make_node(self, *inputs):\n \"\"\"\n If the inputs have different number of dimensions, their shape\n is left-completed to the greatest number of dimensions with 1s\n using DimShuffle.\n \"\"\"\n inputs = [as_tensor_variable(i) for i in inputs]\n out_dtypes, out_broadcastables, inputs = self.get_output_info(\n DimShuffle, *inputs\n )\n outputs = [\n TensorType(dtype=dtype, broadcastable=broadcastable)()\n for dtype, broadcastable in zip(out_dtypes, out_broadcastables)\n ]\n return Apply(self, inputs, outputs)\n\n def __str__(self):\n if self.name is None:\n if self.inplace_pattern:\n items = list(self.inplace_pattern.items())\n items.sort()\n return f\"Elemwise{{{self.scalar_op}}}{items}\"\n else:\n return \"Elemwise{%s}\" % (self.scalar_op)\n else:\n return self.name\n\n def R_op(self, inputs, eval_points):\n outs = self(*inputs, return_list=True)\n rval = [None for x in outs]\n # For each output\n for idx, out in enumerate(outs):\n # make such that _bgrads computes only the gradients of the\n # current output on the inputs ( and not all outputs)\n ograds = [x.zeros_like() for x in outs]\n ograds[idx] = aesara.tensor.basic.ones_like(out)\n\n bgrads = self._bgrad(inputs, outs, ograds)\n rop_out = None\n\n for jdx, (inp, eval_point) in enumerate(zip(inputs, eval_points)):\n # if None, then we can just ignore this branch ..\n # what we do is to assume that for any non-differentiable\n # branch, the gradient is actually 0, which I think is not\n # the right thing to do .. 
have to talk to Ian and James\n # about it\n\n if bgrads[jdx] is None or isinstance(\n bgrads[jdx].type, DisconnectedType\n ):\n pass\n elif eval_point is not None:\n if rop_out is None:\n rop_out = bgrads[jdx] * eval_point\n else:\n rop_out = rop_out + bgrads[jdx] * eval_point\n\n rval[idx] = rop_out\n\n return rval\n\n def connection_pattern(self, node):\n\n if hasattr(self.scalar_op, \"connection_pattern\"):\n return self.scalar_op.connection_pattern(node)\n\n return [[True for output in node.outputs] for ipt in node.inputs]\n\n def L_op(self, inputs, outs, ograds):\n from aesara.tensor.math import sum as aet_sum\n\n # Compute grad with respect to broadcasted input\n rval = self._bgrad(inputs, outs, ograds)\n\n # TODO: make sure that zeros are clearly identifiable\n # to the gradient.grad method when the outputs have\n # some integer and some floating point outputs\n if any(out.type.dtype not in continuous_dtypes for out in outs):\n # For integer output, return value may only be zero or undefined\n # We don't bother with trying to check that the scalar ops\n # correctly returned something that evaluates to 0, we just make\n # the return value obviously zero so that gradient.grad can tell\n # this op did the right thing.\n new_rval = []\n for elem, ipt in zip(rval, inputs):\n if isinstance(elem.type, (NullType, DisconnectedType)):\n new_rval.append(elem)\n else:\n elem = ipt.zeros_like()\n if str(elem.type.dtype) not in continuous_dtypes:\n elem = elem.astype(config.floatX)\n assert str(elem.type.dtype) not in discrete_dtypes\n new_rval.append(elem)\n return new_rval\n\n # sum out the broadcasted dimensions\n for i, ipt in enumerate(inputs):\n if isinstance(rval[i].type, (NullType, DisconnectedType)):\n continue\n\n # List of all the dimensions that are broadcastable for input[i] so\n # we can sum over them\n # TODO: only count dimensions that were effectively broadcasted\n to_sum = [\n j\n for j, bcast in enumerate(ipt.type.broadcastable)\n if bcast and not outs[0].broadcastable[j]\n ]\n\n if to_sum:\n sr = aet_sum(rval[i], axis=to_sum, keepdims=True)\n rval[i] = sr\n\n return rval\n\n def _bgrad(self, inputs, outputs, ograds):\n # returns grad, with respect to broadcasted versions of inputs\n\n with config.change_flags(compute_test_value=\"off\"):\n\n def as_scalar(t):\n if isinstance(t.type, (NullType, DisconnectedType)):\n return t\n return get_scalar_type(t.type.dtype)()\n\n scalar_inputs = list(map(as_scalar, inputs))\n scalar_ograds = list(map(as_scalar, ograds))\n scalar_outputs = self.scalar_op.make_node(\n *[get_scalar_type(dtype=i.type.dtype).make_variable() for i in inputs]\n ).outputs\n scalar_igrads = self.scalar_op.L_op(\n scalar_inputs, scalar_outputs, scalar_ograds\n )\n for igrad in scalar_igrads:\n assert igrad is not None, self.scalar_op\n\n if not isinstance(scalar_igrads, (list, tuple)):\n raise TypeError(\n f\"{str(self.scalar_op)}.grad returned {str(type(scalar_igrads))} instead of list or tuple\"\n )\n\n nd = len(inputs[0].type.broadcastable) # this is the same for everyone\n\n def transform(r):\n # From a graph of ScalarOps, make a graph of Broadcast ops.\n if isinstance(r.type, (NullType, DisconnectedType)):\n return r\n if r in scalar_inputs:\n return inputs[scalar_inputs.index(r)]\n if r in scalar_outputs:\n return outputs[scalar_outputs.index(r)]\n if r in scalar_ograds:\n return ograds[scalar_ograds.index(r)]\n node = r.owner\n if node is None:\n # the gradient contains a constant, translate it as\n # an equivalent TensorType of size 1 and proper number 
of\n # dimensions\n res = aesara.tensor.basic.constant(\n np.asarray(r.data), dtype=r.type.dtype\n )\n return DimShuffle((), [\"x\"] * nd)(res)\n\n new_r = Elemwise(node.op, {})(*[transform(ipt) for ipt in node.inputs])\n return new_r\n\n ret = []\n for scalar_igrad, ipt in zip(scalar_igrads, inputs):\n if scalar_igrad is None:\n # undefined gradient\n ret.append(None)\n continue\n ret.append(transform(scalar_igrad))\n\n return ret\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n # Postpone the ufunc building to the last minutes due to:\n # - NumPy ufunc support only up to 31 inputs.\n # But our c code support more.\n # - nfunc is reused for scipy and scipy is optional\n if len(node.inputs) > 32 and self.ufunc and impl == \"py\":\n impl = \"c\"\n\n if getattr(self, \"nfunc_spec\", None) and impl != \"c\":\n self.nfunc = getattr(np, self.nfunc_spec[0], None)\n if self.nfunc is None:\n # Not inside NumPy. So probably another package like scipy.\n symb = self.nfunc_spec[0].split(\".\")\n for idx in range(1, len(self.nfunc_spec[0])):\n try:\n module = __import__(\".\".join(symb[:idx]))\n except ImportError:\n break\n for sub in symb[1:]:\n try:\n module = getattr(module, sub)\n except AttributeError:\n module = None\n break\n self.nfunc = module\n\n if (\n len(node.inputs) < 32\n and (self.nfunc is None or self.scalar_op.nin != len(node.inputs))\n and self.ufunc is None\n and impl == \"py\"\n ):\n\n ufunc = np.frompyfunc(\n self.scalar_op.impl, len(node.inputs), self.scalar_op.nout\n )\n if self.scalar_op.nin > 0:\n # We can reuse it for many nodes\n self.ufunc = ufunc\n else:\n node.tag.ufunc = ufunc\n\n # Numpy ufuncs will sometimes perform operations in\n # float16, in particular when the input is int8.\n # This is not something that we want, and we do not\n # do it in the C code, so we specify that the computation\n # should be carried out in the returned dtype.\n # This is done via the \"sig\" kwarg of the ufunc, its value\n # should be something like \"ff->f\", where the characters\n # represent the dtype of the inputs and outputs.\n\n # NumPy 1.10.1 raise an error when giving the signature\n # when the input is complex. 
So add it only when inputs is int.\n out_dtype = node.outputs[0].dtype\n if (\n out_dtype in float_dtypes\n and isinstance(self.nfunc, np.ufunc)\n and node.inputs[0].dtype in discrete_dtypes\n ):\n char = np.sctype2char(out_dtype)\n sig = char * node.nin + \"->\" + char * node.nout\n node.tag.sig = sig\n node.tag.fake_node = Apply(\n self.scalar_op,\n [\n get_scalar_type(dtype=input.type.dtype).make_variable()\n for input in node.inputs\n ],\n [\n get_scalar_type(dtype=output.type.dtype).make_variable()\n for output in node.outputs\n ],\n )\n\n self.scalar_op.prepare_node(node.tag.fake_node, None, None, impl)\n\n def perform(self, node, inputs, output_storage):\n if len(node.inputs) >= 32:\n # Some versions of NumPy will segfault, other will raise a\n # ValueError, if the number of inputs to a ufunc is 32 or more.\n # In that case, the C version should be used, or Elemwise fusion\n # should be disabled.\n super().perform(node, inputs, output_storage)\n\n for d, dim_shapes in enumerate(zip(*(i.shape for i in inputs))):\n if len(set(dim_shapes) - {1}) > 1:\n raise ValueError(f\"Shapes on dimension {d} do not match: {dim_shapes}\")\n\n # Determine the shape of outputs\n out_shape = []\n for values in zip(*[input.shape for input in inputs]):\n if any(v == 0 for v in values):\n # All non-broadcasted dimensions should be zero\n assert max(values) <= 1\n out_shape.append(0)\n else:\n out_shape.append(max(values))\n out_shape = tuple(out_shape)\n\n ufunc_args = inputs\n ufunc_kwargs = {}\n # We supported in the past calling manually op.perform.\n # To keep that support we need to sometimes call self.prepare_node\n if self.nfunc is None and self.ufunc is None:\n self.prepare_node(node, None, None, \"py\")\n if self.nfunc and len(inputs) == self.nfunc_spec[1]:\n ufunc = self.nfunc\n nout = self.nfunc_spec[2]\n if hasattr(node.tag, \"sig\"):\n ufunc_kwargs[\"sig\"] = node.tag.sig\n # Unfortunately, the else case does not allow us to\n # directly feed the destination arguments to the nfunc\n # since it sometimes requires resizing. Doing this\n # optimization is probably not worth the effort, since we\n # should normally run the C version of the Op.\n else:\n # the second calling form is used because in certain versions of\n # numpy the first (faster) version leads to segfaults\n if self.ufunc:\n ufunc = self.ufunc\n elif not hasattr(node.tag, \"ufunc\"):\n # It happen that make_thunk isn't called, like in\n # get_scalar_constant_value\n self.prepare_node(node, None, None, \"py\")\n # prepare_node will add ufunc to self or the tag\n # depending if we can reuse it or not. So we need to\n # test both again.\n if self.ufunc:\n ufunc = self.ufunc\n else:\n ufunc = node.tag.ufunc\n else:\n ufunc = node.tag.ufunc\n\n nout = ufunc.nout\n\n variables = ufunc(*ufunc_args, **ufunc_kwargs)\n\n if nout == 1:\n variables = [variables]\n\n for i, (variable, storage, nout) in enumerate(\n zip(variables, output_storage, node.outputs)\n ):\n if getattr(variable, \"dtype\", \"\") == \"object\":\n # Since numpy 1.6, function created with numpy.frompyfunc\n # always return an ndarray with dtype object\n variable = np.asarray(variable, dtype=nout.dtype)\n\n if i in self.inplace_pattern:\n odat = inputs[self.inplace_pattern[i]]\n odat[...] 
= variable\n storage[0] = odat\n\n # Sometimes NumPy return a Python type.\n # Some Aesara op return a different dtype like floor, ceil,\n # trunc, eq, ...\n elif not isinstance(variable, np.ndarray) or variable.dtype != nout.dtype:\n variable = np.asarray(variable, nout.dtype)\n # The next line is needed for numpy 1.9. Otherwise\n # there are tests that fail in DebugMode.\n # Normally we would call aesara.misc._asarray, but it\n # is faster to inline the code. We know that the dtype\n # are the same string, just different typenum.\n if np.dtype(nout.dtype).num != variable.dtype.num:\n variable = variable.view(dtype=nout.dtype)\n storage[0] = variable\n # numpy.real return a view!\n elif not variable.flags.owndata:\n storage[0] = variable.copy()\n else:\n storage[0] = variable\n\n def infer_shape(self, fgraph, node, i_shapes):\n rval = []\n for o in node.outputs:\n oshp = []\n for dim, b in enumerate(o.type.broadcastable):\n b_dim = None\n if b:\n # this is broadcastable\n b_dim = 1\n else:\n # there must be some input that is not broadcastable in\n # dimension 'dim'\n for ishp, i in zip(i_shapes, node.inputs):\n if isinstance(i.type, Scalar):\n continue # we skip scalar\n if not i.type.broadcastable[dim]:\n # input i is not broadcastable in position dim\n # therefore if its shape is known, we can use it\n # as the output shape\n if ishp[dim]:\n b_dim = ishp[dim]\n break\n\n # b_dim might still be None, if every input's shape was unknown\n # in dimension 'dim'\n oshp.append(b_dim)\n # TODO: it would be interesting to return the constraining\n # information that if one of the inputs shape[dim] is known\n # and another input's shape[dim] is not, that we can now assume\n # that the other input's shape[dim] is the same as the first.\n rval.append(tuple(oshp))\n return rval\n\n def _c_all(self, node, nodename, inames, onames, sub):\n # Some `Op`s directly call `Elemwise._c_all` or `Elemwise.c_code`\n # To not request all of them to call prepare_node(), do it here.\n # There is no harm if it get called multiple times.\n if not hasattr(node.tag, \"fake_node\"):\n self.prepare_node(node, None, None, \"c\")\n _inames = inames\n _onames = onames\n\n inames = uniq(inames)\n inputs = uniq(node.inputs)\n # assert that inames and inputs order stay consistent.\n # This is to protect again futur change of uniq.\n assert len(inames) == len(inputs)\n ii, iii = list(zip(*uniq(list(zip(_inames, node.inputs)))))\n assert all([x == y for x, y in zip(ii, inames)])\n assert all([x == y for x, y in zip(iii, inputs)])\n\n defines = \"\"\n undefs = \"\"\n\n # The destroy map is a map of output indices to input indices\n # that overwrite them. 
We just convert them to the actual\n # Variables.\n dmap = {\n node.outputs[o]: [node.inputs[i]] for o, i in self.inplace_pattern.items()\n }\n\n # dtypes of the inputs\n idtypes = [input.type.dtype_specs()[1] for input in inputs]\n\n # These are the outputs that we will need to allocate\n # (output, name, name of the c type), transposed\n real = list(\n zip(\n *[\n (r, s, r.type.dtype_specs()[1])\n for r, s in zip(node.outputs, onames)\n if r not in dmap\n ]\n )\n )\n if real:\n real_outputs, real_onames, real_odtypes = real\n else:\n real_outputs, real_onames, real_odtypes = [], [], []\n\n # Outputs that are aliased with an input (inplace)\n # (output, name), transposed (c type name not needed since we don't\n # need to allocate.\n aliased = list(\n zip(*[(r, s) for (r, s) in zip(node.outputs, onames) if r in dmap])\n )\n if aliased:\n aliased_outputs, aliased_onames = aliased\n else:\n aliased_outputs, aliased_onames = [], []\n\n # for each input:\n # same as range(ndim), but with 'x' at all broadcastable positions\n orders = [\n [x and \"x\" or i for i, x in enumerate(input.type.broadcastable)]\n for input in inputs\n ]\n\n # number of nested loops we will need (all inputs have same\n # dimensionality)\n nnested = len(orders[0])\n sub = dict(sub)\n for i, (input, iname) in enumerate(zip(inputs, inames)):\n # the c generators will substitute the input names for\n # references to loop variables lv0, lv1, ...\n sub[f\"lv{i}\"] = iname\n\n decl = cgen.make_declare(orders, idtypes, sub)\n checks = cgen.make_checks(orders, idtypes, sub)\n\n # Check if all inputs (except broadcasted scalar) are fortran.\n # In that case, create an fortran output ndarray.\n z = list(zip(inames, inputs))\n alloc_fortran = \" && \".join(\n [\n f\"PyArray_ISFORTRAN({arr})\"\n for arr, var in z\n if not all(var.broadcastable)\n ]\n )\n # If it is a scalar, make it c contig to prevent problem with\n # NumPy C and F contig not always set as both of them.\n if len(alloc_fortran) == 0:\n alloc_fortran = \"0\"\n\n alloc = \"\"\n # We loop over the \"real\" outputs, i.e., those that are not\n # inplace (must be allocated) and we declare/allocate/check\n # them\n for output, oname, odtype in zip(real_outputs, real_onames, real_odtypes):\n i += 1 # before this loop, i = number of inputs\n sub[f\"lv{i}\"] = oname\n sub[\"olv\"] = oname\n alloc += cgen.make_declare(\n [list(range(nnested))], [odtype], dict(sub, lv0=oname)\n )\n alloc += cgen.make_alloc(orders, odtype, sub, fortran=alloc_fortran)\n alloc += cgen.make_checks(\n [list(range(nnested))], [odtype], dict(sub, lv0=oname)\n )\n olv_index = i # index of the last output\n\n # We loop over the \"aliased\" outputs, i.e., those that are\n # inplace (overwrite the contents of one of the inputs) and\n # make the output pointers point to their corresponding input\n # pointers.\n for output, oname in zip(aliased_outputs, aliased_onames):\n olv_index = inputs.index(dmap[output][0])\n iname = inames[olv_index]\n # We make the output point to the corresponding input and\n # decrease the reference of whatever the output contained\n # prior to this\n alloc += (\n \"\"\"\n if (%(oname)s) {\n Py_XDECREF(%(oname)s);\n }\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n \"\"\"\n % locals()\n )\n # We alias the scalar variables\n defines += f\"#define {oname}_i {iname}_i\\n\"\n undefs += f\"#undef {oname}_i\\n\"\n\n # Note: here, olv_index is either the index of the last output\n # which is allocated, OR, if there are any aliased outputs,\n # the index of the last of these aliased 
outputs.\n\n # We generate the C code of the inner loop using the scalar op\n if self.openmp:\n # If we are using openmp, we need to get rid of the \"goto\"\n # statement in sub['fail']. For now we recreate it here.\n fail = failure_code(sub, use_goto=False)\n else:\n fail = sub[\"fail\"]\n task_code = self.scalar_op.c_code(\n node.tag.fake_node,\n nodename + \"_scalar_\",\n [f\"{s}_i\" for s in _inames],\n [f\"{s}_i\" for s in onames],\n dict(sub, fail=fail),\n )\n code = (\n \"\"\"\n {\n %(defines)s\n %(task_code)s\n %(undefs)s\n }\n \"\"\"\n % locals()\n )\n\n loop_orders = orders + [list(range(nnested))] * len(real_onames)\n dtypes = idtypes + list(real_odtypes)\n if all(\n [o.ndim <= 1 for o in node.outputs]\n or\n # Use simpler code when output ndim == 0 or 1\n # or for broadcated scalar.\n all(node.outputs[0].broadcastable)\n ):\n if nnested:\n all_code = [(\"\", \"\")] * (nnested - 1) + [(\"\", code)] + [\"\"]\n else:\n all_code = [code]\n if len(all_code) == 1:\n # No loops\n task_decl = \"\".join(\n [\n \"{}& {}_i = *{}_iter;\\n\".format(dtype, name, name)\n for name, dtype in zip(\n inames + list(real_onames), idtypes + list(real_odtypes)\n )\n ]\n )\n\n preloops = {}\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\n for j, index in enumerate(loop_order):\n if index != \"x\":\n preloops.setdefault(j, \"\")\n preloops[j] += (\n \"%%(lv%(i)s)s_iter = (%(dtype)s*)\"\n \"(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()\n ) % sub\n break\n else: # all broadcastable\n preloops.setdefault(0, \"\")\n preloops[0] += (\n \"%%(lv%(i)s)s_iter = (%(dtype)s*)\"\n \"(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()\n ) % sub\n\n init_array = preloops.get(0, \" \")\n loop = (\n \"\"\"\n {\n %(defines)s\n %(init_array)s\n %(task_decl)s\n %(task_code)s\n %(undefs)s\n }\n \"\"\"\n % locals()\n )\n else:\n loop = cgen.make_loop(\n loop_orders=loop_orders,\n dtypes=dtypes,\n loop_tasks=all_code,\n sub=sub,\n openmp=self.openmp,\n )\n else:\n loop = cgen.make_reordered_loop(\n init_loop_orders=loop_orders,\n olv_index=olv_index,\n dtypes=dtypes,\n inner_task=code,\n sub=sub,\n openmp=self.openmp,\n )\n\n # If all inputs and outputs are contiguous\n # and the scalar op define optimized code for that case\n # use it! 
The scalar_op need to check the broadcast flag himself.\n if (\n all([o.ndim >= 1 for o in node.outputs])\n and\n # Don't use the contig code for broadcasted scalar.\n not all(node.outputs[0].broadcastable)\n ):\n contig = None\n try:\n contig = self.scalar_op.c_code_contiguous(\n node, nodename + \"_scalar_contig_\", _inames, onames, sub\n )\n except MethodNotDefined:\n # Try to make one generic version, this will help the\n # compiler to vectorize the code as their won't be as\n # many ptr and the stride will be hard coded.\n if all(\n [\n io.broadcastable == node.outputs[0].broadcastable\n or all(io.broadcastable)\n for io in node.inputs + node.outputs\n ]\n ):\n z = onames[0]\n contig = f\"\"\"\n // All output have the same size\n npy_intp n = PyArray_SIZE({z});\n \"\"\"\n index = \"\"\n for x, var in zip(inames + onames, inputs + node.outputs):\n if not all(var.broadcastable):\n contig += (\n \"\"\"\n dtype_%(x)s * %(x)s_ptr = (dtype_%(x)s*) PyArray_DATA(%(x)s);\n \"\"\"\n % locals()\n )\n index += (\n \"\"\"\n dtype_%(x)s& %(x)s_i = %(x)s_ptr[i];\n \"\"\"\n % locals()\n )\n else:\n contig += (\n \"\"\"\n dtype_%(x)s& %(x)s_i = ((dtype_%(x)s*) PyArray_DATA(%(x)s))[0];\n \"\"\"\n % locals()\n )\n if self.openmp:\n contig += f\"\"\"#pragma omp parallel for if(n>={int(config.openmp_elemwise_minsize)})\n \"\"\"\n contig += (\n \"\"\"\n for(int i=0; i<n; i++){\n %(index)s\n %(task_code)s;\n }\n \"\"\"\n % locals()\n )\n if contig is not None:\n z = list(zip(inames + onames, inputs + node.outputs))\n cond1 = \" && \".join(\n [\n \"PyArray_ISCONTIGUOUS(%s)\" % arr\n for arr, var in z\n if not all(var.broadcastable)\n ]\n )\n cond2 = \" && \".join(\n [\n \"PyArray_ISFORTRAN(%s)\" % arr\n for arr, var in z\n if not all(var.broadcastable)\n ]\n )\n loop = (\n \"\"\"\n if((%(cond1)s) || (%(cond2)s)){\n %(contig)s\n }else{\n %(loop)s\n }\n \"\"\"\n % locals()\n )\n return decl, checks, alloc, loop, \"\"\n\n def c_code(self, node, nodename, inames, onames, sub):\n if (\n any(i.dtype == \"float16\" for i in node.inputs)\n or any(o.dtype == \"float16\" for o in node.outputs)\n or\n # This is for Composite\n getattr(self.scalar_op, \"inner_float16\", False)\n ):\n # Disable C code for float16 vars\n raise NotImplementedError()\n code = \"\\n\".join(self._c_all(node, nodename, inames, onames, sub))\n return code\n\n def c_headers(self, **kwargs):\n return [\"<vector>\", \"<algorithm>\"]\n\n def c_header_dirs(self, **kwargs):\n return self.scalar_op.c_header_dirs(**kwargs)\n\n def c_support_code(self, **kwargs):\n return self.scalar_op.c_support_code(**kwargs)\n\n def c_support_code_apply(self, node, nodename):\n support_code = self.scalar_op.c_support_code_apply(node, nodename + \"_scalar_\")\n return support_code\n\n def c_code_cache_version_apply(self, node):\n version = [13] # the version corresponding to the c code in this Op\n\n # now we insert versions for the ops on which we depend...\n scalar_node = Apply(\n self.scalar_op,\n [\n get_scalar_type(dtype=input.type.dtype).make_variable()\n for input in node.inputs\n ],\n [\n get_scalar_type(dtype=output.type.dtype).make_variable()\n for output in node.outputs\n ],\n )\n version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))\n for i in node.inputs + node.outputs:\n version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())\n version.append((\"openmp\", self.openmp))\n if all(version):\n return tuple(version)\n else:\n return ()\n\n\nclass CAReduce(COp):\n \"\"\"\n CAReduce = Commutative Associative Reduce\n Reduces 
a scalar operation along the specified axis(es).\n (The scalar op should be both commutative and assocative)\n\n The output will have the same shape as the input minus the reduced\n dimensions. It will contain the variable of accumulating all values\n over the reduced dimensions using the specified scalar op.\n\n Parameters\n ----------\n scalar_op\n A binary scalar op with only one output.\n It must be commutative and associative.\n axis\n - The dimension along which we want to reduce\n - List of dimensions that we want to reduce\n - If None, all dimensions are reduced\n\n Notes\n -----\n .. code-block:: python\n\n CAReduce(add) # sum (ie, acts like the numpy sum operation)\n CAReduce(mul) # product\n CAReduce(maximum) # max\n CAReduce(minimum) # min\n CAReduce(or_) # any # not lazy\n CAReduce(and_) # all # not lazy\n CAReduce(xor) # a bit at 1 tell that there was an odd number of\n # bit at that position that where 1. 0 it was an\n # even number ...\n\n In order to (eventually) optimize memory usage patterns,\n CAReduce makes zero guarantees on the order in which it\n iterates over the dimensions and the elements of the\n array(s). Therefore, to ensure consistent variables, the scalar\n operation represented by the reduction must be both commutative\n and associative (eg add, multiply, maximum, binary or/and/xor - but not\n subtract, divide or power).\n\n \"\"\"\n\n __props__: Union[\n Tuple[str], Tuple[str, str], Tuple[str, str, str], Tuple[str, str, str, str]\n ] = (\"scalar_op\", \"axis\")\n\n def __init__(self, scalar_op, axis=None):\n if scalar_op.nin not in [-1, 2] or scalar_op.nout != 1:\n raise NotImplementedError(\n \"CAReduce only supports binary functions with a single \" \"output.\"\n )\n\n self.axis = None\n self.ufunc_is_vectorized = False\n self.scalar_op = scalar_op\n self.set_ufunc(scalar_op)\n\n if axis is not None:\n if isinstance(axis, (int, np.integer)) or (\n isinstance(axis, np.ndarray) and not axis.shape\n ):\n self.axis = (int(axis),)\n else:\n self.axis = tuple(axis)\n\n def set_ufunc(self, scalar_op):\n if hasattr(scalar_op, \"nfunc_spec\") and hasattr(np, scalar_op.nfunc_spec[0]):\n self.ufunc = getattr(np, scalar_op.nfunc_spec[0])\n else:\n self.ufunc = np.frompyfunc(scalar_op.impl, 2, 1)\n self.ufunc_is_vectorized = True\n\n def _output_dtype(self, input_dtype):\n return input_dtype\n\n def make_node(self, input):\n input = as_tensor_variable(input)\n inp_dims = input.type.ndim\n inp_bdcast = input.type.broadcastable\n inp_dtype = input.type.dtype\n copy_op = False\n\n axis = self.axis\n if axis is None:\n axis = list(range(len(inp_bdcast)))\n\n axis = list(axis)\n for i, a in enumerate(axis):\n if a >= inp_dims or a < -inp_dims:\n raise ValueError(\n f\"Not enough dimensions on {input} to reduce on axis {a}\"\n )\n if a < 0:\n copy_op = True\n axis[i] = a + inp_dims\n\n # We can't call self.__class__() as there is a class that\n # inherits from CAReduce that doesn't have the same signature\n if copy_op:\n op = copy(self)\n op.set_ufunc(op.scalar_op)\n assert len(axis) == len(self.axis)\n op.axis = tuple(axis)\n else:\n op = self\n\n broadcastable = [x for i, x in enumerate(inp_bdcast) if i not in axis]\n\n output = TensorType(\n dtype=self._output_dtype(inp_dtype), broadcastable=broadcastable\n )()\n\n return Apply(op, [input], [output])\n\n def __getstate__(self):\n d = copy(self.__dict__)\n d.pop(\"ufunc\", None)\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n self.set_ufunc(self.scalar_op)\n\n def __str__(self):\n if self.axis is 
not None:\n return \"Reduce{{{}}}{{{}}}\".format(\n self.scalar_op,\n \", \".join(str(x) for x in self.axis),\n )\n else:\n return \"Reduce{%s}\" % self.scalar_op\n\n def perform(self, node, inp, out):\n (input,) = inp\n (output,) = out\n axis = self.axis\n if axis is None:\n axis = list(range(input.ndim))\n\n if hasattr(self, \"acc_dtype\") and self.acc_dtype is not None:\n acc_dtype = self.acc_dtype\n else:\n acc_dtype = node.outputs[0].type.dtype\n\n variable = np.array(input, dtype=acc_dtype)\n\n if axis:\n # Reducing functions built using np.frompyfunc() do not\n # support reduction along multiple axes. Hence loop through\n # each, otherwise numpy's inbuilt reduction functions\n # support reduction along multiple axes directly.\n if self.ufunc_is_vectorized:\n to_reduce = reversed(sorted(axis))\n for dimension in to_reduce:\n variable = self.ufunc.reduce(variable, dimension, dtype=acc_dtype)\n else:\n variable = self.ufunc.reduce(variable, axis=tuple(axis))\n output[0] = _asarray(variable, dtype=node.outputs[0].type.dtype)\n else:\n # Force a copy\n output[0] = np.array(variable, copy=True, dtype=node.outputs[0].type.dtype)\n\n def infer_shape(self, fgraph, node, shapes):\n (ishape,) = shapes\n axis = self.axis\n if axis is None:\n return ((),)\n return (\n [\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in axis\n ],\n )\n\n def _c_all(self, node, name, inames, onames, sub):\n\n input = node.inputs[0]\n output = node.outputs[0]\n\n iname = inames[0]\n oname = onames[0]\n\n idtype = input.type.dtype_specs()[1]\n odtype = output.type.dtype_specs()[1]\n\n acc_dtype = getattr(self, \"acc_dtype\", None)\n\n if acc_dtype is not None:\n if acc_dtype == \"float16\":\n raise MethodNotDefined(\"no c_code for float16\")\n acc_type = TensorType(\n broadcastable=node.outputs[0].broadcastable, dtype=acc_dtype\n )\n adtype = acc_type.dtype_specs()[1]\n else:\n adtype = odtype\n\n axis = self.axis\n if axis is None:\n axis = list(range(len(input.type.broadcastable)))\n\n if len(axis) == 0:\n # The acc_dtype is never a downcast compared to the input dtype\n # So we just need a cast to the output dtype.\n var = aesara.tensor.basic.cast(input, node.outputs[0].dtype)\n if var is input:\n var = Elemwise(scalar_identity)(input)\n assert var.dtype == node.outputs[0].dtype\n return var.owner.op._c_all(var.owner, name, inames, onames, sub)\n\n order1 = [i for i in range(input.type.ndim) if i not in axis]\n order = order1 + list(axis)\n\n nnested = len(order1)\n\n sub = dict(sub)\n for i, (input, iname) in enumerate(zip(node.inputs, inames)):\n sub[f\"lv{i}\"] = iname\n\n decl = \"\"\n if adtype != odtype:\n # Create an accumulator variable different from the output\n aname = \"acc\"\n decl = acc_type.c_declare(aname, sub)\n decl += acc_type.c_init(aname, sub)\n else:\n # the output is the accumulator variable\n aname = oname\n\n decl += cgen.make_declare([order], [idtype], sub)\n checks = cgen.make_checks([order], [idtype], sub)\n\n alloc = \"\"\n i += 1\n sub[f\"lv{i}\"] = oname\n sub[\"olv\"] = oname\n\n # Allocate output buffer\n alloc += cgen.make_declare(\n [list(range(nnested)) + [\"x\"] * len(axis)], [odtype], dict(sub, lv0=oname)\n )\n alloc += cgen.make_alloc([order1], odtype, sub)\n alloc += cgen.make_checks(\n [list(range(nnested)) + [\"x\"] * len(axis)], [odtype], dict(sub, lv0=oname)\n )\n\n if adtype != odtype:\n # Allocate accumulation buffer\n sub[f\"lv{i}\"] = aname\n sub[\"olv\"] = aname\n\n alloc += cgen.make_declare(\n [list(range(nnested)) + [\"x\"] * 
len(axis)],\n [adtype],\n dict(sub, lv0=aname),\n )\n alloc += cgen.make_alloc([order1], adtype, sub)\n alloc += cgen.make_checks(\n [list(range(nnested)) + [\"x\"] * len(axis)],\n [adtype],\n dict(sub, lv0=aname),\n )\n\n identity = self.scalar_op.identity\n\n if np.isposinf(identity):\n if input.type.dtype in [\"float32\", \"float64\"]:\n identity = \"__builtin_inf()\"\n elif input.type.dtype.startswith(\"uint\") or input.type.dtype == \"bool\":\n identity = \"1\"\n else:\n identity = \"NPY_MAX_\" + str(input.type.dtype).upper()\n elif np.isneginf(identity):\n if input.type.dtype in [\"float32\", \"float64\"]:\n identity = \"-__builtin_inf()\"\n elif input.type.dtype.startswith(\"uint\") or input.type.dtype == \"bool\":\n identity = \"0\"\n else:\n identity = \"NPY_MIN_\" + str(input.type.dtype).upper()\n elif identity is None:\n raise TypeError(f\"The {self.scalar_op} does not define an identity.\")\n\n task0_decl = (\n f\"{adtype}& {aname}_i = *{aname}_iter;\\n\" f\"{aname}_i = {identity};\"\n )\n\n task1_decl = f\"{idtype}& {inames[0]}_i = *{inames[0]}_iter;\\n\"\n\n task1_code = self.scalar_op.c_code(\n Apply(\n self.scalar_op,\n [\n get_scalar_type(dtype=iv.type.dtype).make_variable()\n for iv in (node.inputs * 2)\n ],\n [\n get_scalar_type(dtype=ov.type.dtype).make_variable()\n for ov in node.outputs\n ],\n ),\n None,\n [f\"{aname}_i\", f\"{inames[0]}_i\"],\n [f\"{aname}_i\"],\n sub,\n )\n code1 = f\"\"\"\n {{\n {task1_decl}\n {task1_code}\n }}\n \"\"\"\n\n if node.inputs[0].type.ndim:\n if len(axis) == 1:\n all_code = [(\"\", \"\")] * nnested + [(task0_decl, code1), \"\"]\n else:\n all_code = (\n [(\"\", \"\")] * nnested\n + [(task0_decl, \"\")]\n + [(\"\", \"\")] * (len(axis) - 2)\n + [(\"\", code1), \"\"]\n )\n else:\n all_code = [task0_decl + code1]\n loop = cgen.make_loop_careduce(\n [order, list(range(nnested)) + [\"x\"] * len(axis)],\n [idtype, adtype],\n all_code,\n sub,\n )\n\n end = \"\"\n if adtype != odtype:\n end = f\"\"\"\n PyArray_CopyInto({oname}, {aname});\n \"\"\"\n end += acc_type.c_cleanup(aname, sub)\n\n return decl, checks, alloc, loop, end\n\n def c_code(self, node, name, inames, onames, sub):\n code = \"\\n\".join(self._c_all(node, name, inames, onames, sub))\n return code\n\n def c_headers(self, **kwargs):\n # Sometimes, Elemwise's c_code is returned, so we need its headers\n return [\"<vector>\", \"<algorithm>\"]\n\n def c_code_cache_version_apply(self, node):\n # the version corresponding to the c code in this Op\n version = [9]\n\n # now we insert versions for the ops on which we depend...\n scalar_node = Apply(\n self.scalar_op,\n [\n get_scalar_type(dtype=input.type.dtype).make_variable()\n for input in node.inputs\n ],\n [\n get_scalar_type(dtype=output.type.dtype).make_variable()\n for output in node.outputs\n ],\n )\n version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))\n for i in node.inputs + node.outputs:\n version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())\n if all(version):\n return tuple(version)\n else:\n return ()\n\n\nclass CAReduceDtype(CAReduce):\n \"\"\"\n Reduces a scalar operation along the specified axis(es).\n\n This subclass of CAReduce accepts an additional \"dtype\" parameter,\n that specifies which dtype the output should be.\n\n It also accepts an optional \"acc_dtype\", which specify the dtype that\n will be used for the accumulation.\n\n So, the accumulation will be done into a tensor of dtype \"acc_dtype\",\n then it will be casted into \"dtype\" and returned.\n\n If no dtype is 
provided, one will be inferred so as not to lose\n too much precision.\n\n Parameters\n ----------\n scalar_op\n A binary scalar op with only one output.\n It must be commutative and associative.\n\n axis\n * the dimension along which we want to reduce\n * list of dimensions that we want to reduce\n * if None, all dimensions are reduced\n\n dtype\n The dtype of the returned tensor. If None, then we use the default\n dtype which is the same as the input tensor's dtype except when:\n\n * the input dtype is a signed integer of precision < 64 bit, in which\n case we use int64\n * the input dtype is an unsigned integer of precision < 64 bit, in\n which case we use uint64\n\n This default dtype does _not_ depend on the value of \"acc_dtype\".\n This behavior is similar in spirit to that of numpy (except numpy\n uses the default machine integer while we always use 64 bit\n integers to avoid platform-dependent behavior).\n\n acc_dtype\n The dtype of the internal accumulator.\n If None (default), we use the dtype in the list below,\n or the input dtype if its precision is higher:\n\n * for int dtypes, we use at least int64;\n * for uint dtypes, we use at least uint64;\n * for float dtypes, we use at least float64;\n * for complex dtypes, we use at least complex128.\n\n \"\"\"\n\n __props__: Union[Tuple[str, str, str], Tuple[str, str, str, str]] = (\n \"scalar_op\",\n \"axis\",\n \"dtype\",\n \"acc_dtype\",\n )\n\n def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):\n super().__init__(scalar_op, axis=axis)\n self.dtype = dtype\n self.acc_dtype = acc_dtype\n\n def __setstate__(self, d):\n super().__setstate__(d)\n if not hasattr(self, \"dtype\"):\n # This is needed as old pickled will crash otherwise.\n # We need to keep the old dtype behavior as the op\n # could be in an apply node with a specified dtype.\n self.dtype = \"OLD\"\n\n if not hasattr(self, \"acc_dtype\"):\n # acc_dtype is not used by any external Op, so we do not\n # need to keep the previous behaviour here.\n self.acc_dtype = None\n\n def _output_dtype(self, idtype):\n dtype = self.dtype\n if dtype == \"OLD\":\n return dict(\n int8=\"int32\",\n int16=\"int32\",\n int32=\"int64\",\n uint8=\"uint32\",\n uint16=\"uint32\",\n uint32=\"uint64\",\n ).get(idtype, idtype)\n if dtype is None:\n # If input has a discrete dtype, upcast it to 64\n return dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n ).get(idtype, idtype)\n else:\n # The important is that the accumulator dtype does not\n # lose precision. Then, the result can be downcasted.\n return dtype\n\n def _acc_dtype(self, idtype):\n acc_dtype = self.acc_dtype\n if acc_dtype is None:\n return dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n float16=\"float32\",\n float32=\"float64\",\n complex64=\"complex128\",\n ).get(idtype, idtype)\n elif acc_dtype in continuous_dtypes and idtype in discrete_dtypes:\n # Specifying a continuous accumulator for discrete input is OK\n return acc_dtype\n else:\n # The conversion has to be considered an upcast.\n upcasted_dtype = upcast(idtype, acc_dtype)\n if acc_dtype != upcasted_dtype:\n raise TypeError(\n f\"Cannot build {self} node with input dtype {idtype} \"\n f\"and acc_dtype {acc_dtype}, as precision would be lost. 
\"\n \"To correct this error, you can:\\n\"\n \" - not specify acc_dtype, or\\n\"\n f\" - use an acc_dtype at least as precise as {upcasted_dtype}.\\n\"\n ' - specify \"dtype\" instead of \"acc_dtype\", so '\n \"the reduction will be precise, but the result will \"\n 'be casted into \"dtype\" at the end.\\n'\n \"If you are expecting the precision loss, you can \"\n f'use tensor.cast(..., dtype=\"{acc_dtype}\"), on your input.'\n )\n return acc_dtype\n\n def make_node(self, input):\n # We need to redefine make_node so that, if self.dtype is None,\n # we can infer what dtype should be, and create a node from an Op\n # of the appropriate dtype.\n input = as_tensor_variable(input)\n dtype = self._output_dtype(input.dtype)\n acc_dtype = self._acc_dtype(input.dtype)\n\n assert dtype is not None\n assert acc_dtype is not None\n\n if dtype == self.dtype and acc_dtype == self.acc_dtype:\n # Don't build another instance\n op = self\n else:\n op = copy(self)\n op.set_ufunc(self.scalar_op)\n op.dtype = dtype\n op.acc_dtype = acc_dtype\n\n assert op.acc_dtype is not None\n\n # TODO: Why doesn't `make_node` just take these\n # automatically-determined values as arguments?\n return super(CAReduceDtype, op).make_node(input)\n\n def __str__(self):\n name = self.__class__.__name__\n if self.__class__.__name__ == \"CAReduceDtype\":\n name = (\"ReduceDtype{%s}\" % self.scalar_op,)\n axis = \"\"\n if self.axis is not None:\n axis = \", \".join(str(x) for x in self.axis)\n axis = f\"axis=[{axis}], \"\n return f\"{name}{{{axis}acc_dtype={self.acc_dtype}}}\"\n\n\ndef scalar_elemwise(*symbol, nfunc=None, nin=None, nout=None, symbolname=None):\n \"\"\"Replace a symbol definition with an `Elemwise`-wrapped version of the corresponding scalar `Op`.\n\n If it is not ``None``, the `nfunc` argument should be a string such that\n ``getattr(numpy, nfunc)`` implements a vectorized version of the `Elemwise`\n operation. `nin` is the number of inputs expected by that function, and nout\n is the number of **destination** inputs it takes. That is, the function\n should take nin + nout inputs. 
`nout == 0` means that the numpy function does\n not take a NumPy array argument to put its result in.\n\n \"\"\"\n import aesara.scalar as scalar\n\n def construct(symbol):\n nonlocal symbolname\n\n symbolname = symbolname or symbol.__name__\n\n if symbolname.endswith(\"_inplace\"):\n elemwise_name = f\"Elemwise{{{symbolname},inplace}}\"\n scalar_op = getattr(scalar, symbolname[: -len(\"_inplace\")])\n inplace_scalar_op = scalar_op.__class__(transfer_type(0))\n rval = Elemwise(\n inplace_scalar_op,\n {0: 0},\n name=elemwise_name,\n nfunc_spec=(nfunc and (nfunc, nin, nout)),\n )\n else:\n elemwise_name = f\"Elemwise{{{symbolname},no_inplace}}\"\n scalar_op = getattr(scalar, symbolname)\n rval = Elemwise(\n scalar_op, name=elemwise_name, nfunc_spec=(nfunc and (nfunc, nin, nout))\n )\n\n if getattr(symbol, \"__doc__\"):\n rval.__doc__ = symbol.__doc__ + \"\\n\" + rval.__doc__\n\n # for the meaning of this see the ./epydoc script\n # it makes epydoc display rval as if it were a function, not an object\n rval.__epydoc_asRoutine = symbol\n rval.__module__ = symbol.__module__\n\n pprint.assign(rval, FunctionPrinter(symbolname.replace(\"_inplace\", \"=\")))\n\n return rval\n\n if symbol:\n return construct(symbol[0])\n else:\n return construct\n\n\n@_get_vector_length.register(Elemwise)\ndef _get_vector_length_Elemwise(op, var):\n if len(var.owner.inputs) == 1 and len(var.owner.outputs) == 1:\n return get_vector_length(var.owner.inputs[0])\n\n raise ValueError(f\"Length of {var} cannot be determined\")\n", "from functools import partial\nfrom typing import Optional\n\nimport numpy as np\nimport pytest\nfrom packaging.version import parse as version_parse\n\nimport aesara.scalar.basic as aes\nfrom aesara.compile.function import function\nfrom aesara.compile.mode import Mode\nfrom aesara.compile.ops import DeepCopyOp, ViewOp\nfrom aesara.compile.sharedvalue import SharedVariable, shared\nfrom aesara.configdefaults import config\nfrom aesara.graph.basic import Apply\nfrom aesara.graph.fg import FunctionGraph\nfrom aesara.graph.op import Op, get_test_value\nfrom aesara.graph.optdb import OptimizationQuery\nfrom aesara.ifelse import ifelse\nfrom aesara.link.jax import JAXLinker\nfrom aesara.scalar.basic import Composite\nfrom aesara.scan.basic import scan\nfrom aesara.tensor import basic as aet\nfrom aesara.tensor import blas as aet_blas\nfrom aesara.tensor import elemwise as aet_elemwise\nfrom aesara.tensor import extra_ops as aet_extra_ops\nfrom aesara.tensor import nlinalg as aet_nlinalg\nfrom aesara.tensor import nnet as aet_nnet\nfrom aesara.tensor import slinalg as aet_slinalg\nfrom aesara.tensor import subtensor as aet_subtensor\nfrom aesara.tensor.elemwise import Elemwise\nfrom aesara.tensor.math import MaxAndArgmax\nfrom aesara.tensor.math import all as aet_all\nfrom aesara.tensor.math import clip, cosh, erf, erfc, erfinv, gammaln, log\nfrom aesara.tensor.math import max as aet_max\nfrom aesara.tensor.math import maximum, prod, sigmoid, softplus\nfrom aesara.tensor.math import sum as aet_sum\nfrom aesara.tensor.random.basic import RandomVariable, normal\nfrom aesara.tensor.random.utils import RandomStream\nfrom aesara.tensor.shape import Shape, Shape_i, SpecifyShape, reshape\nfrom aesara.tensor.type import (\n dscalar,\n dvector,\n iscalar,\n ivector,\n lscalar,\n matrix,\n scalar,\n tensor,\n tensor3,\n vector,\n)\n\n\njax = pytest.importorskip(\"jax\")\n\nopts = OptimizationQuery(include=[None], exclude=[\"cxx_only\", \"BlasOpt\"])\njax_mode = Mode(JAXLinker(), opts)\npy_mode = Mode(\"py\", 
opts)\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef set_aesara_flags():\n with config.change_flags(cxx=\"\", compute_test_value=\"ignore\"):\n yield\n\n\ndef compare_jax_and_py(\n fgraph: FunctionGraph,\n test_inputs: iter,\n assert_fn: Optional[callable] = None,\n must_be_device_array: bool = True,\n):\n \"\"\"Function to compare python graph output and jax compiled output for testing equality\n\n In the tests below computational graphs are defined in Aesara. These graphs are then passed to\n this function which then compiles the graphs in both jax and python, runs the calculation\n in both and checks if the results are the same\n\n Parameters\n ----------\n fgraph: FunctionGraph\n Aesara function Graph object\n test_inputs: iter\n Numerical inputs for testing the function graph\n assert_fn: func, opt\n Assert function used to check for equality between python and jax. If not\n provided uses np.testing.assert_allclose\n must_be_device_array: Bool\n Checks for instance of jax.interpreters.xla.DeviceArray. For testing purposes\n if this device array is found it indicates if the result was computed by jax\n\n Returns\n -------\n jax_res\n\n \"\"\"\n if assert_fn is None:\n assert_fn = partial(np.testing.assert_allclose, rtol=1e-4)\n\n fn_inputs = [i for i in fgraph.inputs if not isinstance(i, SharedVariable)]\n aesara_jax_fn = function(fn_inputs, fgraph.outputs, mode=jax_mode)\n jax_res = aesara_jax_fn(*test_inputs)\n\n if must_be_device_array:\n if isinstance(jax_res, list):\n assert all(\n isinstance(res, jax.interpreters.xla.DeviceArray) for res in jax_res\n )\n else:\n assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)\n\n aesara_py_fn = function(fn_inputs, fgraph.outputs, mode=py_mode)\n py_res = aesara_py_fn(*test_inputs)\n\n if len(fgraph.outputs) > 1:\n for j, p in zip(jax_res, py_res):\n assert_fn(j, p)\n else:\n assert_fn(jax_res, py_res)\n\n return jax_res\n\n\ndef test_jax_Alloc():\n x = aet.alloc(0.0, 2, 3)\n x_fg = FunctionGraph([], [x])\n\n (jax_res,) = compare_jax_and_py(x_fg, [])\n\n assert jax_res.shape == (2, 3)\n\n x = aet.alloc(1.1, 2, 3)\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n x = aet.AllocEmpty(\"float32\")(2, 3)\n x_fg = FunctionGraph([], [x])\n\n def compare_shape_dtype(x, y):\n (x,) = x\n (y,) = y\n return x.shape == y.shape and x.dtype == y.dtype\n\n compare_jax_and_py(x_fg, [], assert_fn=compare_shape_dtype)\n\n a = scalar(\"a\")\n x = aet.alloc(a, 20)\n x_fg = FunctionGraph([a], [x])\n\n compare_jax_and_py(x_fg, [10.0])\n\n a = vector(\"a\")\n x = aet.alloc(a, 20, 10)\n x_fg = FunctionGraph([a], [x])\n\n compare_jax_and_py(x_fg, [np.ones(10, dtype=config.floatX)])\n\n\ndef test_jax_shape_ops():\n x_np = np.zeros((20, 3))\n x = Shape()(aet.as_tensor_variable(x_np))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [], must_be_device_array=False)\n\n x = Shape_i(1)(aet.as_tensor_variable(x_np))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [], must_be_device_array=False)\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_specify_shape():\n x_np = np.zeros((20, 3))\n x = SpecifyShape()(aet.as_tensor_variable(x_np), (20, 3))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n with config.change_flags(compute_test_value=\"off\"):\n\n x = SpecifyShape()(aet.as_tensor_variable(x_np), (2, 3))\n x_fg = FunctionGraph([], [x])\n\n with pytest.raises(AssertionError):\n 
compare_jax_and_py(x_fg, [])\n\n\ndef test_jax_compile_ops():\n\n x = DeepCopyOp()(aet.as_tensor_variable(1.1))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n x_np = np.zeros((20, 1, 1))\n x = aet.Rebroadcast((0, False), (1, True), (2, False))(aet.as_tensor_variable(x_np))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n with config.change_flags(compute_test_value=\"off\"):\n x = aet.Rebroadcast((0, True), (1, False), (2, False))(\n aet.as_tensor_variable(x_np)\n )\n x_fg = FunctionGraph([], [x])\n\n with pytest.raises(ValueError):\n compare_jax_and_py(x_fg, [])\n\n x = ViewOp()(aet.as_tensor_variable(x_np))\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n\ndef test_jax_basic():\n rng = np.random.default_rng(28494)\n\n x = matrix(\"x\")\n y = matrix(\"y\")\n b = vector(\"b\")\n\n # `ScalarOp`\n z = cosh(x ** 2 + y / 3.0)\n\n # `[Inc]Subtensor`\n out = aet_subtensor.set_subtensor(z[0], -10.0)\n out = aet_subtensor.inc_subtensor(out[0, 1], 2.0)\n out = out[:5, :3]\n\n out_fg = FunctionGraph([x, y], [out])\n\n test_input_vals = [\n np.tile(np.arange(10), (10, 1)).astype(config.floatX),\n np.tile(np.arange(10, 20), (10, 1)).astype(config.floatX),\n ]\n (jax_res,) = compare_jax_and_py(out_fg, test_input_vals)\n\n # Confirm that the `Subtensor` slice operations are correct\n assert jax_res.shape == (5, 3)\n\n # Confirm that the `IncSubtensor` operations are correct\n assert jax_res[0, 0] == -10.0\n assert jax_res[0, 1] == -8.0\n\n out = clip(x, y, 5)\n out_fg = FunctionGraph([x, y], [out])\n compare_jax_and_py(out_fg, test_input_vals)\n\n out = aet.diagonal(x, 0)\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(\n out_fg, [np.arange(10 * 10).reshape((10, 10)).astype(config.floatX)]\n )\n\n out = aet_slinalg.cholesky(x)\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(\n out_fg,\n [\n (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(\n config.floatX\n )\n ],\n )\n\n # not sure why this isn't working yet with lower=False\n out = aet_slinalg.Cholesky(lower=False)(x)\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(\n out_fg,\n [\n (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(\n config.floatX\n )\n ],\n )\n\n out = aet_slinalg.solve(x, b)\n out_fg = FunctionGraph([x, b], [out])\n compare_jax_and_py(\n out_fg,\n [\n np.eye(10).astype(config.floatX),\n np.arange(10).astype(config.floatX),\n ],\n )\n\n out = aet.diag(b)\n out_fg = FunctionGraph([b], [out])\n compare_jax_and_py(out_fg, [np.arange(10).astype(config.floatX)])\n\n out = aet_nlinalg.det(x)\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(\n out_fg, [np.arange(10 * 10).reshape((10, 10)).astype(config.floatX)]\n )\n\n out = aet_nlinalg.matrix_inverse(x)\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(\n out_fg,\n [\n (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(\n config.floatX\n )\n ],\n )\n\n\n@pytest.mark.parametrize(\n \"x, y, x_val, y_val\",\n [\n (scalar(\"x\"), scalar(\"y\"), np.array(10), np.array(20)),\n (scalar(\"x\"), vector(\"y\"), np.array(10), np.arange(10, 20)),\n (\n matrix(\"x\"),\n vector(\"y\"),\n np.arange(10 * 20).reshape((20, 10)),\n np.arange(10, 20),\n ),\n ],\n)\ndef test_jax_Composite(x, y, x_val, y_val):\n x_s = aes.float64(\"x\")\n y_s = aes.float64(\"y\")\n\n comp_op = Elemwise(Composite([x_s, y_s], [x_s + y_s * 2 + aes.exp(x_s - y_s)]))\n\n out = comp_op(x, y)\n\n out_fg = FunctionGraph([x, y], [out])\n\n test_input_vals = [\n 
x_val.astype(config.floatX),\n y_val.astype(config.floatX),\n ]\n _ = compare_jax_and_py(out_fg, test_input_vals)\n\n\ndef test_jax_FunctionGraph_names():\n import inspect\n\n from aesara.link.jax.dispatch import jax_funcify\n\n x = scalar(\"1x\")\n y = scalar(\"_\")\n z = scalar()\n q = scalar(\"def\")\n\n out_fg = FunctionGraph([x, y, z, q], [x, y, z, q], clone=False)\n out_jx = jax_funcify(out_fg)\n sig = inspect.signature(out_jx)\n assert (x.auto_name, \"_\", z.auto_name, q.auto_name) == tuple(sig.parameters.keys())\n assert (1, 2, 3, 4) == out_jx(1, 2, 3, 4)\n\n\ndef test_jax_FunctionGraph_once():\n \"\"\"Make sure that an output is only computed once when it's referenced multiple times.\"\"\"\n from aesara.link.jax.dispatch import jax_funcify\n\n x = vector(\"x\")\n y = vector(\"y\")\n\n class TestOp(Op):\n def __init__(self):\n self.called = 0\n\n def make_node(self, *args):\n return Apply(self, list(args), [x.type() for x in args])\n\n def perform(self, inputs, outputs):\n for i, inp in enumerate(inputs):\n outputs[i][0] = inp[0]\n\n @jax_funcify.register(TestOp)\n def jax_funcify_TestOp(op, **kwargs):\n def func(*args, op=op):\n op.called += 1\n return list(args)\n\n return func\n\n op1 = TestOp()\n op2 = TestOp()\n\n q, r = op1(x, y)\n outs = op2(q + r, q + r)\n\n out_fg = FunctionGraph([x, y], outs, clone=False)\n assert len(out_fg.outputs) == 2\n\n out_jx = jax_funcify(out_fg)\n\n x_val = np.r_[1, 2].astype(config.floatX)\n y_val = np.r_[2, 3].astype(config.floatX)\n\n res = out_jx(x_val, y_val)\n assert len(res) == 2\n assert op1.called == 1\n assert op2.called == 1\n\n res = out_jx(x_val, y_val)\n assert len(res) == 2\n assert op1.called == 2\n assert op2.called == 2\n\n\ndef test_jax_eye():\n \"\"\"Tests jaxification of the Eye operator\"\"\"\n out = aet.eye(3)\n out_fg = FunctionGraph([], [out])\n\n compare_jax_and_py(out_fg, [])\n\n\ndef test_jax_basic_multiout():\n rng = np.random.default_rng(213234)\n\n M = rng.normal(size=(3, 3))\n X = M.dot(M.T)\n\n x = matrix(\"x\")\n\n outs = aet_nlinalg.eig(x)\n out_fg = FunctionGraph([x], outs)\n\n def assert_fn(x, y):\n np.testing.assert_allclose(x.astype(config.floatX), y, rtol=1e-3)\n\n compare_jax_and_py(out_fg, [X.astype(config.floatX)], assert_fn=assert_fn)\n\n outs = aet_nlinalg.eigh(x)\n out_fg = FunctionGraph([x], outs)\n compare_jax_and_py(out_fg, [X.astype(config.floatX)], assert_fn=assert_fn)\n\n outs = aet_nlinalg.qr(x, mode=\"full\")\n out_fg = FunctionGraph([x], outs)\n compare_jax_and_py(out_fg, [X.astype(config.floatX)], assert_fn=assert_fn)\n\n outs = aet_nlinalg.qr(x, mode=\"reduced\")\n out_fg = FunctionGraph([x], outs)\n compare_jax_and_py(out_fg, [X.astype(config.floatX)], assert_fn=assert_fn)\n\n outs = aet_nlinalg.svd(x)\n out_fg = FunctionGraph([x], outs)\n compare_jax_and_py(out_fg, [X.astype(config.floatX)], assert_fn=assert_fn)\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_basic_multiout_omni():\n # Test that a single output of a multi-output `Op` can be used as input to\n # another `Op`\n x = dvector()\n mx, amx = MaxAndArgmax([0])(x)\n out = mx * amx\n out_fg = FunctionGraph([x], [out])\n compare_jax_and_py(out_fg, [np.r_[1, 2]])\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_scan_multiple_output():\n \"\"\"Test a scan implementation of a SEIR model.\n\n SEIR model definition:\n S[t+1] = S[t] - 
B[t]\n E[t+1] = E[t] +B[t] - C[t]\n I[t+1] = I[t+1] + C[t] - D[t]\n\n B[t] ~ Binom(S[t], beta)\n C[t] ~ Binom(E[t], gamma)\n D[t] ~ Binom(I[t], delta)\n \"\"\"\n\n def binomln(n, k):\n return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)\n\n def binom_log_prob(n, p, value):\n return binomln(n, value) + value * log(p) + (n - value) * log(1 - p)\n\n # sequences\n aet_C = ivector(\"C_t\")\n aet_D = ivector(\"D_t\")\n # outputs_info (initial conditions)\n st0 = lscalar(\"s_t0\")\n et0 = lscalar(\"e_t0\")\n it0 = lscalar(\"i_t0\")\n logp_c = scalar(\"logp_c\")\n logp_d = scalar(\"logp_d\")\n # non_sequences\n beta = scalar(\"beta\")\n gamma = scalar(\"gamma\")\n delta = scalar(\"delta\")\n\n # TODO: Use random streams when their JAX conversions are implemented.\n # trng = aesara.tensor.random.RandomStream(1234)\n\n def seir_one_step(ct0, dt0, st0, et0, it0, logp_c, logp_d, beta, gamma, delta):\n # bt0 = trng.binomial(n=st0, p=beta)\n bt0 = st0 * beta\n bt0 = bt0.astype(st0.dtype)\n\n logp_c1 = binom_log_prob(et0, gamma, ct0).astype(logp_c.dtype)\n logp_d1 = binom_log_prob(it0, delta, dt0).astype(logp_d.dtype)\n\n st1 = st0 - bt0\n et1 = et0 + bt0 - ct0\n it1 = it0 + ct0 - dt0\n return st1, et1, it1, logp_c1, logp_d1\n\n (st, et, it, logp_c_all, logp_d_all), _ = scan(\n fn=seir_one_step,\n sequences=[aet_C, aet_D],\n outputs_info=[st0, et0, it0, logp_c, logp_d],\n non_sequences=[beta, gamma, delta],\n )\n st.name = \"S_t\"\n et.name = \"E_t\"\n it.name = \"I_t\"\n logp_c_all.name = \"C_t_logp\"\n logp_d_all.name = \"D_t_logp\"\n\n out_fg = FunctionGraph(\n [aet_C, aet_D, st0, et0, it0, logp_c, logp_d, beta, gamma, delta],\n [st, et, it, logp_c_all, logp_d_all],\n )\n\n s0, e0, i0 = 100, 50, 25\n logp_c0 = np.array(0.0, dtype=config.floatX)\n logp_d0 = np.array(0.0, dtype=config.floatX)\n beta_val, gamma_val, delta_val = [\n np.array(val, dtype=config.floatX) for val in [0.277792, 0.135330, 0.108753]\n ]\n C = np.array([3, 5, 8, 13, 21, 26, 10, 3], dtype=np.int32)\n D = np.array([1, 2, 3, 7, 9, 11, 5, 1], dtype=np.int32)\n\n test_input_vals = [\n C,\n D,\n s0,\n e0,\n i0,\n logp_c0,\n logp_d0,\n beta_val,\n gamma_val,\n delta_val,\n ]\n compare_jax_and_py(out_fg, test_input_vals)\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_scan_tap_output():\n\n a_aet = scalar(\"a\")\n\n def input_step_fn(y_tm1, y_tm3, a):\n y_tm1.name = \"y_tm1\"\n y_tm3.name = \"y_tm3\"\n res = (y_tm1 + y_tm3) * a\n res.name = \"y_t\"\n return res\n\n y_scan_aet, _ = scan(\n fn=input_step_fn,\n outputs_info=[\n {\n \"initial\": aet.as_tensor_variable(\n np.r_[-1.0, 1.3, 0.0].astype(config.floatX)\n ),\n \"taps\": [-1, -3],\n },\n ],\n non_sequences=[a_aet],\n n_steps=10,\n name=\"y_scan\",\n )\n y_scan_aet.name = \"y\"\n y_scan_aet.owner.inputs[0].name = \"y_all\"\n\n out_fg = FunctionGraph([a_aet], [y_scan_aet])\n\n test_input_vals = [np.array(10.0).astype(config.floatX)]\n compare_jax_and_py(out_fg, test_input_vals)\n\n\ndef test_jax_Subtensors():\n # Basic indices\n x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5))\n out_aet = x_aet[1, 2, 0]\n assert isinstance(out_aet.owner.op, aet_subtensor.Subtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n out_aet = x_aet[1:2, 1, :]\n assert isinstance(out_aet.owner.op, aet_subtensor.Subtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # Advanced indexing\n out_aet = aet_subtensor.advanced_subtensor1(x_aet, 
[1, 2])\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor1)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n out_aet = x_aet[[1, 2], [2, 3]]\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # Advanced and basic indexing\n out_aet = x_aet[[1, 2], :]\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n out_aet = x_aet[[1, 2], :, [3, 4]]\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_Subtensors_omni():\n x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5))\n\n # Boolean indices\n out_aet = x_aet[x_aet < 0]\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_IncSubtensor():\n rng = np.random.default_rng(213234)\n\n x_np = rng.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX)\n x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(config.floatX)\n\n # \"Set\" basic indices\n st_aet = aet.as_tensor_variable(np.array(-10.0, dtype=config.floatX))\n out_aet = aet_subtensor.set_subtensor(x_aet[1, 2, 3], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))\n out_aet = aet_subtensor.set_subtensor(x_aet[:2, 0, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n out_aet = aet_subtensor.set_subtensor(x_aet[0, 1:3, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # \"Set\" advanced indices\n st_aet = aet.as_tensor_variable(\n rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)\n )\n out_aet = aet_subtensor.set_subtensor(x_aet[np.r_[0, 2]], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))\n out_aet = aet_subtensor.set_subtensor(x_aet[[0, 2], 0, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(x_np[[0, 2], 0, :3])\n out_aet = aet_subtensor.set_subtensor(x_aet[[0, 2], 0, :3], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # \"Set\" boolean indices\n mask_aet = aet.as_tensor_variable(x_np) > 0\n out_aet = aet_subtensor.set_subtensor(x_aet[mask_aet], 0.0)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # \"Increment\" basic indices\n st_aet = aet.as_tensor_variable(np.array(-10.0, 
dtype=config.floatX))\n out_aet = aet_subtensor.inc_subtensor(x_aet[1, 2, 3], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))\n out_aet = aet_subtensor.inc_subtensor(x_aet[:2, 0, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n out_aet = aet_subtensor.set_subtensor(x_aet[0, 1:3, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # \"Increment\" advanced indices\n st_aet = aet.as_tensor_variable(\n rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)\n )\n out_aet = aet_subtensor.inc_subtensor(x_aet[np.r_[0, 2]], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))\n out_aet = aet_subtensor.inc_subtensor(x_aet[[0, 2], 0, 0], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n st_aet = aet.as_tensor_variable(x_np[[0, 2], 0, :3])\n out_aet = aet_subtensor.inc_subtensor(x_aet[[0, 2], 0, :3], st_aet)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n # \"Increment\" boolean indices\n mask_aet = aet.as_tensor_variable(x_np) > 0\n out_aet = aet_subtensor.set_subtensor(x_aet[mask_aet], 1.0)\n assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)\n out_fg = FunctionGraph([], [out_aet])\n compare_jax_and_py(out_fg, [])\n\n\ndef test_jax_ifelse():\n\n true_vals = np.r_[1, 2, 3]\n false_vals = np.r_[-1, -2, -3]\n\n x = ifelse(np.array(True), true_vals, false_vals)\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n a = dscalar(\"a\")\n a.tag.test_value = np.array(0.2, dtype=config.floatX)\n x = ifelse(a < 0.5, true_vals, false_vals)\n x_fg = FunctionGraph([a], [x]) # I.e. 
False\n\n compare_jax_and_py(x_fg, [get_test_value(i) for i in x_fg.inputs])\n\n\ndef test_jax_CAReduce():\n a_aet = vector(\"a\")\n a_aet.tag.test_value = np.r_[1, 2, 3].astype(config.floatX)\n\n x = aet_sum(a_aet, axis=None)\n x_fg = FunctionGraph([a_aet], [x])\n\n compare_jax_and_py(x_fg, [np.r_[1, 2, 3].astype(config.floatX)])\n\n a_aet = matrix(\"a\")\n a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)\n\n x = aet_sum(a_aet, axis=0)\n x_fg = FunctionGraph([a_aet], [x])\n\n compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])\n\n x = aet_sum(a_aet, axis=1)\n x_fg = FunctionGraph([a_aet], [x])\n\n compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])\n\n a_aet = matrix(\"a\")\n a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)\n\n x = prod(a_aet, axis=0)\n x_fg = FunctionGraph([a_aet], [x])\n\n compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])\n\n x = aet_all(a_aet)\n x_fg = FunctionGraph([a_aet], [x])\n\n compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])\n\n\ndef test_jax_MakeVector():\n x = aet.make_vector(1, 2, 3)\n x_fg = FunctionGraph([], [x])\n\n compare_jax_and_py(x_fg, [])\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_jax_Reshape():\n a = vector(\"a\")\n x = reshape(a, (2, 2))\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])\n\n # Test breaking \"omnistaging\" changes in JAX.\n # See https://github.com/tensorflow/probability/commit/782d0c64eb774b9aac54a1c8488e4f1f96fbbc68\n x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])\n\n\n@pytest.mark.xfail(reason=\"jax.numpy.arange requires concrete inputs\")\ndef test_jax_Reshape_nonconcrete():\n a = vector(\"a\")\n b = iscalar(\"b\")\n x = reshape(a, (b, b))\n x_fg = FunctionGraph([a, b], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX), 2])\n\n\ndef test_jax_Dimshuffle():\n a_aet = matrix(\"a\")\n\n x = a_aet.T\n x_fg = FunctionGraph([a_aet], [x])\n compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])\n\n x = a_aet.dimshuffle([0, 1, \"x\"])\n x_fg = FunctionGraph([a_aet], [x])\n compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])\n\n a_aet = tensor(dtype=config.floatX, broadcastable=[False, True])\n x = a_aet.dimshuffle((0,))\n x_fg = FunctionGraph([a_aet], [x])\n compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])\n\n a_aet = tensor(dtype=config.floatX, broadcastable=[False, True])\n x = aet_elemwise.DimShuffle([False, True], (0,), inplace=True)(a_aet)\n x_fg = FunctionGraph([a_aet], [x])\n compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])\n\n\ndef test_jax_Join():\n a = matrix(\"a\")\n b = matrix(\"b\")\n\n x = aet.join(0, a, b)\n x_fg = FunctionGraph([a, b], [x])\n compare_jax_and_py(\n x_fg,\n [\n np.c_[[1.0, 2.0, 3.0]].astype(config.floatX),\n np.c_[[4.0, 5.0, 6.0]].astype(config.floatX),\n ],\n )\n compare_jax_and_py(\n x_fg,\n [\n np.c_[[1.0, 2.0, 3.0]].astype(config.floatX),\n np.c_[[4.0, 5.0]].astype(config.floatX),\n ],\n )\n\n x = aet.join(1, a, b)\n x_fg = FunctionGraph([a, b], [x])\n compare_jax_and_py(\n x_fg,\n [\n np.c_[[1.0, 2.0, 3.0]].astype(config.floatX),\n 
np.c_[[4.0, 5.0, 6.0]].astype(config.floatX),\n ],\n )\n compare_jax_and_py(\n x_fg,\n [\n np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX),\n np.c_[[5.0, 6.0]].astype(config.floatX),\n ],\n )\n\n\ndef test_jax_variadic_Scalar():\n mu = vector(\"mu\", dtype=config.floatX)\n mu.tag.test_value = np.r_[0.1, 1.1].astype(config.floatX)\n tau = vector(\"tau\", dtype=config.floatX)\n tau.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)\n\n res = -tau * mu\n\n fgraph = FunctionGraph([mu, tau], [res])\n\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n res = -tau * (tau - mu) ** 2\n\n fgraph = FunctionGraph([mu, tau], [res])\n\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_jax_logp():\n\n mu = vector(\"mu\")\n mu.tag.test_value = np.r_[0.0, 0.0].astype(config.floatX)\n tau = vector(\"tau\")\n tau.tag.test_value = np.r_[1.0, 1.0].astype(config.floatX)\n sigma = vector(\"sigma\")\n sigma.tag.test_value = (1.0 / get_test_value(tau)).astype(config.floatX)\n value = vector(\"value\")\n value.tag.test_value = np.r_[0.1, -10].astype(config.floatX)\n\n logp = (-tau * (value - mu) ** 2 + log(tau / np.pi / 2.0)) / 2.0\n conditions = [sigma > 0]\n alltrue = aet_all([aet_all(1 * val) for val in conditions])\n normal_logp = aet.switch(alltrue, logp, -np.inf)\n\n fgraph = FunctionGraph([mu, tau, sigma, value], [normal_logp])\n\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_jax_multioutput():\n x = vector(\"x\")\n x.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)\n y = vector(\"y\")\n y.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)\n\n w = cosh(x ** 2 + y / 3.0)\n v = cosh(x / 3.0 + y ** 2)\n\n fgraph = FunctionGraph([x, y], [w, v])\n\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_nnet():\n x = vector(\"x\")\n x.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)\n\n out = sigmoid(x)\n fgraph = FunctionGraph([x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_nnet.ultra_fast_sigmoid(x)\n fgraph = FunctionGraph([x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = softplus(x)\n fgraph = FunctionGraph([x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_nnet.softmax(x)\n fgraph = FunctionGraph([x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_nnet.logsoftmax(x)\n fgraph = FunctionGraph([x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_tensor_basics():\n y = vector(\"y\")\n y.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)\n x = vector(\"x\")\n x.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)\n A = matrix(\"A\")\n A.tag.test_value = np.empty((2, 2), dtype=config.floatX)\n alpha = scalar(\"alpha\")\n alpha.tag.test_value = np.array(3.0, dtype=config.floatX)\n beta = scalar(\"beta\")\n beta.tag.test_value = np.array(5.0, dtype=config.floatX)\n\n # This should be converted into a `Gemv` `Op` when the non-JAX compatible\n # optimizations are turned on; however, when using JAX mode, it should\n # leave the expression alone.\n out = y.dot(alpha * A).dot(x) + beta * y\n fgraph = FunctionGraph([y, x, A, alpha, beta], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in 
fgraph.inputs])\n\n out = maximum(y, x)\n fgraph = FunctionGraph([y, x], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_max(y)\n fgraph = FunctionGraph([y], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\n@pytest.mark.xfail(reason=\"jax.numpy.arange requires concrete inputs\")\ndef test_arange_nonconcrete():\n\n a = scalar(\"a\")\n a.tag.test_value = 10\n\n out = aet.arange(a)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\n@pytest.mark.xfail(reason=\"jax.numpy.arange requires concrete inputs\")\ndef test_unique_nonconcrete():\n a = matrix(\"a\")\n a.tag.test_value = np.arange(6, dtype=config.floatX).reshape((3, 2))\n\n out = aet_extra_ops.Unique()(a)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_identity():\n a = scalar(\"a\")\n a.tag.test_value = 10\n\n out = aes.identity(a)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_second():\n a0 = scalar(\"a0\")\n b = scalar(\"b\")\n\n out = aes.second(a0, b)\n fgraph = FunctionGraph([a0, b], [out])\n compare_jax_and_py(fgraph, [10.0, 5.0])\n\n a1 = vector(\"a1\")\n out = aet.second(a1, b)\n fgraph = FunctionGraph([a1, b], [out])\n compare_jax_and_py(fgraph, [np.zeros([5], dtype=config.floatX), 5.0])\n\n\ndef test_jax_BatchedDot():\n # tensor3 . tensor3\n a = tensor3(\"a\")\n a.tag.test_value = (\n np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))\n )\n b = tensor3(\"b\")\n b.tag.test_value = (\n np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))\n )\n out = aet_blas.BatchedDot()(a, b)\n fgraph = FunctionGraph([a, b], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n # A dimension mismatch should raise a TypeError for compatibility\n inputs = [get_test_value(a)[:-1], get_test_value(b)]\n opts = OptimizationQuery(include=[None], exclude=[\"cxx_only\", \"BlasOpt\"])\n jax_mode = Mode(JAXLinker(), opts)\n aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)\n with pytest.raises(TypeError):\n aesara_jax_fn(*inputs)\n\n # matrix . 
matrix\n a = matrix(\"a\")\n a.tag.test_value = np.linspace(-1, 1, 5 * 3).astype(config.floatX).reshape((5, 3))\n b = matrix(\"b\")\n b.tag.test_value = np.linspace(1, -1, 5 * 3).astype(config.floatX).reshape((5, 3))\n out = aet_blas.BatchedDot()(a, b)\n fgraph = FunctionGraph([a, b], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n\ndef test_shared():\n a = shared(np.array([1, 2, 3], dtype=config.floatX))\n\n aesara_jax_fn = function([], a, mode=\"JAX\")\n jax_res = aesara_jax_fn()\n\n assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)\n np.testing.assert_allclose(jax_res, a.get_value())\n\n aesara_jax_fn = function([], a * 2, mode=\"JAX\")\n jax_res = aesara_jax_fn()\n\n assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)\n np.testing.assert_allclose(jax_res, a.get_value() * 2)\n\n # Changed the shared value and make sure that the JAX-compiled\n # function also changes.\n new_a_value = np.array([3, 4, 5], dtype=config.floatX)\n a.set_value(new_a_value)\n\n jax_res = aesara_jax_fn()\n assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)\n np.testing.assert_allclose(jax_res, new_a_value * 2)\n\n\ndef test_extra_ops():\n a = matrix(\"a\")\n a.tag.test_value = np.arange(6, dtype=config.floatX).reshape((3, 2))\n\n out = aet_extra_ops.cumsum(a, axis=0)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_extra_ops.cumprod(a, axis=1)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_extra_ops.diff(a, n=2, axis=1)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n out = aet_extra_ops.repeat(a, (3, 3), axis=1)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n c = aet.as_tensor(5)\n\n with pytest.raises(NotImplementedError):\n out = aet_extra_ops.fill_diagonal(a, c)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n with pytest.raises(NotImplementedError):\n out = aet_extra_ops.fill_diagonal_offset(a, c, c)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n with pytest.raises(NotImplementedError):\n out = aet_extra_ops.Unique(axis=1)(a)\n fgraph = FunctionGraph([a], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n indices = np.arange(np.product((3, 4)))\n out = aet_extra_ops.unravel_index(indices, (3, 4), order=\"C\")\n fgraph = FunctionGraph([], out)\n compare_jax_and_py(\n fgraph, [get_test_value(i) for i in fgraph.inputs], must_be_device_array=False\n )\n\n\n@pytest.mark.xfail(\n version_parse(jax.__version__) >= version_parse(\"0.2.12\"),\n reason=\"Omnistaging cannot be disabled\",\n)\ndef test_extra_ops_omni():\n a = matrix(\"a\")\n a.tag.test_value = np.arange(6, dtype=config.floatX).reshape((3, 2))\n\n # This function also cannot take symbolic input.\n c = aet.as_tensor(5)\n out = aet_extra_ops.bartlett(c)\n fgraph = FunctionGraph([], [out])\n compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])\n\n multi_index = np.unravel_index(np.arange(np.product((3, 4))), (3, 4))\n out = aet_extra_ops.ravel_multi_index(multi_index, (3, 4))\n fgraph = FunctionGraph([], [out])\n compare_jax_and_py(\n fgraph, [get_test_value(i) for i in fgraph.inputs], must_be_device_array=False\n )\n\n # The inputs 
are \"concrete\", yet it still has problems?\n out = aet_extra_ops.Unique()(\n aet.as_tensor(np.arange(6, dtype=config.floatX).reshape((3, 2)))\n )\n fgraph = FunctionGraph([], [out])\n compare_jax_and_py(fgraph, [])\n\n\n@pytest.mark.parametrize(\n \"at_dist, dist_params, rng, size\",\n [\n (\n normal,\n (),\n shared(np.random.RandomState(123)),\n 10000,\n ),\n (\n normal,\n (),\n shared(np.random.default_rng(123)),\n 10000,\n ),\n ],\n)\ndef test_random_stats(at_dist, dist_params, rng, size):\n # The RNG states are not 1:1, so the best we can do is check some summary\n # statistics of the samples\n out = normal(*dist_params, rng=rng, size=size)\n fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)\n\n def assert_fn(x, y):\n (x,) = x\n (y,) = y\n assert x.dtype.kind == y.dtype.kind\n\n d = 2 if config.floatX == \"float64\" else 1\n np.testing.assert_array_almost_equal(np.abs(x.mean()), np.abs(y.mean()), d)\n\n compare_jax_and_py(fgraph, [], assert_fn=assert_fn)\n\n\ndef test_random_unimplemented():\n class NonExistentRV(RandomVariable):\n name = \"non-existent\"\n ndim_supp = 0\n ndims_params = []\n dtype = \"floatX\"\n\n def __call__(self, size=None, **kwargs):\n return super().__call__(size=size, **kwargs)\n\n def rng_fn(cls, rng, size):\n return 0\n\n nonexistentrv = NonExistentRV()\n rng = shared(np.random.RandomState(123))\n out = nonexistentrv(rng=rng)\n fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)\n\n with pytest.raises(NotImplementedError):\n compare_jax_and_py(fgraph, [])\n\n\ndef test_RandomStream():\n srng = RandomStream(seed=123)\n out = srng.normal() - srng.normal()\n\n fn = function([], out, mode=jax_mode)\n jax_res_1 = fn()\n jax_res_2 = fn()\n\n assert np.array_equal(jax_res_1, jax_res_2)\n\n\ndef test_erf():\n x = scalar(\"x\")\n out = erf(x)\n fg = FunctionGraph([x], [out])\n\n compare_jax_and_py(fg, [1.0])\n\n\ndef test_erfc():\n x = scalar(\"x\")\n out = erfc(x)\n fg = FunctionGraph([x], [out])\n\n compare_jax_and_py(fg, [1.0])\n\n\ndef test_erfinv():\n x = scalar(\"x\")\n out = erfinv(x)\n fg = FunctionGraph([x], [out])\n\n compare_jax_and_py(fg, [1.0])\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.isposinf", "numpy.copy", "numpy.isneginf", "numpy.__version__.split", "numpy.frompyfunc", "numpy.dtype", "numpy.sctype2char" ], [ "numpy.testing.assert_allclose", "numpy.array", "numpy.product", "numpy.empty", "numpy.array_equal", "numpy.zeros", "numpy.random.RandomState", "numpy.ones", "numpy.random.default_rng", "numpy.eye", "numpy.arange", "numpy.linspace" ] ]
708yamaguchi/scikit-robot
[ "08fa91a79e637b0dcfafe5553921cb7bb409ef26" ]
[ "tests/skrobot_tests/model_tests/test_primitives.py" ]
[ "import os.path as osp\nimport shutil\nimport unittest\n\nimport numpy as np\n\nimport skrobot\nimport trimesh\n\n\nclass TestAxis(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Axis()\n\n def from_coords(self):\n coords = skrobot.coordinates.Coordinates()\n skrobot.model.Axis.from_coords(coords)\n\n def from_cascoords(self):\n cascoords = skrobot.coordinates.CascadedCoords()\n skrobot.model.Axis.from_cascoords(cascoords)\n\n\nclass TestBox(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Box(extents=(1, 1, 1))\n skrobot.model.Box(extents=(1, 1, 1), with_sdf=True)\n\n def test_init_with_sdf(self):\n b = skrobot.model.Box(extents=(1, 1, 1), with_sdf=True)\n booleans, _ = b.sdf.on_surface(b.visual_mesh.vertices)\n is_all_vertices_on_surface = np.all(booleans)\n self.assertTrue(is_all_vertices_on_surface)\n\n\nclass TestCone(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Cone(radius=0.5, height=1)\n\n\nclass TestCylinder(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Cylinder(radius=0.5, height=1)\n\n\nclass TestSphere(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Sphere(radius=1)\n\n def test_init_with_sdf(self):\n s = skrobot.model.Sphere(radius=1.0, with_sdf=True)\n booleans, _ = s.sdf.on_surface(s.visual_mesh.vertices)\n is_all_vertices_on_surface = np.all(booleans)\n self.assertTrue(is_all_vertices_on_surface)\n\n\nclass TestAnnulus(unittest.TestCase):\n\n def test_init(self):\n skrobot.model.Annulus(r_min=0.2, r_max=0.5, height=1)\n\n\nclass TestMeshLink(unittest.TestCase):\n\n def test_init(self):\n cylinder = trimesh.creation.cylinder(radius=1.0, height=1.0)\n skrobot.model.MeshLink(cylinder)\n skrobot.model.MeshLink([cylinder, cylinder])\n\n base_obj_path = osp.join(osp.dirname(skrobot.data.pr2_urdfpath()),\n 'meshes', 'base_v0', 'base.obj')\n skrobot.model.MeshLink(base_obj_path)\n\n def test_init_with_sdf(self):\n home_dir = osp.expanduser(\"~\")\n sdf_cache_dir = osp.join(home_dir, '.skrobot', 'sdf')\n if osp.exists(sdf_cache_dir):\n shutil.rmtree(sdf_cache_dir)\n\n bunny_obj_path = skrobot.data.bunny_objpath()\n m = skrobot.model.MeshLink(bunny_obj_path, with_sdf=True, dim_grid=50)\n\n booleans, _ = m.sdf.on_surface(m.visual_mesh.vertices)\n is_all_vertices_on_surface = np.all(booleans)\n self.assertTrue(is_all_vertices_on_surface)\n" ]
[ [ "numpy.all" ] ]
PanAndy/PolarMask
[ "0421f03a66ad4cbf7bdfe7a17a2e47e9fcc53737", "e224dc10eb62b4d771aafc1ffcb787eedb35ba7b" ]
[ "mmdet/models/anchor_heads/polarmask_head.py", "mmdet/models/detectors/base.py" ]
[ "import torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, multiclass_nms_with_mask\nfrom mmdet.ops import ModulatedDeformConvPack\n\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom ..utils import ConvModule, Scale, bias_init_with_prob, build_norm_layer\nfrom IPython import embed\nimport cv2\nimport numpy as np\nimport math\nimport time\n\nINF = 1e8\n\n\n@HEADS.register_module\nclass PolarMask_Head(nn.Module):\n\n def __init__(self,\n num_classes,\n in_channels,\n feat_channels=256,\n stacked_convs=4,\n strides=(4, 8, 16, 32, 64),\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n use_dcn=False,\n mask_nms=False,\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n loss_mask=dict(type='MaskIOULoss'),\n loss_centerness=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):\n super(PolarMask_Head, self).__init__()\n\n self.num_classes = num_classes\n self.cls_out_channels = num_classes - 1\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.strides = strides\n self.regress_ranges = regress_ranges\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_mask = build_loss(loss_mask)\n self.loss_centerness = build_loss(loss_centerness)\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n # xez add for polarmask\n self.use_dcn = use_dcn\n self.mask_nms = mask_nms\n\n # debug vis img\n self.vis_num = 1000\n self.count = 0\n\n # test\n self.angles = torch.range(0, 350, 10).cuda() / 180 * math.pi\n\n self._init_layers()\n\n def _init_layers(self):\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n self.mask_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n if not self.use_dcn:\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.mask_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n else:\n self.cls_convs.append(\n ModulatedDeformConvPack(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n dilation=1,\n deformable_groups=1,\n ))\n if self.norm_cfg:\n self.cls_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])\n self.cls_convs.append(nn.ReLU(inplace=True))\n\n self.reg_convs.append(\n ModulatedDeformConvPack(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n dilation=1,\n deformable_groups=1,\n ))\n if self.norm_cfg:\n self.reg_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])\n self.reg_convs.append(nn.ReLU(inplace=True))\n\n self.mask_convs.append(\n ModulatedDeformConvPack(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n dilation=1,\n deformable_groups=1,\n ))\n if self.norm_cfg:\n self.mask_convs.append(build_norm_layer(self.norm_cfg, 
self.feat_channels)[1])\n self.mask_convs.append(nn.ReLU(inplace=True))\n\n self.polar_cls = nn.Conv2d(\n self.feat_channels, self.cls_out_channels, 3, padding=1)\n self.polar_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n self.polar_mask = nn.Conv2d(self.feat_channels, 36, 3, padding=1)\n self.polar_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n\n self.scales_bbox = nn.ModuleList([Scale(1.0) for _ in self.strides])\n self.scales_mask = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n if not self.use_dcn:\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n for m in self.mask_convs:\n normal_init(m.conv, std=0.01)\n else:\n pass\n\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.polar_cls, std=0.01, bias=bias_cls)\n normal_init(self.polar_reg, std=0.01)\n normal_init(self.polar_mask, std=0.01)\n normal_init(self.polar_centerness, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.scales_bbox, self.scales_mask)\n\n def forward_single(self, x, scale_bbox, scale_mask):\n cls_feat = x\n reg_feat = x\n mask_feat = x\n\n for cls_layer in self.cls_convs:\n cls_feat = cls_layer(cls_feat)\n cls_score = self.polar_cls(cls_feat)\n centerness = self.polar_centerness(cls_feat)\n\n for reg_layer in self.reg_convs:\n reg_feat = reg_layer(reg_feat)\n # scale the bbox_pred of different level\n # float to avoid overflow when enabling FP16\n bbox_pred = scale_bbox(self.polar_reg(reg_feat)).float().exp()\n\n for mask_layer in self.mask_convs:\n mask_feat = mask_layer(mask_feat)\n mask_pred = scale_mask(self.polar_mask(mask_feat)).float().exp()\n\n return cls_score, bbox_pred, centerness, mask_pred\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'mask_preds', 'centernesses'))\n def loss(self,\n cls_scores,\n bbox_preds,\n centernesses,\n mask_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_masks,\n gt_bboxes_ignore=None,\n extra_data=None):\n assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len(mask_preds)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n\n labels, bbox_targets, mask_targets = self.polar_target(all_level_points, extra_data)\n\n num_imgs = cls_scores[0].size(0)\n # flatten cls_scores, bbox_preds and centerness\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_centerness = [\n centerness.permute(0, 2, 3, 1).reshape(-1)\n for centerness in centernesses\n ]\n flatten_mask_preds = [\n mask_pred.permute(0, 2, 3, 1).reshape(-1, 36)\n for mask_pred in mask_preds\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores) # [num_pixel, 80]\n flatten_bbox_preds = torch.cat(flatten_bbox_preds) # [num_pixel, 4]\n flatten_mask_preds = torch.cat(flatten_mask_preds) # [num_pixel, 36]\n flatten_centerness = torch.cat(flatten_centerness) # [num_pixel]\n\n flatten_labels = torch.cat(labels).long() # [num_pixel]\n flatten_bbox_targets = torch.cat(bbox_targets) # [num_pixel, 4]\n flatten_mask_targets = torch.cat(mask_targets) # [num_pixel, 36]\n flatten_points = torch.cat([points.repeat(num_imgs, 1)\n for points in all_level_points]) # [num_pixel,2]\n pos_inds = flatten_labels.nonzero().reshape(-1)\n num_pos = 
len(pos_inds)\n\n loss_cls = self.loss_cls(\n flatten_cls_scores, flatten_labels,\n avg_factor=num_pos + num_imgs) # avoid num_pos is 0\n pos_bbox_preds = flatten_bbox_preds[pos_inds]\n pos_centerness = flatten_centerness[pos_inds]\n pos_mask_preds = flatten_mask_preds[pos_inds]\n\n if num_pos > 0:\n pos_bbox_targets = flatten_bbox_targets[pos_inds]\n pos_mask_targets = flatten_mask_targets[pos_inds]\n pos_centerness_targets = self.polar_centerness_target(pos_mask_targets)\n\n pos_points = flatten_points[pos_inds]\n pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)\n pos_decoded_target_preds = distance2bbox(pos_points,\n pos_bbox_targets)\n\n # centerness weighted iou loss\n loss_bbox = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds,\n weight=pos_centerness_targets,\n avg_factor=pos_centerness_targets.sum())\n loss_mask = self.loss_mask(pos_mask_preds,\n pos_mask_targets,\n weight=pos_centerness_targets,\n avg_factor=pos_centerness_targets.sum())\n\n loss_centerness = self.loss_centerness(pos_centerness,\n pos_centerness_targets)\n else:\n loss_bbox = pos_bbox_preds.sum()\n loss_mask = pos_mask_preds.sum()\n loss_centerness = pos_centerness.sum()\n\n return dict(\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n loss_mask=loss_mask,\n loss_centerness=loss_centerness)\n\n def get_points(self, featmap_sizes, dtype, device):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n \"\"\"\n mlvl_points = []\n for i in range(len(featmap_sizes)):\n mlvl_points.append(\n self.get_points_single(featmap_sizes[i], self.strides[i],\n dtype, device))\n return mlvl_points\n\n def get_points_single(self, featmap_size, stride, dtype, device):\n h, w = featmap_size\n x_range = torch.arange(\n 0, w * stride, stride, dtype=dtype, device=device)\n y_range = torch.arange(\n 0, h * stride, stride, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range)\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n return points\n\n def polar_target(self, points, extra_data):\n assert len(points) == len(self.regress_ranges)\n\n num_levels = len(points)\n\n labels_list, bbox_targets_list, mask_targets_list = extra_data.values()\n\n # split to per img, per level\n num_points = [center.size(0) for center in points]\n labels_list = [labels.split(num_points, 0) for labels in labels_list]\n bbox_targets_list = [\n bbox_targets.split(num_points, 0)\n for bbox_targets in bbox_targets_list\n ]\n mask_targets_list = [\n mask_targets.split(num_points, 0)\n for mask_targets in mask_targets_list\n ]\n\n # concat per level image\n concat_lvl_labels = []\n concat_lvl_bbox_targets = []\n concat_lvl_mask_targets = []\n for i in range(num_levels):\n concat_lvl_labels.append(\n torch.cat([labels[i] for labels in labels_list]))\n concat_lvl_bbox_targets.append(\n torch.cat(\n [bbox_targets[i] for bbox_targets in bbox_targets_list]))\n concat_lvl_mask_targets.append(\n torch.cat(\n [mask_targets[i] for mask_targets in mask_targets_list]))\n\n return concat_lvl_labels, concat_lvl_bbox_targets, concat_lvl_mask_targets\n\n def polar_centerness_target(self, pos_mask_targets):\n # only calculate pos centerness targets, otherwise there may be nan\n centerness_targets = (pos_mask_targets.min(dim=-1)[0] / pos_mask_targets.max(dim=-1)[0])\n return torch.sqrt(centerness_targets)\n\n 
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n centernesses,\n mask_preds,\n img_metas,\n cfg,\n rescale=None):\n assert len(cls_scores) == len(bbox_preds)\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n result_list = []\n for img_id in range(len(img_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n centerness_pred_list = [\n centernesses[i][img_id].detach() for i in range(num_levels)\n ]\n mask_pred_list = [\n mask_preds[i][img_id].detach() for i in range(num_levels)\n ]\n img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n det_bboxes = self.get_bboxes_single(cls_score_list,\n bbox_pred_list,\n mask_pred_list,\n centerness_pred_list,\n mlvl_points, img_shape,\n scale_factor, cfg, rescale)\n result_list.append(det_bboxes)\n return result_list\n\n def get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n mask_preds,\n centernesses,\n mlvl_points,\n img_shape,\n scale_factor,\n cfg,\n rescale=False):\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)\n mlvl_bboxes = []\n mlvl_scores = []\n mlvl_masks = []\n mlvl_centerness = []\n for cls_score, bbox_pred, mask_pred, centerness, points in zip(\n cls_scores, bbox_preds, mask_preds, centernesses, mlvl_points):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n scores = cls_score.permute(1, 2, 0).reshape(\n -1, self.cls_out_channels).sigmoid()\n\n centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n mask_pred = mask_pred.permute(1, 2, 0).reshape(-1, 36)\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n max_scores, _ = (scores * centerness[:, None]).max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n points = points[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n mask_pred = mask_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n centerness = centerness[topk_inds]\n bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)\n masks = distance2mask(points, mask_pred, self.angles, max_shape=img_shape)\n\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_centerness.append(centerness)\n mlvl_masks.append(masks)\n\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n mlvl_masks = torch.cat(mlvl_masks)\n if rescale:\n _mlvl_bboxes = mlvl_bboxes / mlvl_bboxes.new_tensor(scale_factor)\n try:\n scale_factor = torch.Tensor(scale_factor)[:2].cuda().unsqueeze(1).repeat(1, 36)\n _mlvl_masks = mlvl_masks / scale_factor\n except:\n _mlvl_masks = mlvl_masks / mlvl_masks.new_tensor(scale_factor)\n\n mlvl_scores = torch.cat(mlvl_scores)\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)\n mlvl_centerness = torch.cat(mlvl_centerness)\n\n centerness_factor = 0.5 # mask centerness is smaller than origin centerness, so add a constant is important or the score will be too low.\n if self.mask_nms:\n '''1 mask->min_bbox->nms, performance same to origin box'''\n a = _mlvl_masks\n _mlvl_bboxes = torch.stack([a[:, 0].min(1)[0],a[:, 1].min(1)[0],a[:, 0].max(1)[0],a[:, 1].max(1)[0]],-1)\n det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(\n _mlvl_bboxes,\n mlvl_scores,\n 
_mlvl_masks,\n cfg.score_thr,\n cfg.nms,\n cfg.max_per_img,\n score_factors=mlvl_centerness + centerness_factor)\n\n else:\n '''2 origin bbox->nms, performance same to mask->min_bbox'''\n det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(\n _mlvl_bboxes,\n mlvl_scores,\n _mlvl_masks,\n cfg.score_thr,\n cfg.nms,\n cfg.max_per_img,\n score_factors=mlvl_centerness + centerness_factor)\n\n return det_bboxes, det_labels, det_masks\n\n\n# test\ndef distance2mask(points, distances, angles, max_shape=None):\n '''Decode distance prediction to 36 mask points\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 36,from angle 0 to 350.\n angles (Tensor):\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded masks.\n '''\n num_points = points.shape[0]\n points = points[:, :, None].repeat(1, 1, 36)\n c_x, c_y = points[:, 0], points[:, 1]\n\n sin = torch.sin(angles)\n cos = torch.cos(angles)\n sin = sin[None, :].repeat(num_points, 1)\n cos = cos[None, :].repeat(num_points, 1)\n\n x = distances * sin + c_x\n y = distances * cos + c_y\n\n if max_shape is not None:\n x = x.clamp(min=0, max=max_shape[1] - 1)\n y = y.clamp(min=0, max=max_shape[0] - 1)\n\n res = torch.cat([x[:, None, :], y[:, None, :]], dim=1)\n return res\n\n\n\n", "import logging\nfrom abc import ABCMeta, abstractmethod\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nimport torch.nn as nn\n\nfrom mmdet.core import auto_fp16, get_classes, tensor2imgs\n\n\nclass BaseDetector(nn.Module):\n \"\"\"Base class for detectors\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BaseDetector, self).__init__()\n self.fp16_enabled = False\n\n @property\n def with_neck(self):\n return hasattr(self, 'neck') and self.neck is not None\n\n @property\n def with_shared_head(self):\n return hasattr(self, 'shared_head') and self.shared_head is not None\n\n @property\n def with_bbox(self):\n return hasattr(self, 'bbox_head') and self.bbox_head is not None\n\n @property\n def with_mask(self):\n return hasattr(self, 'mask_head') and self.mask_head is not None\n\n @abstractmethod\n def extract_feat(self, imgs):\n pass\n\n def extract_feats(self, imgs):\n assert isinstance(imgs, list)\n for img in imgs:\n yield self.extract_feat(img)\n\n @abstractmethod\n def forward_train(self, imgs, img_metas, **kwargs):\n pass\n\n @abstractmethod\n def simple_test(self, img, img_meta, **kwargs):\n pass\n\n @abstractmethod\n def aug_test(self, imgs, img_metas, **kwargs):\n pass\n\n def init_weights(self, pretrained=None):\n if pretrained is not None:\n logger = logging.getLogger()\n logger.info('load model from: {}'.format(pretrained))\n\n def forward_test(self, imgs, img_metas, **kwargs):\n for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError('{} must be a list, but got {}'.format(\n name, type(var)))\n\n num_augs = len(imgs)\n if num_augs != len(img_metas):\n raise ValueError(\n 'num of augmentations ({}) != num of image meta ({})'.format(\n len(imgs), len(img_metas)))\n # TODO: remove the restriction of imgs_per_gpu == 1 when prepared\n imgs_per_gpu = imgs[0].size(0)\n assert imgs_per_gpu == 1\n\n if num_augs == 1:\n return self.simple_test(imgs[0], img_metas[0], **kwargs)\n else:\n return self.aug_test(imgs, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('img', ))\n def forward(self, img, img_meta, return_loss=True, **kwargs):\n if return_loss:\n return self.forward_train(img, img_meta, **kwargs)\n else:\n 
return self.forward_test(img, img_meta, **kwargs)\n\n def show_result(self,\n data,\n result,\n img_norm_cfg,\n dataset=None,\n score_thr=0.3):\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n\n img_tensor = data['img'][0]\n img_metas = data['img_meta'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_norm_cfg)\n assert len(imgs) == len(img_metas)\n\n if dataset is None:\n class_names = self.CLASSES\n elif isinstance(dataset, str):\n class_names = get_classes(dataset)\n elif isinstance(dataset, (list, tuple)):\n class_names = dataset\n else:\n raise TypeError(\n 'dataset must be a valid dataset name or a sequence'\n ' of class names, not {}'.format(type(dataset)))\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n bboxes = np.vstack(bbox_result)\n # draw segmentation masks\n if segm_result is not None:\n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > score_thr)[0]\n for i in inds:\n color_mask = np.random.randint(\n 0, 256, (1, 3), dtype=np.uint8)\n mask = maskUtils.decode(segms[i]).astype(np.bool)\n img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5\n # draw bounding boxes\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n mmcv.imshow_det_bboxes(\n img_show,\n bboxes,\n labels,\n class_names=class_names,\n score_thr=score_thr)\n" ]
[ [ "torch.cos", "torch.cat", "torch.sqrt", "torch.nn.ModuleList", "torch.sin", "torch.arange", "torch.nn.ReLU", "torch.range", "torch.nn.Conv2d", "torch.meshgrid", "torch.Tensor" ], [ "numpy.concatenate", "numpy.full", "numpy.where", "numpy.random.randint", "numpy.vstack" ] ]
PhilippVerpoort/blue-green-H2
[ "a9b9bd27d2459df0f14e719a466af5ed6318d7e0", "a9b9bd27d2459df0f14e719a466af5ed6318d7e0" ]
[ "src/data/fuels/calc_fuels.py", "src/plotting/plots/plotOverTime.py" ]
[ "import pandas as pd\n\nfrom src.data.fuels.calc_ghgi import calcGHGI\nfrom src.data.fuels.calc_cost import calcCost\nfrom src.timeit import timeit\n\n\n# calculate fuel data\n@timeit\ndef calcFuelData(times: list, full_params: pd.DataFrame, fuels: dict, gwp: str = 'gwp100', levelised: bool = False):\n fuelSpecs = {'names': {}, 'colours': {}}\n fuelEntries = []\n\n for fuel_id, fuel in fuels.items():\n fuelSpecs['names'][fuel_id] = fuel['desc']\n fuelSpecs['colours'][fuel_id] = fuel['colour']\n\n for t in times:\n currentParams = full_params.query(f\"year=={t}\").droplevel(level=1)\n\n levelisedCost = calcCost(currentParams, fuel)\n levelisedGHGI = calcGHGI(currentParams, fuel, gwp)\n\n newFuel = {'fuel': fuel_id, 'type': fuels[fuel_id]['type'], 'year': t}\n\n for component in levelisedCost:\n newFuel[f\"cost__{component}\"] = levelisedCost[component][0]\n newFuel[f\"cost_uu__{component}\"] = levelisedCost[component][1]\n newFuel[f\"cost_ul__{component}\"] = levelisedCost[component][2]\n for component in levelisedGHGI:\n newFuel[f\"ghgi__{component}\"] = levelisedGHGI[component][0]\n newFuel[f\"ghgi_uu__{component}\"] = levelisedGHGI[component][1]\n newFuel[f\"ghgi_ul__{component}\"] = levelisedGHGI[component][2]\n\n newFuel['cost'] = sum(newFuel[f\"cost__{component}\"] for component in levelisedCost)\n newFuel['cost_uu'] = sum(newFuel[f\"cost_uu__{component}\"] for component in levelisedCost)\n newFuel['cost_ul'] = sum(newFuel[f\"cost_ul__{component}\"] for component in levelisedCost)\n\n newFuel['ghgi'] = sum(newFuel[f\"ghgi__{component}\"] for component in levelisedGHGI)\n newFuel['ghgi_uu'] = sum(newFuel[f\"ghgi_uu__{component}\"] for component in levelisedGHGI)\n newFuel['ghgi_ul'] = sum(newFuel[f\"ghgi_ul__{component}\"] for component in levelisedGHGI)\n\n fuelEntries.append(newFuel)\n\n fuelData = pd.DataFrame.from_records(fuelEntries)\n\n return fuelData, fuelSpecs\n", "from string import ascii_lowercase\n\nimport numpy as np\nimport pandas as pd\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom plotly.colors import hex_to_rgb\n\nfrom src.timeit import timeit\n\n\n@timeit\ndef plotOverTime(FSCPData: pd.DataFrame, FSCPDataSteel: pd.DataFrame, config: dict):\n # select which lines to plot based on function argument\n FSCPsCols, plotFSCP, plotLines = __selectPlotFSCPs(FSCPData, config['showFSCPs'], config['refFuelTop'],\n config['n_samples'])\n FSCPsCols, plotFSCPSteel, plotLinesSteel = __selectPlotFSCPs(FSCPDataSteel, config['showFSCPs'],\n config['refFuelBottom'], config['n_samples'])\n\n # produce figure\n fig = __produceFigure(FSCPsCols, plotFSCP, plotFSCPSteel, plotLines, plotLinesSteel, config)\n\n # styling figure\n __styling(fig, config)\n\n return {'fig3': fig}\n\n\ndef __selectPlotFSCPs(FSCPData: pd.DataFrame, showFSCPs: dict, refFuel: str, n_samples: int):\n FSCPsCols = [None] * len(showFSCPs)\n\n listOfFSCPs = pd.DataFrame(columns=(FSCPData.keys().tolist() + ['plotIndex']))\n for index, args in enumerate(showFSCPs):\n cols, fuel_x, fuel_y = args\n if fuel_x == 'ref': fuel_x = refFuel\n addFSCP = FSCPData.query(f\"fuel_x=='{fuel_x}' & fuel_y=='{fuel_y}' & year_x==year_y\").reset_index(drop=True)\n if fuel_x == refFuel: addFSCP.loc[:, 'fuel_x'] = 'ref'\n addFSCP.insert(1, 'plotIndex', len(addFSCP) * [index])\n FSCPsCols[index] = cols\n listOfFSCPs = pd.concat([listOfFSCPs, addFSCP], ignore_index=True)\n\n # year_x == year_y, so we only need one of them from now on\n listOfFSCPs['year'] = listOfFSCPs['year_x']\n\n # return FSCPs for 
scatter plots\n plotFSCP = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'fscp', 'fscp_uu', 'fscp_ul']]\n\n # return costs and GHGIs for line plots\n plotLines = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']]\n\n # interpolation of plotLines\n t = np.linspace(plotLines['year'].min(), plotLines['year'].max(), n_samples)\n dtypes = {'year': float, 'cost_x': float, 'cost_y': float, 'ghgi_x': float, 'ghgi_y': float}\n allEntries = []\n\n for index in plotLines['plotIndex'].unique():\n samples = plotLines.query(f\"plotIndex=={index}\").reset_index(drop=True).astype(dtypes)\n fuel_x = samples.fuel_x.iloc[0]\n fuel_y = samples.fuel_y.iloc[0]\n new = dict(\n plotIndex=n_samples * [int(index)],\n fuel_x=n_samples * [fuel_x],\n fuel_y=n_samples * [fuel_y],\n year=t,\n )\n tmp = pd.DataFrame(new, columns=plotLines.keys())\n tmp.index = np.arange(len(samples), len(tmp) + len(samples))\n tmp = tmp.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes)\n allEntries.append(tmp.interpolate())\n\n plotLinesInterpolated = pd.concat(allEntries, ignore_index=True)\n plotLinesInterpolated['fscp'] = (plotLinesInterpolated['cost_x'] - plotLinesInterpolated['cost_y']) / (\n plotLinesInterpolated['ghgi_y'] - plotLinesInterpolated['ghgi_x'])\n\n return FSCPsCols, plotFSCP, plotLinesInterpolated\n\n\ndef __produceFigure(FSCPsCols: list, plotFSCP: pd.DataFrame, plotFSCPSteel: pd.DataFrame,\n plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):\n # plot\n fig = make_subplots(\n rows=2,\n cols=2,\n subplot_titles=ascii_lowercase,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n vertical_spacing=0.1,\n )\n\n\n # add FSCP traces for heating\n traces = __addFSCPTraces(plotFSCP, plotLines, len(FSCPsCols), config['refFuelTop'], config)\n for id, trace in traces:\n for j, col in enumerate(FSCPsCols[id]):\n if j: trace.showlegend = False\n fig.add_trace(trace, row=1, col=col)\n\n\n # add FSCP traces for steel\n traces = __addFSCPTraces(plotFSCPSteel, plotLinesSteel, len(FSCPsCols), config['refFuelBottom'], config)\n for id, trace in traces:\n for j, col in enumerate(FSCPsCols[id]):\n trace.showlegend = False\n fig.add_trace(trace, row=2, col=col)\n\n\n # compute and plot carbon price tracjetory\n cpTrajData = __computeCPTraj(config['co2price_traj']['years'], config['co2price_traj']['values'], config['n_samples'])\n traces = __addCPTraces(cpTrajData, config)\n for trace in traces:\n for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:\n if i or j: trace.showlegend = False\n fig.add_trace(trace, row=i + 1, col=j + 1)\n\n\n # zero y line\n for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:\n fig.add_hline(0.0, line_width=config['global']['lw_thin'], line_color='black', row=i + 1, col=j + 1)\n\n\n # add text annotations explaining figure content\n annotationStyling = dict(xanchor='center', yanchor='middle', showarrow=False,\n bordercolor='black', borderwidth=2, borderpad=3, bgcolor='white')\n\n for i in range(2):\n axisNumb = str(i+1) if i else ''\n blueTech = config['annotationLabels']['blueTechs'][i]\n fig.add_annotation(\n x=0.50,\n xref=f\"x{axisNumb} domain\",\n y=1.15,\n yref=f\"y{axisNumb} domain\",\n text=f\"Blue H<sub>2</sub> from {blueTech}\",\n **annotationStyling\n )\n\n for i in range(2):\n axisNumb = str(i+2) if i else ''\n application = config['annotationLabels']['applications'][i]\n fig.add_annotation(\n x=-0.17,\n xref=f\"x{axisNumb} domain\",\n y=0.5,\n yref=f\"y{axisNumb} domain\",\n text=f\"{application}\",\n textangle=-90,\n 
**annotationStyling\n )\n\n\n # add circles on intersects\n __addAnnotations(fig, cpTrajData, plotLines, plotLinesSteel, config)\n\n\n # add arrows in 2025\n __addAnnotationArrows(fig, config)\n\n\n # add legend for annotations\n __addAnnotationsLegend(fig, config)\n\n\n # update axes titles and ranges\n fig.update_layout(\n xaxis=dict(\n title=config['labels']['time'],\n range=[config['plotting']['t_min'], config['plotting']['t_max']]\n ),\n xaxis2=dict(\n title=config['labels']['time'],\n range=[config['plotting']['t_min'], config['plotting']['t_max']]\n ),\n xaxis3=dict(\n title=config['labels']['time'],\n range=[config['plotting']['t_min'], config['plotting']['t_max']]\n ),\n xaxis4=dict(\n title=config['labels']['time'],\n range=[config['plotting']['t_min'], config['plotting']['t_max']]\n ),\n yaxis=dict(\n title=config['labels']['fscp'],\n range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]\n ),\n yaxis3=dict(\n title=config['labels']['fscp_steel'],\n range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]\n ),\n margin_l=180.0,\n margin_b=520.0,\n )\n\n return fig\n\n\ndef __addAnnotations(fig: go.Figure, cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):\n traceArgs = [\n dict(row=1, col=1, lines=plotLines, anno=config['annotationFuels']['left']),\n dict(row=1, col=2, lines=plotLines, anno=config['annotationFuels']['right']),\n dict(row=2, col=1, lines=plotLinesSteel, anno=config['annotationFuels']['left']),\n dict(row=2, col=2, lines=plotLinesSteel, anno=config['annotationFuels']['right']),\n ]\n\n for args in traceArgs:\n points = __calcPoints(cpTrajData, args['lines'], args['anno'])\n data = pd.DataFrame(points).T\n\n fig.add_trace(go.Scatter(\n x=data.year,\n y=data.fscp,\n text=data.index,\n mode='markers+text',\n marker=dict(symbol='circle-open', size=config['global']['highlight_marker'], line={'width': config['global']['lw_thin']}, color='Black'),\n textposition='bottom center',\n showlegend=False,\n # hovertemplate = f\"{name}<br>Carbon price: %{{x:.2f}}&plusmn;%{{error_x.array:.2f}}<extra></extra>\",\n ), row=args['row'], col=args['col'])\n\n\ndef __calcPoints(cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, fuels: list) -> dict:\n points = {}\n\n fuelRef, fuelGreen, fuelBlue = fuels\n\n dropCols = ['plotIndex', 'fuel_x', 'fuel_y', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']\n greenLine = plotLines.query(f\"fuel_x=='{fuelRef}' & fuel_y=='{fuelGreen}'\").drop(columns=dropCols).reset_index(drop=True)\n blueLine = plotLines.query(f\"fuel_x=='{fuelRef}' & fuel_y=='{fuelBlue}'\").drop(columns=dropCols).reset_index(drop=True)\n redLine = plotLines.query(f\"fuel_x=='{fuelBlue}' & fuel_y=='{fuelGreen}'\").drop(columns=dropCols).reset_index(drop=True)\n\n purpleLine = cpTrajData.drop(columns=['name', 'CP_u', 'CP_l'])\n\n for i, line in enumerate([blueLine, greenLine, redLine]):\n diffLines = pd.merge(line, purpleLine, on=['year'])\n diffLines['delta'] = (diffLines['fscp'] - diffLines['CP']).abs()\n points[i+2] = diffLines.nsmallest(1, 'delta').drop(columns=['CP', 'delta']).iloc[0]\n\n diffLines = pd.merge(blueLine, greenLine, on=['year'], suffixes=('', '_right'))\n diffLines['delta'] = (diffLines['fscp'] - diffLines['fscp_right']).abs()\n points[5] = diffLines.nsmallest(1, 'delta').drop(columns=['fscp_right', 'delta']).iloc[0]\n\n points[6] = redLine.abs().nsmallest(1, 'fscp').iloc[0]\n\n return points\n\n\ndef __addAnnotationArrows(fig: go.Figure, config: dict):\n __addArrow(fig, 2025.0, 150.0, 600.0, 1, 
1, config)\n __addArrow(fig, 2025.5, 150.0, 800.0, 1, 1, config)\n fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=1, showarrow=False)\n\n __addArrow(fig, 2025.0, 150.0, 300.0, 1, 2, config)\n __addArrow(fig, 2025.5, 150.0, 800.0, 1, 2, config)\n fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=2, showarrow=False)\n\n __addArrow(fig, 2024.5, 90.0, 200.0, 2, 1, config)\n fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=1, showarrow=False)\n\n __addArrow(fig, 2024.5, 90.0, 200.0, 2, 2, config)\n fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=2, showarrow=False)\n\n\ndef __addArrow(fig: go.Figure, x: float, y1: float, y2: float, row: int, col: int, config: dict):\n xaxes = [['x', 'x2'], ['x3', 'x4']]\n yaxes = [['y', 'y2'], ['y3', 'y4']]\n \n for ay, y in [(y1, y2), (y2, y1)]:\n fig.add_annotation(\n axref=xaxes[row-1][col-1],\n xref=xaxes[row-1][col-1],\n ayref=yaxes[row-1][col-1],\n yref=yaxes[row-1][col-1],\n ax=x,\n x=x,\n ay=ay,\n y=y,\n arrowcolor='black',\n arrowwidth=config['global']['lw_thin'],\n #arrowsize=config['global']['highlight_marker_sm'],\n arrowhead=2,\n showarrow=True,\n row=row,\n col=col,\n )\n\n\ndef __addAnnotationsLegend(fig: go.Figure, config: dict):\n y0 = -0.40\n\n fig.add_shape(\n type='rect',\n x0=0.0,\n y0=y0,\n x1=0.80,\n y1=y0-0.2,\n xref='paper',\n yref='paper',\n line_width=2,\n fillcolor='white',\n )\n\n fig.add_annotation(\n text=f\"<b>{config['annotationTexts']['heading1']}:</b><br><br><br><b>{config['annotationTexts']['heading2']}:</b>\",\n align='left',\n xanchor='left',\n x=0.0,\n yanchor='top',\n y=y0,\n xref='paper',\n yref='paper',\n showarrow=False,\n )\n\n for i in range(6):\n fig.add_annotation(\n text=f\"{i+1}: \"+config['annotationTexts'][f\"point{i+1}\"],\n align='left',\n xanchor='left',\n x=0.0 + i%3 * 0.22,\n yanchor='top',\n y=y0-(0.03 if i<3 else 0.13),\n xref='paper',\n yref='paper',\n showarrow=False,\n )\n\n\n\ndef __addFSCPTraces(plotData: pd.DataFrame, plotLines: pd.DataFrame, n_lines: int, refFuel: str, config: dict, sensitivityNG: bool = False):\n traces = []\n\n for index in range(n_lines):\n thisDataScatter = plotData.query(f\"plotIndex=={index}\").reset_index(drop=True)\n thisDataLine = plotLines.query(f\"plotIndex=={index}\").reset_index(drop=True)\n\n # styling of individual lines\n truncated = (thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB' and thisDataScatter.loc[0, 'fuel_y'] == 'green RE') or \\\n thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'\n dashed = thisDataScatter.loc[0, 'fuel_y'] in ['green pure RE', 'blue LEB lowscco2']\n longdashed = thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'\n shift = 0\n if thisDataScatter.loc[0, 'fuel_y'] == 'green RE':\n if thisDataScatter.loc[0, 'fuel_x'] == 'ref':\n shift = -1\n else:\n shift = +1\n elif thisDataScatter.loc[0, 'fuel_y'] == 'green pure RE':\n shift = +2\n thisDataScatter = thisDataScatter.query(f\"year<=2035\")\n thisDataLine = thisDataLine.query(f\"year<=2035\")\n\n # line properties\n fuel_x = thisDataScatter.iloc[thisDataScatter.first_valid_index()]['fuel_x']\n fuel_y = thisDataScatter.iloc[0]['fuel_y']\n name = f\"Fossil→{config['names'][fuel_y]}\" if fuel_x == 'ref' else f\"{config['names'][fuel_x]}→{config['names'][fuel_y]}\"\n col = config['fscp_colours'][f\"{fuel_x} to {fuel_y}\"] if f\"{fuel_x} to {fuel_y}\" in config['fscp_colours'] else \\\n config['colours'][fuel_y]\n\n # do not plot awkward red line in sensitivity analysis row 2\n if sensitivityNG and fuel_x == 'blue LEB':\n continue\n\n # scatter 
plot\n traces.append((index, go.Scatter(\n x=thisDataScatter['year'],\n y=thisDataScatter['fscp'],\n name=name,\n legendgroup=0 if fuel_x == 'ref' else 1,\n showlegend=False,\n mode='markers',\n line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'solid'),\n marker=dict(symbol='x-thin', size=config['global']['highlight_marker_sm'], line={'width': config['global']['lw_thin'], 'color': col}, ),\n hovertemplate=f\"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}&plusmn;%{{error_y.array:.2f}}<extra></extra>\",\n )))\n\n # remove unphysical negative FSCPs\n if truncated and not sensitivityNG:\n thisDataLine = thisDataLine.query(f\"(year>=2030 & fscp>0.0) | year>=2040\")\n\n # line plot\n traces.append((index, go.Scatter(\n x=thisDataLine['year'],\n y=thisDataLine['fscp'],\n legendgroup=0 if fuel_x == 'ref' else 1,\n legendgrouptitle=dict(text=f\"<b>{config['legendlabels'][0]}:</b>\" if fuel_x=='ref' else f\"<b>{config['legendlabels'][0]}:</b>\"),\n name=name,\n mode='lines',\n line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'dash' if longdashed else 'solid'),\n )))\n\n # error bars\n thisDataScatter = thisDataScatter.query(f\"year==[2030,2040,2050]\")\n thisDataScatter = thisDataScatter.query(f\"fscp<={config['plotting']['fscp_max']} and (fscp>0.0 | year > 2040)\")\n\n traces.append((index, go.Scatter(\n x=thisDataScatter['year'] + shift * 0.1,\n y=thisDataScatter['fscp'],\n error_y=dict(type='data', array=thisDataScatter['fscp_uu'], arrayminus=thisDataScatter['fscp_ul'],\n thickness=config['global']['lw_thin']),\n name=name,\n legendgroup=0 if fuel_x == 'ref' else 1,\n showlegend=False,\n mode='markers',\n marker=dict(symbol='x-thin', size=0.00001,),\n line_color=('rgba({}, {}, {}, {})'.format(*hex_to_rgb(col), .4)),\n hovertemplate=f\"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}&plusmn;%{{error_y.array:.2f}}<extra></extra>\",\n )))\n\n return traces\n\n\n# compute carbon price trajectories\ndef __computeCPTraj(years: list, values: dict, n_samples: int):\n v_mean = []\n v_upper = []\n v_lower = []\n\n for i, year in enumerate(years):\n vals = [v[i] for v in values.values()]\n mean = sum(vals)/len(vals)\n v_mean.append(mean)\n v_upper.append(max(vals)-mean)\n v_lower.append(mean-min(vals))\n\n # create data frame with time and cp values\n cpData = pd.DataFrame({\n 'year': years,\n 'CP': v_mean,\n 'CP_u': v_upper,\n 'CP_l': v_lower,\n })\n\n # interpolate in between\n samples = pd.DataFrame({'year': np.linspace(years[0], years[-1], n_samples)})\n dtypes = {'year': float, 'CP': float, 'CP_u': float, 'CP_l': float}\n cpData = cpData.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes).interpolate()\n\n # add name to dataframe\n cpData['name'] = 'cp'\n\n return cpData\n\n\n# plot traces\ndef __addCPTraces(cpTrajData: pd.DataFrame, config: dict):\n traces = []\n\n name = config['carbon_price_config']['name']\n colour = config['carbon_price_config']['colour']\n\n # add main graphs (FSCP and CP)\n traces.append(go.Scatter(\n name=name,\n legendgroup=1,\n mode='lines',\n x=cpTrajData['year'],\n y=cpTrajData['CP'],\n line_color=colour,\n line_width=config['global']['lw_thin'],\n showlegend=True,\n hovertemplate=f\"<b>{name}</b><br>Time: %{{x:.2f}}<br>Carbon price: %{{y:.2f}}<extra></extra>\"\n ))\n\n data_x = cpTrajData['year']\n data_yu = cpTrajData['CP'] + cpTrajData['CP_u']\n data_yl = cpTrajData['CP'] - cpTrajData['CP_l']\n\n errorBand = go.Scatter(\n name='Uncertainty Range',\n legendgroup=1,\n 
x=pd.concat([data_x, data_x[::-1]], ignore_index=True),\n y=pd.concat([data_yl, data_yu[::-1]], ignore_index=True),\n mode='lines',\n marker=dict(color=colour),\n fillcolor=(\"rgba({}, {}, {}, 0.1)\".format(*hex_to_rgb(colour))),\n fill='toself',\n line=dict(width=config['global']['lw_ultrathin']),\n showlegend=False,\n hoverinfo='skip'\n )\n traces.append(errorBand)\n\n return traces\n\n\ndef __styling(fig: go.Figure, config: dict):\n # update legend styling\n fig.update_layout(\n legend=dict(\n orientation='h',\n xanchor='left',\n x=0.0,\n yanchor='top',\n y=-0.1,\n bgcolor='rgba(255,255,255,1.0)',\n bordercolor='black',\n borderwidth=2,\n ),\n )\n\n # update axis styling\n for axis in ['xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'yaxis', 'yaxis2', 'yaxis3', 'yaxis4']:\n update = {axis: dict(\n showline=True,\n linewidth=2,\n linecolor='black',\n showgrid=False,\n zeroline=False,\n mirror=True,\n ticks='outside',\n )}\n fig.update_layout(**update)\n\n # update figure background colour and font colour and type\n fig.update_layout(\n paper_bgcolor='rgba(255, 255, 255, 1.0)',\n plot_bgcolor='rgba(255, 255, 255, 0.0)',\n font_color='black',\n font_family='Helvetica',\n )\n\n # move title annotations\n for i, annotation in enumerate(fig['layout']['annotations'][:len(config['subplot_title_positions'])]):\n x_pos, y_pos = config['subplot_title_positions'][i]\n annotation['xanchor'] = 'left'\n annotation['yanchor'] = 'top'\n annotation['xref'] = 'paper'\n annotation['yref'] = 'paper'\n\n annotation['x'] = x_pos\n annotation['y'] = y_pos\n\n annotation['text'] = \"<b>{0}</b>\".format(annotation['text'])\n" ]
[ [ "pandas.DataFrame.from_records" ], [ "pandas.DataFrame", "numpy.linspace", "pandas.merge", "pandas.concat" ] ]
TatsuyaHaga/reversereplaymodel_codes
[ "579009d260f32b259994d77c8a66877cf6304dee" ]
[ "Fig5/10cells_wideSTDP/plot_bias_eachparam.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy\nimport scipy.stats\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport pylab\nimport seaborn\nseaborn.set(context=\"paper\", style=\"white\", palette=\"deep\")\n\ndata=numpy.loadtxt(\"bias_log.csv\", delimiter=\",\")\ndata[:,0]*=1000\n\nlinfit=numpy.zeros([4,4])\n\npylab.close()\npylab.figure(figsize=(3,2))\npylab.subplot(2,2,1)\nsignif=data[:,4]<0.01\nnosignif=data[:,4]>=0.01\npylab.plot(data[signif,0], data[signif,2], \".\", markersize=2, color=\"black\")\npylab.plot(data[nosignif,0], data[nosignif,2], \".\", markersize=2, color=\"grey\")\na,b,r_val, p_val, stderr=scipy.stats.linregress(data[:,0], data[:,2])\nlinfit[0,:]=[a,b,r_val,p_val]\npylab.plot([numpy.min(data[:,0]), numpy.max(data[:,0])], [a*numpy.min(data[:,0])+b, a*numpy.max(data[:,0])+b], color=\"red\")\npylab.plot([numpy.min(data[:,0]), numpy.max(data[:,0])], [0.0, 0.0], color=\"blue\")\npylab.xticks([])\npylab.ylabel(\"Mean bias\")\n#pylab.xlabel(\"ISI [ms]\")\n\npylab.subplot(2,2,2)\nsignif=data[:,4]<0.01\nnosignif=data[:,4]>=0.01\npylab.plot(data[signif,1], data[signif,2], \".\", markersize=2, color=\"black\")\npylab.plot(data[nosignif,1], data[nosignif,2], \".\", markersize=2, color=\"grey\")\na,b,r_val,p_val,stderr=scipy.stats.linregress(data[:,1], data[:,2])\nlinfit[1,:]=[a,b,r_val,p_val]\npylab.plot([numpy.min(data[:,1]), numpy.max(data[:,1])], [a*numpy.min(data[:,1])+b, a*numpy.max(data[:,1])+b], color=\"red\")\npylab.plot([numpy.min(data[:,1]), numpy.max(data[:,1])], [0.0, 0.0], color=\"blue\")\npylab.xticks([])\npylab.yticks([])\n#pylab.ylabel(\"Mean bias\")\n#pylab.xlabel(\"Speed [ms/cell]\")\n\npylab.subplot(2,2,3)\nsignif=data[:,6]<0.01\nnosignif=data[:,6]>=0.01\npylab.plot(data[signif,0], data[signif,5], \".\", markersize=2, color=\"black\")\npylab.plot(data[nosignif,0], data[nosignif,5], \".\", markersize=2, color=\"grey\")\na,b,r_val,p_val,stderr=scipy.stats.linregress(data[:,0], data[:,5])\nlinfit[2,:]=[a,b,r_val,p_val]\npylab.plot([numpy.min(data[:,0]), numpy.max(data[:,0])], [a*numpy.min(data[:,0])+b, a*numpy.max(data[:,0])+b], color=\"red\")\npylab.plot([numpy.min(data[:,0]), numpy.max(data[:,0])], [0.5, 0.5], color=\"blue\")\npylab.plot([13.0,13.0], [0.0,1.0], \"--\", color=\"black\")\npylab.xticks([10,20,30,40,50])\npylab.ylim([0,1])\npylab.yticks([0,0.5,1])\npylab.ylabel(\"P(bias>0)\")\npylab.xlabel(\"Peak firing rate\")\n\npylab.subplot(2,2,4)\nsignif=data[:,6]<0.01\nnosignif=data[:,6]>=0.01\npylab.plot(data[signif,1], data[signif,5], \".\", markersize=2, color=\"black\")\npylab.plot(data[nosignif,1], data[nosignif,5], \".\", markersize=2, color=\"grey\")\na,b,r_val,p_val,stderr=scipy.stats.linregress(data[:,1], data[:,5])\nlinfit[3,:]=[a,b,r_val,p_val]\npylab.plot([numpy.min(data[:,1]), numpy.max(data[:,1])], [a*numpy.min(data[:,1])+b, a*numpy.max(data[:,1])+b], color=\"red\")\npylab.plot([numpy.min(data[:,1]), numpy.max(data[:,1])], [0.5, 0.5], color=\"blue\")\n#pylab.xticks([5,10,20,30,40,50])\npylab.ylim([0,1])\npylab.yticks([0,0.5,1])\npylab.yticks([])\n#pylab.ylabel(\"P(bias>0)\")\npylab.xlabel(\"Phase selectivity\")\n\npylab.tight_layout()\npylab.savefig(\"bias.pdf\")\n\nnumpy.savetxt(\"linregress_results.csv\", linfit, delimiter=\",\")\n" ]
[ [ "matplotlib.use", "numpy.max", "numpy.savetxt", "numpy.zeros", "numpy.min", "numpy.loadtxt" ] ]
ma02954AteebAhmed/InstaWeight
[ "a1ef58d60cfecb867d78b87adc6df8929216dd10" ]
[ "instaweight/utils/rs.py" ]
[ "# First import the library\nimport pyrealsense2 as rs\nfrom PIL import Image \nimport numpy as np\nimport cv2\n\n\nimage_no = 100000\n\n# Create a context object. This object owns the handles to all connected realsense devices\npipeline = rs.pipeline()\nconfig = rs.config()\nprint(config)\nexit()\nconfig.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)\nprofile = pipeline.start(config)\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\n\n\n\nwhile True:\n\n # for preview of images\n while(True):\n\n frames = pipeline.wait_for_frames()\n color = frames.get_color_frame()\n color = np.asanyarray(color.get_data())\n\n cv2.imshow(\"Image\", color)\n\n k = cv2.waitKey(30)\n if k == 27: # if ESC is pressed, close the program\n exit()\n elif k == 32:\n break\n else:\n continue\n\n # This call waits until a new coherent set of frames is available on a device\n # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called\n frames = pipeline.wait_for_frames()\n depth = frames.get_depth_frame()\n color = frames.get_color_frame()\n\n color = np.asanyarray(color.get_data())\n img_color = Image.fromarray(color)\n img_color.save('C:\\\\users\\\\ateeb\\\\desktop\\\\depth_dataset\\\\{}.jpg'.format(image_no))\n \n # Create alignment primitive with color as its target stream:\n \n align = rs.align(rs.stream.color)\n frames = align.process(frames)\n\n # Update color and depth frames:\n aligned_depth_frame = frames.get_depth_frame()\n depth = aligned_depth_frame.get_data()\n depth = np.asanyarray(depth , dtype = 'float') * depth_scale\n\n #saving the depth data\n np.save('C:\\\\users\\\\ateeb\\\\desktop\\\\depth_\\\\'+str(image_no) + '.npy' , depth)\n\n image_no += 1\n print(str(image_no)+'.jpg')" ]
[ [ "numpy.asanyarray" ] ]
inpho/vsm
[ "d5fc930ccc95f275e10e151c8f05db2c05aba01f" ]
[ "unit_tests/tests_labeleddata.py" ]
[ "from builtins import str\nfrom builtins import zip\nfrom builtins import range\nfrom past.builtins import basestring\n\nimport unittest2 as unittest\nimport numpy as np\n\nfrom vsm.viewer.labeleddata import *\n\n\nclass TestLabeleddata(unittest.TestCase):\n\n def setUp(self):\n\n words = ['row', 'row', 'row', 'your', 'boat', 'gently', 'down', 'the', \n 'stream', 'merrily', 'merrily', 'merrily', 'merrily', 'life', \n 'is', 'but', 'a', 'dream']\n values = [np.random.random() for t in words]\n d = [('i', np.array(words).dtype), \n ('value', np.array(values).dtype)]\n self.v = np.array(list(zip(words, values)), dtype=d)\n\n\n\n def test_LabeledColumn(self):\n\n arr = self.v.view(LabeledColumn)\n arr.subcol_headers = ['Word', 'Value']\n arr.col_header = 'Song lets make this longer than subcol headers'\n arr.col_len = 10\n arr1 = self.v.view(LabeledColumn)\n\n self.assertTrue(isinstance(arr.__str__(), basestring))\n self.assertTrue(sum(arr.subcol_widths) <= arr.col_width)\n self.assertEqual(arr.shape[0], arr1.col_len)\n self.assertFalse(arr1.col_header)\n self.assertFalse(arr1.subcol_headers)\n\n\n def test_DataTable(self):\n\n v = LabeledColumn(self.v)\n v.subcol_widths = [30, 20]\n v.col_len = 10\n t = []\n for i in range(5):\n t.append(v.copy())\n t[i].col_header = 'Iteration ' + str(i)\n \n schc = ['Topic', 'Word']\n schf = ['Word', 'Value'] \n t = DataTable(t, 'Song', subcolhdr_compact=schc, subcolhdr_full=schf)\n\n self.assertTrue(isinstance(t.__str__(), basestring))\n self.assertTrue('Song', t.table_header)\n\n t.compact_view = False\n self.assertTrue(isinstance(t.__str__(), basestring))\n self.assertTrue('Song', t.table_header)\n\n\n\n def test_IndexedSymmArray(self):\n\n from vsm.corpus.util.corpusbuilders import random_corpus\n from vsm.model.ldacgsseq import LdaCgsSeq\n from vsm.viewer.ldacgsviewer import LdaCgsViewer\n\n c = random_corpus(50000, 1000, 0, 50)\n m = LdaCgsSeq(c, 'document', K=20)\n viewer = LdaCgsViewer(c, m)\n \n li = [0, 1, 10]\n isa = viewer.dismat_top(li)\n \n self.assertEqual(isa.shape[0], len(li))\n \n\n \n \n#Define and run test suite\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestLabeleddata)\nunittest.TextTestRunner(verbosity=2).run(suite)\n" ]
[ [ "numpy.random.random", "numpy.array" ] ]
jorgecarleitao/schemaflow
[ "c489170bf720bbbc0a9dc032884d5df79a84094a" ]
[ "examples/end_to_end_kaggle.py" ]
[ "\"\"\"\nExample of solving a Kaggle problem using schemaflow's API.\n\nThis script performs an end-to-end analysis and prediction of the Kaggle exercise\nhttps://www.kaggle.com/c/house-prices-advanced-regression-techniques\n\nIt demonstrates the advantages of explicitly declaring the types in the Pipe (using Schemaflow's API).\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nimport sklearn.metrics\nimport matplotlib.pyplot as plt\n\nfrom schemaflow import types as sf_types\nfrom schemaflow import ops as sf_ops\nfrom schemaflow.pipe import Pipe\nfrom schemaflow.pipeline import Pipeline\n\n\nclass SplitNumericCategorical(Pipe):\n fit_requires = transform_requires = {'x': sf_types.PandasDataFrame(schema={})}\n transform_modifies = {'x_categorical': sf_types.PandasDataFrame(schema={}),\n 'x': sf_types.PandasDataFrame(schema={})}\n\n fitted_parameters = {'numeric_columns': sf_types.List(str)}\n\n def fit(self, data: dict, parameters: dict=None):\n self['numeric_columns'] = list(data['x'].select_dtypes(include=[np.number]).columns)\n\n def transform(self, data: dict):\n data['x_categorical'] = data['x'].drop(self['numeric_columns'], axis=1)\n data['x'] = data['x'].loc[:, self['numeric_columns']]\n return data\n\n\nclass FillNaN(Pipe):\n fit_requires = transform_modifies = transform_requires = {\n 'x': sf_types.PandasDataFrame(schema={}),\n 'x_categorical': sf_types.PandasDataFrame(schema={})}\n\n fitted_parameters = {\n 'means': sf_types.List(float),\n 'most_frequent': sf_types.List(str)}\n\n def fit(self, data: dict, parameters: dict=None):\n self['means'] = data['x'].mean(axis=0)\n self['most_frequent'] = data['x_categorical'].mode(axis=0)\n\n def transform(self, data: dict):\n data['x'] = data['x'].fillna(self['means'])\n for column in data['x_categorical'].columns:\n data['x_categorical'].loc[data['x_categorical'][column].isnull(), column] = self['most_frequent'][column][0]\n return data\n\n\nclass JoinCategoricalAsOneHot(Pipe):\n fit_requires = {'x_categorical': sf_types.PandasDataFrame(schema={})}\n transform_requires = {\n 'x': sf_types.PandasDataFrame(schema={}),\n 'x_categorical': sf_types.PandasDataFrame(schema={})\n }\n transform_modifies = {\n 'x': sf_types.PandasDataFrame(schema={}),\n 'x_categorical': sf_ops.Drop(),\n }\n\n fitted_parameters = {'label': object, 'one_hot': object}\n\n def fit(self, data: dict, parameters: dict=None):\n df = data['x_categorical'].copy()\n self['label'] = dict((column, LabelEncoder()) for column in df.columns)\n self['transformer'] = OneHotEncoder()\n\n for column in self['label']:\n df.loc[:, column] = self['label'][column].fit_transform(df.loc[:, column])\n self['transformer'].fit(df.values)\n\n def transform(self, data: dict):\n index = data['x_categorical'].index\n for column in self['label']:\n mode = data['x_categorical'].loc[:, column].mode()[0]\n\n def f(x):\n if x not in self['label'][column].classes_:\n return mode\n else:\n return x\n\n data['x_categorical'].loc[:, column] = data['x_categorical'].loc[:, column].apply(f)\n data['x_categorical'].loc[:, column] = self['label'][column].transform(data['x_categorical'].loc[:, column])\n\n data['x_categorical'] = self['transformer'].transform(data['x_categorical'])\n\n df = pd.DataFrame(data['x_categorical'].toarray(), index=index)\n data['x'] = data['x'].join(df)\n del data['x_categorical']\n return data\n\n\nclass 
BaselineModel(Pipe):\n fit_requires = transform_requires = {'x': sf_types.PandasDataFrame({})}\n\n transform_modifies = {'y_pred_baseline': sf_types.Array(np.float64)}\n\n fitted_parameters = {'mean': np.float64}\n\n def fit(self, data: dict, parameters: dict = None):\n self['mean'] = np.mean(data['y'])\n\n def transform(self, data: dict):\n data['y_pred_baseline'] = np.full(data['x'].shape[0], self['mean'])\n return data\n\n\nclass LogLassoModel(Pipe):\n transform_requires = {'x': sf_types.PandasDataFrame(schema={})}\n fit_requires = {'x': sf_types.PandasDataFrame(schema={}), 'y': sf_types.Array(float)}\n transform_modifies = {\n 'y_pred': sf_types.Array(np.float64),\n 'x': sf_ops.Drop()\n }\n\n fitted_parameters = {'model': LassoCV}\n\n def fit(self, data: dict, parameters: dict=None):\n self['model'] = LassoCV(normalize=True)\n self['model'].fit(data['x'], np.log(data['y']))\n\n def transform(self, data: dict):\n data['y_pred'] = np.exp(self['model'].predict(data['x']))\n del data['x']\n return data\n\n\ndef x_y_split(df, target_column):\n return df.drop(target_column, axis=1), df.loc[:, target_column]\n\n\ndef analyse_performance(df, target_column, pipeline, parameters: dict=None):\n train, test = train_test_split(df, test_size=0.2, random_state=1)\n\n x_train, y_train = x_y_split(train, target_column)\n x_test, y_test = x_y_split(test, target_column)\n\n pipeline.logged_fit({'x': x_train, 'y': y_train.values}, parameters)\n\n result = pipeline.logged_transform({'x': x_test})\n\n y_pred = result['y_pred']\n y_pred_baseline = result['y_pred_baseline']\n\n def metric(y_true, y_pred):\n return sklearn.metrics.mean_squared_error(np.log(y_true), np.log(y_pred))\n\n print(metric(y_test, y_pred))\n print(metric(y_test, y_pred_baseline))\n\n plt.plot(range(len(y_test)), y_test, 'o')\n plt.plot(range(len(y_test)), y_pred, 'o')\n plt.savefig('examples/comparison1.png')\n plt.close()\n\n plt.plot(y_test, y_pred, 'o', label='Lasso')\n plt.plot(y_test, y_pred_baseline, 'o', label='baseline')\n plt.plot(y_test, y_test, '-', label='')\n plt.xlabel('truth')\n plt.ylabel('pred')\n plt.legend()\n plt.savefig('examples/pred_vs_truth.png')\n plt.close()\n\n\ndef export_predictions(df, target_column, predict_pipeline, parameters: dict=None):\n\n x, y = x_y_split(df, target_column)\n\n predict_pipeline.fit({'x': x, 'y': y}, parameters)\n\n df = pd.read_csv('examples/all/test.csv', index_col='Id')\n\n result = predict_pipeline.transform({'x': df})['y_pred']\n\n pd.Series(result, name=target_column, index=df.index).to_csv('examples/submission.txt', header=True)\n\n\nif __name__ == '__main__':\n predict_pipeline = Pipeline([\n SplitNumericCategorical(),\n FillNaN(),\n JoinCategoricalAsOneHot(),\n ('baseline', BaselineModel()),\n ('model', LogLassoModel())\n ])\n\n import logging\n import sys\n\n logger = logging.getLogger('schemaflow')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n logger.addHandler(ch)\n\n # this pipeline is very generic: it does not make any assumptions about the data's format.\n predict_pipeline.check_fit({'x': sf_types.PandasDataFrame({}), 'y': sf_types.Array(np.float64)}, raise_=True)\n predict_pipeline.check_transform({'x': sf_types.PandasDataFrame({})}, raise_=True)\n\n print('expected fit schema: ', predict_pipeline.fit_requires)\n print('fitted parameters: ', predict_pipeline.fitted_parameters)\n\n print('expected transform schema: ', 
predict_pipeline.transform_requires)\n print('expected transformed schema: ', predict_pipeline.transform_schema(predict_pipeline.transform_requires))\n\n # execution of the pipeline\n target_column = 'SalePrice'\n\n df = pd.read_csv('examples/all/train.csv', index_col='Id')\n\n analyse_performance(df.copy(), target_column, predict_pipeline)\n\n export_predictions(df.copy(), target_column, predict_pipeline)\n" ]
[ [ "numpy.full", "sklearn.linear_model.LassoCV", "sklearn.preprocessing.LabelEncoder", "numpy.log", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "numpy.mean", "matplotlib.pyplot.ylabel", "pandas.Series", "sklearn.model_selection.train_test_split", "pandas.read_csv", "sklearn.preprocessing.OneHotEncoder" ] ]
andreum/user-movie-embedding
[ "0766b4fa6d61ebaf9994e61ca611dc256cd38a24" ]
[ "t9b.py" ]
[ "#\n# andre@corp.insite.com.br\n# 2017-10-10\n# Codigo que faz regressao simples e encontra embeddings\n#\n# a ideia aqui e a seguinte:\n# - carregar dados do movielens\n# - inicializar o embedding de forma aleatoria\n# - encontrar os embeddings de filmes e de usuarios que gerem o menor erro possivel\n# t8: retira os bias de filmes e usuarios e substitui por um unico bias global\n# t9b: multilayer nn\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom time import gmtime, strftime, localtime\n\nimport math\nimport time\nimport sys\nimport os\n#from pylab import *\nfrom scipy import sparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport random\n\nfrom tensorflow.python import debug as tf_debug\n\nNUM_USERS = 247754\nNUM_MOVIES = 151712\nSECOND_FEATURES = 32\nbatch_size = 99999\nnum_steps = 2000001\nbase_alpha = 0.0003\ncount =1\n# Regularization\nlbda = 0.002\ndecay = 0.9999\nnum_ratings = 0\nTRAIN_INPUT_FILE=\"train-ratings-3.csv\"\nTEST_INPUT_FILE=\"validation-ratings-3.csv\"\nuse_bias = True\nuse_square = True\nuse_second_layer = True\ntf.set_random_seed(1)\nround_ranking = 0\n\nt0 = time.perf_counter()\n\n#import tracemalloc\n\n#tracemalloc.start()\n\n\nif sys.argv[1].isdigit():\n NUM_FEATURES = int(sys.argv[1])\nelse:\n raise Exception(\"parameter NUM_FEATURES is required\")\n\nif len(sys.argv) < 3:\n raise Exception(\"parameter round_ranking is required (y, Y, s, S, 1, T, t means should round down. Anything else means it shouldn't\")\n\nif sys.argv[2] in (\"y\", \"Y\", \"s\", \"S\", \"1\", \"T\", \"t\"):\n round_ranking = 1\nelse:\n round_raking = 0\n\nif len(sys.argv) < 4:\n use_activation = 'linear'\n# raise Exception('parameter activation is required. It can be \"linear\", \"sigmoid\" or \"relu\"')\nelse:\n if sys.argv[3] in (\"sigmoid\" , \"linear\", \"relu\"):\n use_activation = sys.argv[3]\n\nif use_activation == 'sigmoid':\n scale = 6.0\nelse:\n scale = 1.0\n\ndef loga(msg):\n now = time.perf_counter()\n print(\"%6.2f: %s\" % (now - t0, msg))\n\ndef load_data(train_fname, test_fname):\n global NUM_USERS\n global NUM_MOVIES\n global round_ranking\n global num_ratings\n print(\"Loading data from {} and {}\".format(train_fname, test_fname))\n full_train_data = pd.read_csv(train_fname, sep=\",\").sample(frac=1)\n full_test_data = pd.read_csv(test_fname, sep=\",\").sample(frac=1)\n train_data = np.array(full_train_data[[\"userId\", \"movieId\"]])\n train_labels = np.array(full_train_data[[\"rating\"]])\n test_data = np.array(full_test_data[[\"userId\", \"movieId\"]])\n test_labels = np.array(full_test_data[[\"rating\"]])\n if (round_ranking):\n train_labels = np.floor(train_labels)\n NUM_USERS = np.amax(train_data[:,0]) + 1\n NUM_MOVIES = np.amax(train_data[:,1]) + 1\n num_ratings = train_data.shape[0]\n loga(\"NUM_USERS = {}\".format(NUM_USERS))\n loga(\"NUM_MOVIES = {}\".format(NUM_MOVIES))\n loga(\"num ratings = {}\".format(num_ratings))\n loga(\"batch_size = {}\".format(batch_size))\n loga(\"num_steps = {}\".format(num_steps))\n return train_data, train_labels, test_data, test_labels\n\ndef apply_activation(x):\n if (use_activation) == 'sigmoid':\n return tf.sigmoid(x) * scale\n elif use_activation == 'relu':\n return tf.nn.relu(x)\n else:\n return x\n\ndef variable_summaries(var, varname):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(varname):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n #with tf.name_scope('stddev'):\n stddev = 
tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\nloga(\"feature: using {0} activation with scale {1}\".format(use_activation, scale))\nif use_activation == 'sigmoid':\n activation_str = \"sigmoid_{}\".format(scale)\nelse:\n activation_str = use_activation\n base_alpha = base_alpha / 10.0\n\ni = 1\nwhile (os.path.isdir(\"t9b-r{0:d}-bias{1:d}-L2{2:d}-f{3}-a{4}-round{5}-{6}\".format(NUM_FEATURES, int(use_bias), int(use_square), TRAIN_INPUT_FILE, activation_str, round_ranking, i))):\n i = i + 1\ndirname = \"t9b-r{0:d}-bias{1:d}-L2{2:d}-f{3}-a{4}-round{5}-{6}\".format(NUM_FEATURES, int(use_bias), int(use_square), TRAIN_INPUT_FILE, activation_str, round_ranking, i)\nos.mkdir(dirname)\nprefix = dirname + \"/\"\nsys.stdout = open(prefix + \"out\", \"w\", 1)\ngraph = tf.Graph()\n\nloga(\"feature: using {} activation\".format(activation_str))\n\ntrain_data, train_labels, test_data, test_labels = load_data(TRAIN_INPUT_FILE, TEST_INPUT_FILE)\n\nwith graph.as_default():\n tf_train_data = tf.placeholder(tf.int32, shape=(None, 2))\n tf_train_labels = tf.placeholder(tf.float32, shape=(None, 1))\n tf_lr = tf.placeholder(tf.float32)\n tf_batch_size = tf.cast(tf.shape(tf_train_data)[0], tf.float32)\n print(\"when setting graph: NUM_USERS: {}\".format(NUM_USERS))\n print(\"when setting graph: NUM_MOVIES: {}\".format(NUM_MOVIES))\n\n tf_count = tf.get_variable(\"count\", dtype=tf.int32, initializer=tf.constant(count))\n if (NUM_FEATURES > 0):\n ones = tf.constant(1., shape=(NUM_FEATURES,1))\n sec_ones = tf.constant(1., shape=(SECOND_FEATURES,1))\n with tf.name_scope('embeddings'):\n user_embeddings = tf.get_variable(\"user_embeddings\", [NUM_USERS, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))\n movie_embeddings = tf.get_variable(\"movie_embeddings\", [NUM_MOVIES, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))\n variable_summaries(user_embeddings, 'user_embeddings')\n variable_summaries(movie_embeddings, 'movie_embeddings')\n if use_second_layer:\n print(\"using another layer...\")\n with tf.name_scope(\"second_layer\"):\n second_weights = tf.get_variable(\"second_weights\", [NUM_FEATURES, SECOND_FEATURES], initializer = tf.random_normal_initializer(0,1*math.sqrt(1/SECOND_FEATURES)))\n second_bias = tf.get_variable(\"second_bias\", [1, SECOND_FEATURES], initializer = tf.random_normal_initializer(0,1/SECOND_FEATURES))\n \n with tf.name_scope('local_embeddings'):\n tf_user_embeddings = tf.gather(user_embeddings, tf_train_data[:,0])\n tf_movie_embeddings = tf.gather(movie_embeddings, tf_train_data[:,1])\n variable_summaries(tf_user_embeddings, 'tf_user_embeddings')\n variable_summaries(tf_movie_embeddings, 'tf_movie_embeddings')\n else:\n user_embeddings = tf.get_variable(\"user_embeddings\", initializer = tf.constant(0.0))\n movie_embeddings = tf.get_variable(\"movie_embeddings\", initializer = tf.constant(0.0))\n movie_embeddings = tf.abs(movie_embeddings)\n #bias = tf.get_variable(\"bias\", dtype=tf.float32, initializer=tf.constant(3.5))\n user_bias = tf.get_variable(\"user_bias\", [NUM_USERS, 1], initializer=tf.random_normal_initializer(0.0))\n movie_bias = tf.get_variable(\"movie_bias\", [NUM_MOVIES, 1], initializer=tf.random_normal_initializer(3.1))\n tf_user_bias = tf.gather(user_bias, tf_train_data[:,0])\n tf_movie_bias = tf.gather(movie_bias, 
tf_train_data[:,1])\n with tf.name_scope(\"biases\"):\n variable_summaries(user_bias, 'user_bias')\n variable_summaries(movie_bias, 'movie_bias')\n variable_summaries(tf_user_bias, 'tf_user_bias')\n variable_summaries(tf_movie_bias, 'tf_movie_bias')\n\n #train_prediction = tf.tensordot(tf_user_embeddings, tf_movie_embeddings, axes=1)\n if (NUM_FEATURES > 0):\n #if (use_bias):\n # second_layer = tf.matmul(tf.multiply(tf_user_embeddings, tf_movie_embeddings), ones) + tf_user_bias + tf_movie_bias\n #else:\n second_layer = tf.multiply(tf_user_embeddings, tf_movie_embeddings)\n \n #train_prediction = tf.matmul(tf.multiply(tf_user_embeddings, tf_movie_embeddings), ones) + tf_movie_bias + bias\n else:\n #train_prediction = tf_user_bias + tf_movie_bias + bias \n #train_prediction = 5.0 * tf.sigmoid(tf_user_bias + tf_movie_bias)\n second_layer = tf_movie_bias\n if use_bias:\n\t loga(\"feature: using biases\")\n else:\n\t loga(\"feature: NOT using biases\")\n with tf.name_scope('results'):\n # what i have here is a vector that is tf_batch_size x NUM_FEATURES dimensions\n if use_second_layer:\n #train_prediction = tf.layers.dense(tf.layers.dropout(second_layer, 0.5), 32, activation = tf.tanh) # batch_size x 32\n train_prediction = apply_activation(tf.matmul(tf.matmul(second_layer, second_weights) + second_bias, sec_ones))\n #variable_summaries(second_weights, 'second_weights')\n #variable_summaries(second_bias, 'second_bias')\n else:\n train_prediction = apply_activation(tf.matmul(second_layer, ones))\n\n if (use_bias):\n train_prediction = train_prediction + tf_user_bias + tf_movie_bias\n\n error = tf.subtract(train_prediction, tf_train_labels)\n sse = tf.reduce_sum(tf.square(error))\n tf.summary.scalar(\"sse\", sse)\n if (NUM_FEATURES > 0):\n if (use_square):\n loga(\"feature: using L2 on movie embedding regularization\")\n regularization = tf.reduce_sum(tf.square(tf_user_embeddings))/NUM_FEATURES/tf_batch_size + tf.reduce_sum(tf.square(tf_movie_embeddings))/NUM_FEATURES/tf_batch_size\n else:\n loga(\"feature: using L1 on movie embedding regularization\")\n regularization = tf.reduce_sum(tf.square(tf_user_embeddings))/NUM_FEATURES/tf_batch_size + tf.reduce_sum(tf.abs(tf_movie_embeddings))/NUM_FEATURES/tf_batch_size\n if (use_bias):\n loga(\"feature: using bias, so regularization has to treat that\")\n regularization = regularization + tf.reduce_sum(tf.square(tf_movie_bias)/NUM_MOVIES + tf.square(tf_user_bias)/NUM_USERS)\n else:\n regularization = tf.reduce_sum(tf.square(tf_movie_bias)) + tf.reduce_sum(tf.square(tf_user_bias))\n if use_second_layer:\n loga(\"feature: using second layer in regularization\")\n regularization = regularization + tf.reduce_sum(tf.square(second_weights))/SECOND_FEATURES/NUM_FEATURES/tf_batch_size + tf.reduce_sum(tf.square(second_bias))/SECOND_FEATURES/tf_batch_size\n\t # There's o need to regularize the biases\n # + tf.reduce_sum(tf.square(tf_movie_bias))*batch_size/NUM_MOVIES + tf.reduce_sum(tf.square(tf_user_bias)) * batch_size / NUM_USERS\n loss = sse + lbda * regularization\n tf.summary.scalar(\"loss\", loss)\n mse = sse / tf_batch_size\n tf.summary.scalar(\"batch_size\", tf_batch_size)\n tf.summary.scalar(\"mse\", mse)\n optimizer = tf.train.GradientDescentOptimizer(tf_lr).minimize(loss)\n histogram = tf.histogram_fixed_width(error, [-4.5, 4.5], nbins=10)\n tf.summary.histogram('error', error)\n merged = tf.summary.merge_all()\n saver = tf.train.Saver()\n\ntrain_writer = tf.summary.FileWriter(dirname + \"/train\", graph)\nvalidation_writer = 
tf.summary.FileWriter(dirname + \"/validation\", graph)\n\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n uemb, memb = session.run([user_embeddings, movie_embeddings])\n print(\"user embeddings: {}\\n\",uemb)\n print(\"movie embeddings: {}\\n\",memb)\n acccount = acctot = accmse = 0.0\n old_loss = 1e20\n lr = base_alpha\n decay = 1.0 - (batch_size/num_ratings)\n display_interval = int(num_ratings / batch_size)\n epoch = 0\n pace = 1.01\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_data[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n #snapshot = tracemalloc.take_snapshot()\n #top_stats = snapshot.statistics('lineno')\n #print(\"[ Top 10 ]\")\n #for stat in top_stats[:10]:\n # print(stat)\n\n feed_dict = {tf_train_data : batch_data, tf_train_labels : batch_labels, tf_lr: lr}\n if (step % display_interval == 0):\n _, l, predictions, uemb, memb, _mse, hist, ubias, mbias, summary = session.run(\n [optimizer, loss, train_prediction, user_embeddings, movie_embeddings, mse, histogram, user_bias, movie_bias, merged], feed_dict=feed_dict)\n else:\n _, l, predictions, uemb, memb, _mse, hist, ubias, mbias = session.run(\n [optimizer, loss, train_prediction, user_embeddings, movie_embeddings, mse, histogram, user_bias, movie_bias], feed_dict=feed_dict)\n\n #print(\"sessino run\")\n acccount = acccount * decay + 1\n acctot = acctot * decay + l / batch_size\n accmse = accmse * decay + _mse\n exploss = acctot/acccount\n expmse = accmse/acccount\n\n if (step % display_interval == 0):\n # TODO: make a loop to test all the itens and get the loss\n epoch = epoch + 1\n train_writer.add_summary(summary, epoch)\n feed_dict = {tf_train_data : test_data, tf_train_labels : test_labels, tf_lr: lr}\n test_l, test_predictions, test_mse, test_hist, val_summary = session.run(\n [loss, train_prediction, mse, histogram, merged], feed_dict=feed_dict)\n validation_writer.add_summary(val_summary, epoch)\n\n if (test_l / old_loss >= 0.9999):\n lr = lr * 0.2\n #batch_size = int(batch_size * 0.7) + 300\n #decay = 1.0 - (batch_size/num_ratings)\n #display_interval = int(num_ratings / batch_size)\n #pace = 1.0\n else:\n lr = lr * pace\n old_loss = test_l\n\t #\n loga(\"Minibatch loss at epoch %d step %d: %f (%f): (%f) on test set: (%f)\" % (epoch, step, l, exploss, l/batch_size, test_l))\n print(\" Mean Square Error: %.10f - exp=%.9f - test mse=%.9f\" % (_mse, expmse, test_mse))\n print(\" Learning Rate: %.10f batch_size=%d\" % (lr, batch_size))\n if (NUM_FEATURES > 0):\n print(\" user embeddings: %f: %s\" % (np.linalg.norm(uemb)/math.sqrt(uemb.size), np.mean(uemb, 0)))\n print(\" movie embeddings: %f: %s\" % (np.linalg.norm(memb)/math.sqrt(memb.size), np.mean(memb, 0)))\n print(\" user bias: %f: %f\" % (np.linalg.norm(ubias)/math.sqrt(ubias.size), np.mean(ubias, 0)))\n print(\" movie bias: %f: %f\" % (np.linalg.norm(mbias)/math.sqrt(mbias.size), np.mean(mbias, 0)))\n #print(\"bias: %f\" % (_bias))\n print(\" error: %s\" % (hist))\n print(\" test: %s\" % (test_hist))\n #print(\"user embeddings: %f\" % (user_embeddings))\n #print(\"embeddings: {}\".format(emb))\n #valid_prediction.eval(), valid_labels))\n #print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n if (epoch % 20 == 0):\n print(\"saving model to {}model.ckpt\".format(prefix))\n saver.save(session, prefix + \"model.ckpt\")\n print(\"saving... 
done\")\n\n if lr < 1e-10:\n break\n print(\"ENDED. Steps done: {}\".format(step))\n print(\"saving model to {}model.ckpt\".format(prefix))\n saver.save(session, prefix + \"model.ckpt\", global_step=step)\n if (NUM_FEATURES > 0):\n print(\"user_embeddings:\\n{}\".format(np.around(uemb, 3)))\n print(\"movie_embeddings:\\n{}\".format(np.around(memb, 3)))\n np.savetxt(prefix + \"user_embeddings.csv.gz\", uemb, delimiter=',', fmt=\"%.7f\")\n np.savetxt(prefix + \"movie_embeddings.csv.gz\", memb, delimiter=',', fmt=\"%.7f\")\n else:\n print(\"NO EMBEDDINGS\")\n np.savetxt(prefix + \"user_bias.csv.gz\", ubias, delimiter=',', fmt=\"%.7f\")\n np.savetxt(prefix + \"movie_bias.csv.gz\", mbias, delimiter=',', fmt=\"%.7f\")\n\n" ]
[ [ "tensorflow.reduce_min", "tensorflow.matmul", "numpy.mean", "pandas.read_csv", "tensorflow.global_variables_initializer", "tensorflow.random_normal_initializer", "tensorflow.set_random_seed", "tensorflow.train.GradientDescentOptimizer", "tensorflow.shape", "numpy.linalg.norm", "tensorflow.sigmoid", "tensorflow.summary.histogram", "tensorflow.subtract", "tensorflow.train.Saver", "tensorflow.constant", "numpy.around", "tensorflow.histogram_fixed_width", "tensorflow.abs", "numpy.array", "numpy.savetxt", "tensorflow.nn.relu", "tensorflow.summary.scalar", "tensorflow.Session", "tensorflow.placeholder", "numpy.amax", "tensorflow.name_scope", "tensorflow.summary.merge_all", "numpy.floor", "tensorflow.multiply", "tensorflow.Graph", "tensorflow.reduce_max", "tensorflow.gather", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.square" ] ]
xinshi-chen/l2stop
[ "f9b4f23269c2db110c48fb883287049849492341" ]
[ "dncnn_stop/train_stop_kl.py" ]
[ "import cv2\nimport torch\nimport torchvision.utils\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport glob\nimport pprint\nimport os\nimport time\nfrom shutil import copyfile\nimport pdb\nimport random\nfrom collections import Counter, namedtuple\n\nfrom models import DnCNN_DS, MulticlassNet\nfrom trainer import PolicyKL\nfrom utils import *\nfrom torch.utils.data import DataLoader, sampler\nfrom dataset import Dataset\n\nfrom policy_args import cmd_args as args\n\nprint('In total, using {} GPUs'.format(torch.cuda.device_count()))\nfor i in range(torch.cuda.device_count()):\n print(torch.cuda.get_device_name(i))\n\n\n# train the policy\ndef policy_training(device='cuda'):\n noiseset = [35, 45, 55]\n # set random seed for all gpu related things\n seed_torch(seed=args.seed)\n\n # load msdnet and the data\n model = DnCNN_DS(channels=1, num_of_layers=args.num_of_layers)\n\n model = torch.nn.DataParallel(model).cuda()\n\n if os.path.exists(os.path.join(args.outf, 'net.pth')):\n print('Loading denoise model...')\n model.load_state_dict(torch.load(os.path.join(args.outf, 'net.pth')))\n else:\n print('Need the classification model!')\n return\n\n # need to augment the validation set to generate training set for PolicyNet\n print('Loading dataset ...\\n')\n\n # load the original image instead\n # dataset_train = Dataset(train=True, data_folder=args.data_folder)\n # total_train = len(dataset_train)\n # val_size = int(total_train*0.2)\n # print(\"Training data for policynet: \", val_size)\n # # load indices file\n # indices = np.load(os.path.join(args.outf, 'indices.npy'))\n # val_idx = indices[:val_size]\n # train_idx = indices[val_size:]\n # train_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers, \n # sampler=sampler.SubsetRandomSampler(train_idx),\n # batch_size=args.batch_size, shuffle=False)\n # val_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers,\n # sampler=sampler.SubsetRandomSampler(val_idx),\n # batch_size=args.batch_size, shuffle=False)\n\n # load the original test data\n dataset_train = load_imgs('train')\n total_train = len(dataset_train)\n val_size = int(total_train*args.val_ratio)\n indices = list(range(total_train))\n random.Random(0).shuffle(indices)\n np.save(os.path.join(args.outf, 'policy_train_indices.npy'), np.array(indices))\n val_idx = indices[:val_size]\n train_idx = indices[val_size:]\n train_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers, \n sampler=sampler.SubsetRandomSampler(train_idx),\n batch_size=args.batch_size, shuffle=False)\n val_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers,\n sampler=sampler.SubsetRandomSampler(val_idx),\n batch_size=1, shuffle=False)\n print('Training data size: ', len(train_loader.dataset))\n # print('Validation data size: ', len(val_loader.dataset))\n\n\n dataset_val = Dataset(train=False)\n test_loader_12 = DataLoader(dataset=dataset_val, num_workers=4, batch_size=1, shuffle=False)\n # use Set68 as testdataset\n dataset_test = load_imgs('Set68')\n test_loader = DataLoader(dataset=dataset_test, num_workers=4, batch_size=1, shuffle=False)\n\n # need to construct the policy network and train the policy net.\n # the architecture of the policy network need to be designed.\n\n ######################################\n # need to think about the model of policynet\n ######################################\n model.eval()\n p_true_all = list()\n psnr_all = list()\n np.random.seed(seed=args.seed)\n test_noiseL = 
np.random.choice(noiseset, size=len(val_loader.dataset))\n # print(test_noiseL)\n print('Average noise level: ', np.average(test_noiseL))\n for i, batch in enumerate(val_loader):\n # for i in range(1):\n # batch = next(iter(train_loader))\n data = batch\n data = data.cuda()\n noise = torch.zeros(data.size())\n noise = torch.FloatTensor(data.size()).normal_(mean=0, \n std=test_noiseL[i]/255., generator=torch.manual_seed(args.seed))\n noise = noise.cuda()\n\n with torch.no_grad():\n outputs = model(data+noise)\n p_true, mse_all = PolicyKL.true_posterior(args, outputs, noise)\n p_true_all.append(p_true)\n \n # psnrs = list()\n # for pred in outputs:\n # psnr = batch_PSNR(torch.clamp(data+noise-pred, 0., 1.),\n # data, 1.)\n # psnrs.append(psnr)\n # psnr_all.append(np.array(psnrs))\n # psnr_all = np.stack(psnr_all)\n\n\n p_true = torch.cat(p_true_all, dim=0)\n p_det = max_onehot(p_true, dim=-1, device=device)\n p_true = torch.mean(p_true, dim=0)\n # find positions with nonzero posterior\n p_det_index = torch.argmax(p_det, dim=1)\n print(Counter(list(p_det_index.cpu().numpy())))\n p_det = torch.mean(p_det, dim=0)\n train_post = {}\n nz_post = {}\n i = 0\n for t in range(len(outputs)):\n if p_det[t] > 0.001:\n # if p_det[t] > -1:\n train_post[i] = t\n nz_post[i] = t\n i += 1\n del train_post[i-1]\n\n p_str = 'val p true:['\n p_str += ','.join(['%0.3f' % p_true[t] for t in nz_post.values()])\n print(p_str+']')\n\n p_str = 'val p true det:['\n p_str += ','.join(['%0.3f' % p_det[t] for t in nz_post.values()])\n print(p_str+']')\n\n print(nz_post)\n ######################################\n \n\n # initialize nets with nonzero posterior\n if args.policy_type == 'multiclass':\n score_net = MulticlassNet(args, nz_post, 1)\n elif args.policy_type == 'sequential':\n score_net = MulticlassNet(args, train_post, 1)\n else:\n print('Model not implemented!!')\n return\n score_net = torch.nn.DataParallel(score_net)\n score_net = score_net.cuda()\n # pdb.set_trace()\n\n if args.restart and os.path.exists(os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type))):\n print('Loading previous policynet model...')\n dump = os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type))\n score_net.load_state_dict(torch.load(dump))\n\n # train\n if args.phase == 'train':\n\n # start training\n optimizer = optim.Adam(list(score_net.parameters()),\n lr=1e-3,\n weight_decay=args.weight_decay)\n milestones = [10, 20, 40, 60, 80]\n # gammas = [0.4, 0.2, 0.2, 0.2, 0.2]\n gammas = [1, 1, 1, 1, 1]\n scheduler = MultiStepMultiLR(optimizer, milestones=milestones, gammas=gammas)\n trainer = PolicyKL(args=args,\n model=model,\n score_net=score_net,\n train_post=train_post,\n nz_post=nz_post,\n optimizer=optimizer,\n train_loader=train_loader,\n val_loader = val_loader,\n test_loader=test_loader, \n device=device,\n scheduler=scheduler)\n trainer.train()\n # test\n dump = os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type))\n score_net.load_state_dict(torch.load(dump))\n\n PolicyKL.test(args=args,\n score_net=score_net,\n model=model,\n data_loader=test_loader,\n nz_post=nz_post,\n device=device,\n noiseset=[75]\n )\n print(args.outf)\n\nif __name__ == '__main__':\n policy_training()\n\n" ]
[ [ "torch.cat", "numpy.array", "torch.argmax", "numpy.random.seed", "torch.no_grad", "torch.cuda.get_device_name", "torch.cuda.device_count", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.load", "numpy.average", "torch.utils.data.sampler.SubsetRandomSampler", "torch.mean", "torch.nn.DataParallel" ] ]
aidiary/PRML
[ "db2dfc10bd39dc5649528d3778aa5fffb283186b" ]
[ "sklearn/plot_svm_iris.py" ]
[ "#coding:utf-8\nimport numpy as np\nimport pylab as pl\nfrom sklearn import svm, datasets\n\n\"\"\"\n線形SVMでirisデータを分類\n\"\"\"\n\n# データをロード\niris = datasets.load_iris()\nX = iris.data[:, :2]\nY = iris.target\n\n# 分類器を学習\nclf = svm.SVC(C=1.0, kernel='linear')\nclf.fit(X, Y)\n\npl.figure(1)\n\n# メッシュの各点の分類結果を描画\nh = 0.02\nx_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\ny_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\nZ = clf.predict(np.c_[xx.ravel(), yy.ravel()])\nZ = Z.reshape(xx.shape)\npl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)\n\n# 訓練データをプロット\npl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)\npl.xlabel('Sepal length')\npl.ylabel('Sepal width')\n\npl.xlim(xx.min(), xx.max())\npl.ylim(yy.min(), yy.max())\n\npl.show()\n" ]
[ [ "numpy.arange", "sklearn.datasets.load_iris", "sklearn.svm.SVC" ] ]
sbrugman/probfit
[ "0088576251a05b0af64f24cd89f44343b325e988" ]
[ "probfit/oneshot.py" ]
[ "# -*- coding: utf-8 -*-\nimport collections\nimport itertools as itt\n\nimport numpy as np\nfrom iminuit import Minuit\n\nfrom ._libstat import _vector_apply\nfrom .costfunc import BinnedChi2, BinnedLH, UnbinnedLH\nfrom .nputil import minmax\n\n\ndef fit_uml(f, data, quiet=False, print_level=0, *arg, **kwd):\n \"\"\"\n perform unbinned likelihood fit\n :param f: pdf\n :param data: data\n :param quiet: if not quite draw latest fit on fail fit\n :param printlevel: minuit printlevel\n :return:\n \"\"\"\n uml = UnbinnedLH(f, data)\n minuit = Minuit(uml, print_level=print_level, **kwd)\n minuit.set_strategy(2)\n minuit.migrad()\n if not minuit.migrad_ok() or not minuit.matrix_accurate():\n if not quiet:\n from matplotlib import pyplot as plt\n\n plt.figure()\n uml.show()\n print(minuit.values)\n return (uml, minuit)\n\n\ndef fit_binx2(f, data, bins=30, bound=None, print_level=0, quiet=False, *arg, **kwd):\n \"\"\"\n perform chi^2 fit\n :param f:\n :param data:\n :param bins:\n :param range:\n :param printlevel:\n :param quiet:\n :param arg:\n :param kwd:\n :return:\n \"\"\"\n uml = BinnedChi2(f, data, bins=bins, bound=bound)\n minuit = Minuit(uml, print_level=print_level, **kwd)\n minuit.set_strategy(2)\n minuit.migrad()\n if not minuit.migrad_ok() or not minuit.matrix_accurate():\n if not quiet:\n from matplotlib import pyplot as plt\n\n plt.figure()\n uml.show()\n print(minuit.values)\n\n return (uml, minuit)\n\n\ndef fit_binlh(\n f,\n data,\n bins=30,\n bound=None,\n quiet=False,\n weights=None,\n use_w2=False,\n print_level=0,\n pedantic=True,\n extended=False,\n *arg,\n **kwd\n):\n \"\"\"\n perform bin likelihood fit\n :param f:\n :param data:\n :param bins:\n :param range:\n :param quiet:\n :param weights:\n :param use_w2:\n :param printlevel:\n :param pedantic:\n :param extended:\n :param arg:\n :param kwd:\n :return:\n \"\"\"\n uml = BinnedLH(\n f,\n data,\n bins=bins,\n bound=bound,\n weights=weights,\n use_w2=use_w2,\n extended=extended,\n )\n minuit = Minuit(uml, print_level=print_level, pedantic=pedantic, **kwd)\n minuit.set_strategy(2)\n minuit.migrad()\n if not minuit.migrad_ok() or not minuit.matrix_accurate():\n if not quiet:\n from matplotlib import pyplot as plt\n\n plt.figure()\n uml.show()\n print(minuit.values)\n return (uml, minuit)\n\n\ndef tuplize(x):\n \"\"\"\n :param x:\n :return:\n \"\"\"\n if isinstance(x, collections.Iterable):\n return x\n else:\n return tuple([x])\n\n\ndef pprint_arg(vnames, value):\n \"\"\"\n pretty print argument\n :param vnames:\n :param value:\n :return:\n \"\"\"\n ret = \"\"\n for name, v in zip(vnames, value):\n ret += \"{}={};\".format(name, str(v))\n return ret\n\n\ndef try_uml(f, data, bins=40, fbins=1000, *arg, **kwd):\n from matplotlib import pyplot as plt\n\n fom = UnbinnedLH(f, data)\n narg = f.func_code.co_argcount\n vnames = f.func_code.co_varnames[1:narg]\n my_arg = [tuplize(kwd[name]) for name in vnames]\n h, e, _ = plt.hist(data, bins=bins, normed=True, histtype=\"step\")\n vx = np.linspace(e[0], e[-1], fbins)\n first = True\n minfom = 0\n minarg = None\n for thisarg in itt.product(*my_arg):\n vy = _vector_apply(f, vx, thisarg)\n plt.plot(vx, vy, \"-\", label=pprint_arg(vnames, thisarg))\n thisfom = fom(*thisarg)\n if first or thisfom < minfom:\n minfom = thisfom\n minarg = thisarg\n first = False\n leg = plt.legend(fancybox=True)\n leg.get_frame().set_alpha(0.5)\n ret = {k: v for k, v in zip(vnames, minarg)}\n return ret\n\n\ndef try_binlh(\n f,\n data,\n weights=None,\n bins=40,\n fbins=1000,\n show=\"both\",\n 
extended=False,\n bound=None,\n *arg,\n **kwd\n):\n from matplotlib import pyplot as plt\n\n if bound is None:\n bound = minmax(data)\n fom = BinnedLH(f, data, extended=extended, bound=bound)\n narg = f.func_code.co_argcount\n vnames = f.func_code.co_varnames[1:narg]\n my_arg = [tuplize(kwd[name]) for name in vnames]\n h, e = None, None\n if show == \"both\":\n h, e, _ = plt.hist(\n data,\n bins=bins,\n range=bound,\n histtype=\"step\",\n weights=weights,\n normed=not extended,\n )\n else:\n h, e = np.histogram(\n data, bins=bins, range=bound, weights=weights, normed=not extended\n )\n bw = e[1] - e[0]\n\n vx = np.linspace(e[0], e[-1], fbins)\n first = True\n minfom = 0\n minarg = None\n for thisarg in itt.product(*my_arg):\n vy = _vector_apply(f, vx, thisarg)\n if extended:\n vy *= bw\n plt.plot(vx, vy, \"-\", label=pprint_arg(vnames, thisarg))\n thisfom = fom(*thisarg)\n if first or thisfom < minfom:\n minfom = thisfom\n minarg = thisarg\n first = False\n leg = plt.legend(fancybox=True)\n leg.get_frame().set_alpha(0.5)\n ret = {k: v for k, v in zip(vnames, minarg)}\n return ret\n\n\ndef try_chi2(f, data, weights=None, bins=40, fbins=1000, show=\"both\", *arg, **kwd):\n from matplotlib import pyplot as plt\n\n fom = BinnedChi2(f, data)\n narg = f.func_code.co_argcount\n vnames = f.func_code.co_varnames[1:narg]\n my_arg = [tuplize(kwd[name]) for name in vnames]\n h, e = None, None\n if show == \"both\":\n h, e, _ = plt.hist(data, bins=bins, histtype=\"step\", weights=weights)\n else:\n h, e = np.histogram(data, bins=bins, weights=weights)\n bw = e[1] - e[0]\n vx = np.linspace(e[0], e[-1], fbins)\n first = True\n minfom = 0\n minarg = None\n for thisarg in itt.product(*my_arg):\n vy = _vector_apply(f, vx, thisarg) * bw\n plt.plot(vx, vy, \"-\", label=pprint_arg(vnames, thisarg))\n thisfom = fom(*thisarg)\n if first or thisfom < minfom:\n minfom = thisfom\n minarg = thisarg\n first = False\n leg = plt.legend(fancybox=True)\n leg.get_frame().set_alpha(0.5)\n ret = {k: v for k, v in zip(vnames, minarg)}\n return ret\n\n\n# def randfr(r):\n# \"\"\"\n# generate a uniform random number with in range r\n# :param r: tuple range\n# :return: float\n# \"\"\"\n# b = r[1]\n# a = r[0]\n# return np.random.ranf() * (b - a) + a\n\n# def guess_initial(alg, f, data, ntry=100, guessrange=(-100, 100), draw=False, *arg, **kwd):\n# \"\"\"\n# This is very bad at the moment don't use it\n# \"\"\"\n# fom = alg(f, data, *arg, **kwd)\n# first = True\n# minfom = 0\n# minparam = ()\n# narg = fom.func_code.co_argcount\n# vnames = fom.func_code.co_varnames[:narg]\n# ranges = {}\n# for vname in vnames:\n# if 'limit_' + vname in kwd:\n# ranges[vname] = kwd['limit_' + vname]\n# else:\n# ranges[vname] = guessrange\n\n# for i in range(ntry):\n# arg = []\n# for vname in vnames:\n# arg.append(randfr(ranges[vname]))\n# try:\n# thisfom = fom(*arg)\n# if first or thisfom < minfom:\n# first = False\n# minfom = thisfom\n# minparam = arg\n# except ZeroDivisionError:\n# pass\n\n# print(minparam, minfom)\n# ret = {}\n# for vname, bestguess in zip(vnames, minparam):\n# ret[vname] = bestguess\n# if draw:\n# fom(*minparam)\n# fom.draw()\n# return ret\n" ]
[ [ "numpy.histogram", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "numpy.linspace" ] ]
treese41528/LB-CNN
[ "8b3c99a9149d570096dc629f70a5a6ddba71cafa" ]
[ "SimpleNet/SimpleNet.py" ]
[ "# -*- coding: utf-8 -*-\r\n# SimpleNetV1(2016)\r\n# Implementation of https://arxiv.org/abs/1608.06037\r\n# Lets keep it simple, Using simple architectures to outperform deeper and more complex architectures\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom collections import OrderedDict\r\nimport os\r\n\r\n\r\nclass _ConvLayer(nn.Module):\r\n def __init__(self,num_input_features,num_output_features):\r\n super(_ConvLayer,self).__init__()\r\n #self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size = 3, stride = 1, padding = 1, bias=False))\r\n #self.add_module('norm', nn.BatchNorm2d(num_output_features, eps=1e-05, momentum=0.05, affine=True,track_running_stats = True))\r\n #self.add_module('relu', nn.ReLU(inplace=True))\r\n self.conv = nn.Conv2d(num_input_features, num_output_features, kernel_size = 3, stride = 1, padding = 1, bias=False)\r\n self.norm = nn.BatchNorm2d(num_output_features, eps=1e-05, momentum=0.05, affine=True,track_running_stats = True)\r\n self.relu = nn.ReLU(inplace=True)\r\n \r\n def forward(self,x_in):\r\n x_out = self.relu(self.norm(self.conv(x_in)))\r\n return x_out\r\n\r\nclass _ConvBlock(nn.Module):\r\n def __init__(self,num_layers,num_input_features,num_output_features):\r\n super(_ConvBlock,self).__init__()\r\n self.convBlock_features = nn.ModuleDict()\r\n for l in range(num_layers):\r\n conv_layer = _ConvLayer(num_input_features[l],num_output_features[l])\r\n self.convBlock_features['convlayer%d' % (l + 1)] = conv_layer\r\n \r\n def forward(self,x_in):\r\n x_out = x_in\r\n for name, layer in self.convBlock_features.items():\r\n x_out = layer(x_out)\r\n return x_out\r\n\r\nclass SimpleNet(nn.Module):\r\n def __init__(self,num_classes,num_input_channels):\r\n super(SimpleNet,self).__init__()\r\n self.num_classes = num_classes\r\n self.num_input_channels = num_input_channels\r\n \r\n self.features = nn.Sequential(OrderedDict([]))\r\n self._makeLayers()\r\n self.dropOutFinal = nn.Dropout(0.1)\r\n \r\n self.classifier = nn.Linear(256, num_classes)\r\n \r\n \r\n \r\n \r\n def _makeLayers(self):\r\n num_input_features = [self.num_input_channels,64,128,128]\r\n num_output_features = [64,128,128,128]\r\n num_layers = 4\r\n conv_block = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n self.features.add_module('convBlock1',conv_block)\r\n self.features.add_module('pool1', nn.MaxPool2d(kernel_size= 2, stride= 2, dilation= 1, ceil_mode = False))\r\n self.features.add_module('dropOut1',nn.Dropout2d(p=0.1))\r\n \r\n \r\n num_input_features = [128,128,128]\r\n num_output_features = [128,128,256]\r\n num_layers = 3\r\n conv_block2 = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n self.features.add_module('convBlock2',conv_block2)\r\n self.features.add_module('pool2', nn.MaxPool2d(kernel_size = 2, stride = 2, dilation = 1, ceil_mode = False))\r\n self.features.add_module('dropOut2',nn.Dropout2d(p=0.1))\r\n \r\n \r\n num_input_features = [256,256]\r\n num_output_features = [256,256]\r\n num_layers = 2\r\n conv_block3 = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n self.features.add_module('convBlock3',conv_block3)\r\n self.features.add_module('pool3', nn.MaxPool2d(kernel_size = 2, stride = 2, dilation = 1, ceil_mode = False))\r\n self.features.add_module('dropOut3',nn.Dropout2d(p=0.1))\r\n \r\n \r\n num_input_features = [256]\r\n num_output_features = [512]\r\n num_layers = 1\r\n conv_block4 = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n 
self.features.add_module('convBlock4',conv_block4)\r\n self.features.add_module('pool4', nn.MaxPool2d(kernel_size = 2, stride = 2, dilation = 1, ceil_mode = False))\r\n self.features.add_module('dropOut4',nn.Dropout2d(p=0.1))\r\n \r\n \r\n num_input_features = [512,2048]\r\n num_output_features = [2048,256]\r\n num_layers = 2\r\n conv_block5 = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n self.features.add_module('convBlock5',conv_block5)\r\n self.features.add_module('pool5', nn.MaxPool2d(kernel_size = 2, stride = 2, dilation = 1, ceil_mode = False))\r\n self.features.add_module('dropOut5',nn.Dropout2d(p=0.1))\r\n \r\n \r\n num_input_features = [256]\r\n num_output_features = [256]\r\n num_layers = 1\r\n conv_block6 = _ConvBlock(num_layers,num_input_features,num_output_features)\r\n self.features.add_module('convBlock6',conv_block6)\r\n \r\n for m in self.features.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))\r\n \r\n def save_models(self,epoch,optimizer_dict,save_dir):\r\n if not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\n if optimizer_dict is not None:\r\n state = {\r\n 'epoch': epoch,\r\n 'state_dict': self.state_dict(),\r\n 'optimizer': optimizer_dict,\r\n }\r\n else:\r\n state = {\r\n 'epoch': epoch,\r\n 'state_dict': self.state_dict(),\r\n }\r\n torch.save(state,save_dir +'/SimpleNetModel' + \"_{}.model\".format(epoch))\r\n print(\"Check Point Saved\") \r\n \r\n def forward(self,x_in):\r\n x_out = self.features(x_in)\r\n #Global Max Pooling\r\n x_out = F.max_pool2d(x_out, kernel_size= x_out.size()[2:]) \r\n x_out = self.dropOutFinal(x_out)\r\n\r\n x_out = x_out.view(x_out.size(0), -1)\r\n class_out = self.classifier(x_out)\r\n return x_out, class_out\r\n \r\n \r\n \r\n \r\n " ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.ModuleDict", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.init.calculate_gain", "torch.nn.Dropout2d" ] ]
Saharae/Final-Project-Group2
[ "28d26f93b52eb06a1e195e9c7cfe10110f46faec" ]
[ "Code/GUI.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 5 17:04:12 2021\n\n@author: adamkritz\n\"\"\"\n\nimport matplotlib.gridspec as grd\nimport pandas as pd\nimport sys\nfrom PyQt5.QtWidgets import QMainWindow, QAction, QApplication\nimport webbrowser\nfrom PyQt5.QtWidgets import QSizePolicy\n\n \nfrom PyQt5.QtWidgets import QPushButton \nfrom PyQt5.QtWidgets import QLineEdit \nfrom PyQt5.QtWidgets import QRadioButton \nfrom PyQt5.QtWidgets import QGroupBox \n\nfrom PyQt5.QtWidgets import QTableWidget,QTableWidgetItem\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import Qt\n\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom PyQt5.QtWidgets import QWidget,QLabel, QVBoxLayout, QHBoxLayout, QGridLayout\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas \nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar \nfrom matplotlib.figure import Figure \nimport seaborn as sns\nfrom preprocessing_utils import get_repo_root\nfrom preprocessing_utils import get_repo_root_w\nfrom sys import platform\nimport zipfile\n\n\n# this unzips the results folder\ndef unzip_results():\n if platform == \"darwin\":\n with zipfile.ZipFile(get_repo_root() + \"/results.zip\",\"r\") as zf:\n zf.extractall(get_repo_root())\n elif platform == \"win32\":\n with zipfile.ZipFile(get_repo_root_w() + \"\\\\results.zip\",\"r\") as zf:\n zf.extractall(get_repo_root_w())\n\n \n# this takes variables from the main\ndf = 0\npred = 0\n\ndef take(x, y):\n global df\n global pred\n df = x\n pred = y\n\n### The way this works:\n### Each class is a separate window. Within each class you can define how you want\n### the window to look (size, whats in it, etc)\n### The Menu class at the bottom is the main window. In this window there is \n### a file menu that contains the spots for each other window. 
\n### All the functions at the bottom of the main window will open the other windows\n### if they are clicked.\n\n# Numerical Variables Window \nclass NumericalVars(QMainWindow):\n\n def __init__(self):\n super(NumericalVars, self).__init__()\n\n # create window and main widget\n self.Title = 'Numerical Variables'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget)\n \n # variables box with buttons to pick which one\n self.groupBox1 = QGroupBox('Variables')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # description for the variable selected\n self.groupBox2 = QGroupBox('Description')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n # graphic 1 of the variable\n self.groupBox3 = QGroupBox('Graphic 1')\n self.groupBox3Layout = QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)\n \n \n # graphic 2 of the variable\n self.groupBox4 = QGroupBox('Graphic 2')\n self.groupBox4Layout = QHBoxLayout()\n self.groupBox4.setLayout(self.groupBox4Layout)\n\n\n # button creation\n self.b1 = QRadioButton(\"Duration\")\n self.b1.toggled.connect(self.onClicked)\n\n self.b2 = QRadioButton(\"Budget\")\n self.b2.toggled.connect(self.onClicked)\n\n self.b3 = QRadioButton(\"World Wide Gross Income\")\n self.b3.toggled.connect(self.onClicked)\n \n self.b4 = QRadioButton(\"USA Gross Income\")\n self.b4.toggled.connect(self.onClicked)\n \n self.groupBox1Layout.addWidget(self.b1)\n self.groupBox1Layout.addWidget(self.b2)\n self.groupBox1Layout.addWidget(self.b3)\n self.groupBox1Layout.addWidget(self.b4)\n \n # description creation\n self.label = QLabel(\"\")\n self.layout.addWidget(self.label)\n self.groupBox2Layout.addWidget(self.label)\n \n\n # figure and canvas figure to draw the graph is created to\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.canvas = FigureCanvas(self.fig)\n \n \n # second graph\n self.fig2 = Figure()\n self.ax2 = self.fig2.add_subplot(111)\n self.canvas2 = FigureCanvas(self.fig2)\n \n \n self.groupBox3Layout.addWidget(self.canvas)\n self.groupBox4Layout.addWidget(self.canvas2)\n\n # add it all to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox3)\n self.layout.addWidget(self.groupBox4)\n self.layout.addStretch(1)\n\n # main widget sizing\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n \n # function for each button\n def onClicked(self):\n if self.b1.isChecked():\n mean = str(round(df['duration'].mean()))\n std = str(round(df['duration'].std()))\n self.label.setText('The duration of the movie. The mean is ' + mean + ' minutes and the standard deviation is ' + std + ' minutes.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n sns.histplot(data = df, x = 'duration', ax = self.ax1, kde = True, bins = 75)\n self.ax1.set_xlim((0, 300))\n self.ax1.set_title('Distribution of Movie Durations')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n if self.b2.isChecked():\n mean = str(round(df['budget_adjusted'].mean()))\n std = str(round(df['budget_adjusted'].std()))\n self.label.setText('The budget for the movie. This has been adjusted for inflation. 
The mean is ' + mean + ' USD and the standard deviation is ' + std + ' USD.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n sns.histplot(data = df, x = 'budget_adjusted', ax = self.ax1, kde = True, log_scale = True)\n self.ax1.set_title('Distribution of Movie Budget')\n self.ax1.set_xlabel('Budget ($)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b3.isChecked():\n \n mean = str(round(df['worldwide_gross_income_adjusted'].mean()))\n std = str(round(df['worldwide_gross_income_adjusted'].std()))\n \n self.label.setText('The amount of money the movie made world-wide. This has been adjusted for inflation. The mean is ' + mean + ' USD and the standard deviation is ' + std + ' USD.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.show()\n sns.histplot(data = df, x = 'worldwide_gross_income_adjusted', kde = True, ax = self.ax1, log_scale = True)\n self.ax1.set_title('Distribution of Movie Income (Worldwide)')\n self.ax1.set_xlabel('Worldwide Income ($)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n \n p = sns.scatterplot(data = df, x = 'date_published_year', y = 'worldwide_gross_income_adjusted', alpha = 0.2, ax = self.ax2)\n p.set(yscale = 'log')\n self.ax2.set_title('Worldwide Gross Income by Date')\n self.ax2.set_ylabel('Worldwide Gross Income ($)')\n self.ax2.set_xlabel('Year Published')\n sns.despine()\n \n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n \n if self.b4.isChecked():\n \n mean = str(round(df['usa_gross_income_adjusted'].mean()))\n std = str(round(df['usa_gross_income_adjusted'].std()))\n\n self.label.setText('The amount of money the movie made in the United States. This has been adjusted for inflation. 
The mean is ' + mean + ' USD and the standard deviation is ' + std + ' USD.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.show()\n sns.histplot(data = df, x = 'usa_gross_income_adjusted', kde = True, ax = self.ax1, log_scale = True)\n self.ax1.set_title('Distribution of Movie Income (USA)')\n self.ax1.set_xlabel('USA Income ($)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n p = sns.scatterplot(data = df, x = 'date_published_year', y = 'usa_gross_income_adjusted', alpha = 0.2, ax = self.ax2)\n p.set(yscale = 'log')\n self.ax2.set_title('USA Gross Income by Date')\n self.ax2.set_ylabel('USA Gross Income ($)')\n self.ax2.set_xlabel('Year Published')\n sns.despine()\n \n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n\n \n# Categorical Variables Window \nclass CategoricalVars(QMainWindow):\n\n def __init__(self):\n super(CategoricalVars, self).__init__()\n\n # create window and main widget\n self.Title = 'Categorical Variables'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget)\n \n # variables box with buttons to pick which one \n self.groupBox1 = QGroupBox('Variables')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # description for each variable\n self.groupBox2 = QGroupBox('Description')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n # graphic 1 of the variable\n self.groupBox3 = QGroupBox('Graphic 1')\n self.groupBox3Layout = QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)# Creates vertical layout\n \n # graphic 2 of the variable\n self.groupBox4 = QGroupBox('Graphic 2')\n self.groupBox4Layout = QHBoxLayout()\n self.groupBox4.setLayout(self.groupBox4Layout)\n\n # button creation\n self.b1 = QRadioButton(\"Title\")\n self.b1.toggled.connect(self.onClicked2)\n\n self.b2 = QRadioButton(\"Date Published\")\n self.b2.toggled.connect(self.onClicked2)\n\n self.b3 = QRadioButton(\"Genre\")\n self.b3.toggled.connect(self.onClicked2)\n \n self.b5 = QRadioButton(\"Region\")\n self.b5.toggled.connect(self.onClicked2)\n \n self.b6 = QRadioButton(\"Director Frequency\")\n self.b6.toggled.connect(self.onClicked2)\n \n self.b7 = QRadioButton(\"Writer Frequency\")\n self.b7.toggled.connect(self.onClicked2)\n \n self.b8 = QRadioButton(\"Production Company Frequency\")\n self.b8.toggled.connect(self.onClicked2)\n \n self.b9 = QRadioButton(\"Actor Frequency\")\n self.b9.toggled.connect(self.onClicked2)\n\n self.b10 = QRadioButton(\"Description\")\n self.b10.toggled.connect(self.onClicked2)\n \n self.groupBox1Layout.addWidget(self.b1)\n self.groupBox1Layout.addWidget(self.b10)\n self.groupBox1Layout.addWidget(self.b2)\n self.groupBox1Layout.addWidget(self.b3)\n self.groupBox1Layout.addWidget(self.b5)\n self.groupBox1Layout.addWidget(self.b6)\n self.groupBox1Layout.addWidget(self.b7)\n self.groupBox1Layout.addWidget(self.b8)\n self.groupBox1Layout.addWidget(self.b9)\n \n # label creation\n self.label = QLabel(\"\")\n self.layout.addWidget(self.label)\n self.groupBox2Layout.addWidget(self.label)\n\n # graph one creation\n self.fig = Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.canvas = FigureCanvas(self.fig)\n \n \n # second graph\n \n self.fig2 = Figure()\n self.ax2 = self.fig2.add_subplot(111)\n self.canvas2 = FigureCanvas(self.fig2)\n \n self.groupBox3Layout.addWidget(self.canvas)\n \n self.groupBox4Layout.addWidget(self.canvas2)\n\n # add to main widget\n 
self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox3)\n self.layout.addWidget(self.groupBox4)\n self.layout.addStretch(1)\n\n # resize main widget\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n\n # functions for buttons\n def onClicked2(self):\n if self.b1.isChecked():\n mean = str(round(df['title_n_words'].mean(), 2))\n std = str(round(df['title_n_words'].std(), 2))\n self.label.setText('Having to do with the title of the movie. This includes: number of words in the title, the ratio of long words to short words, the ratio of vowels to non-vowels, and the ratio of capital letters to lowercase letters. \\nHere are two plots about the number of words and the ratio of long words. For our purposes, these will be treated as categorical variables. The mean number of words in the title is ' + mean +' and the standard deviation is ' + std + '.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.show()\n sns.countplot(data = df, x = 'title_n_words', ax = self.ax1)\n self.ax1.set_title('Number of Words in Title')\n self.ax1.set_xlabel('Number of Words')\n self.ax1.set_ylabel('Frequency')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n sns.histplot(data = df, x = 'title_ratio_long_words', ax = self.ax2, bins = 10)\n self.ax2.set_title('Ratio of Long Words to Short Words in the Title')\n self.ax2.set_xlabel('Ratio')\n self.ax2.set_ylabel('Frequency')\n sns.despine()\n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n \n if self.b10.isChecked():\n mean = str(round(df['description_n_words'].mean(), 2))\n std = str(round(df['description_n_words'].std(), 2))\n self.label.setText('Having to do with the IMDb description of the movie. This is similar to title, and includes: number of words in the description, the ratio of long words to short words, the ratio of vowels to non-vowels, and the ratio of capital letters to lowercase letters. \\nHere are two plots about the number of words and the ratio of long words. For our purposes, these will be treated as categorical variables. The mean number of words in the description is ' + mean +' and the standard deviation is ' + std + '.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.show()\n sns.countplot(data = df, x = 'description_n_words', ax = self.ax1)\n self.ax1.set_title('Number of Words in Description')\n self.ax1.set_xlabel('Number of Words')\n self.ax1.set_ylabel('Frequency')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n sns.histplot(data = df, x = 'description_ratio_long_words', ax = self.ax2, bins = 10)\n self.ax2.set_title('Ratio of Long Words to Short Words in the Description')\n self.ax2.set_xlabel('Ratio')\n self.ax2.set_ylabel('Frequency')\n sns.despine()\n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n \n if self.b2.isChecked():\n self.label.setText('The date the movie released. This includes the year, month, and day of release. 
Here are two plots on the year and month of release.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.show()\n sns.histplot(data = df, x = 'date_published_year', ax = self.ax1)\n self.ax1.set_title('Year Released')\n self.ax1.set_xlabel('Year')\n self.ax1.set_ylabel('Frequency')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n sns.countplot(data = df, x = 'date_published_month', ax = self.ax2)\n self.ax2.set_title('Month Released')\n self.ax2.set_xlabel('Month')\n self.ax2.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',' Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])\n self.ax2.set_ylabel('Frequency')\n sns.despine()\n self.fig2.tight_layout()\n self.fig2.canvas.draw_idle()\n \n if self.b3.isChecked():\n self.label.setText('The genre of the movie. IMDb gives each movie three genres, resulting in many possible sets of three genres. There are also many possible genres, further increasing the number of genre combinations. \\nBecause of this, we decided to binary encode the genres, as we could not easily represent each one otherwise. This results in 732 combinations of genres that we will use.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.hide()\n self.groupBox4.hide()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b5.isChecked():\n self.label.setText('The region the movie was initially released in. The six regions are: Africa, Americas, Asia, Europe, Oceania, and None of the above/No region recorded.')\n af = df['region_Africa'].value_counts()[1]\n am = df['region_Americas'].value_counts()[1]\n asi = df['region_Asia'].value_counts()[1]\n eu = df['region_Europe'].value_counts()[1]\n oc = df['region_Oceania'].value_counts()[1]\n no = df['region_None'].value_counts()[1]\n vals = [af, am, asi, eu, oc, no]\n names = ['Africa', 'America', 'Asia', 'Europe', 'Oceania', 'None']\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n self.ax1.bar(names, height = vals)\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b6.isChecked():\n mean = df['director_weighted_frequency'].mean()\n std = df['director_weighted_frequency'].std()\n mean = f'{mean:.5e}'\n std = f'{std:.5e}'\n self.label.setText('The frequency of director appearance. This variable measures how often a director directs a movie compared to other directors. For our purposes, this will be represented as a categorical variable. The mean frequency is ' + mean + ' and the standard deviation is ' + std + '.')\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n sns.histplot(data = df, x = 'director_weighted_frequency', ax = self.ax1, bins = 20)\n self.ax1.set_title('Director Frequency Histogram')\n self.ax1.set_xlabel('Directory Frequency')\n self.ax1.set_ylabel('Frequency (Count)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b7.isChecked():\n mean = df['writer_weighted_frequency'].mean()\n std = df['writer_weighted_frequency'].std()\n mean = f'{mean:.5e}'\n std = f'{std:.5e}'\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n self.label.setText('The frequency of writer appearance. This variable measures how often a writer writes a movie compared to other writers. For our purposes, this will be represented as a categorical variable. 
The mean frequency is ' + mean + ' and the standard deviation is ' + std + '.')\n sns.histplot(data = df, x = 'writer_weighted_frequency', ax = self.ax1, bins = 20)\n self.ax1.set_title('Writer Frequency Histogram')\n self.ax1.set_xlabel('Writer Frequency')\n self.ax1.set_ylabel('Frequency (Count)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b8.isChecked():\n mean = df['production_company_frequency'].mean()\n std = df['production_company_frequency'].std()\n mean = f'{mean:.3e}'\n std = f'{std:.3e}'\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n self.label.setText('The frequency of production company appearance. This variable measures how often a production company produces a movie compared to other production companies. For our purposes, this will be represented as a categorical variable. The mean frequency is ' + mean + ' and the standard deviation is ' + std + '.')\n sns.histplot(data = df, x = 'production_company_frequency', ax = self.ax1, bins = 20)\n self.ax1.set_title('Production Company Frequency Histogram')\n self.ax1.set_xlabel('Production Company Frequency')\n self.ax1.set_ylabel('Frequency (Count)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n if self.b9.isChecked():\n mean = df['actors_weighted_frequency'].mean()\n std = df['actors_weighted_frequency'].std()\n mean = f'{mean:.5e}'\n std = f'{std:.5e}'\n self.ax1.clear()\n self.ax2.clear()\n self.groupBox3.show()\n self.groupBox4.hide()\n self.label.setText('The frequency of actor appearance. This variable measures how often a actor acts in a movie compared to other actors. For our purposes, this will be represented as a categorical variable. The mean frequency is ' + mean + ' and the standard deviation is ' + std + '.')\n sns.histplot(data = df, x = 'actors_weighted_frequency', ax = self.ax1, bins = 20)\n self.ax1.set_title('Actor Frequency Histogram')\n self.ax1.set_xlabel('Actor Company Frequency')\n self.ax1.set_ylabel('Frequency (Count)')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n \n\n# Target Variable Window \nclass TargetVar(QMainWindow):\n\n def __init__(self):\n super(TargetVar, self).__init__()\n \n # create main window and widget\n self.Title = 'Weighted Average Vote'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # description box\n self.groupBox1 = QGroupBox('Description')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # plot picker box with buttons\n self.groupBox2 = QGroupBox('Plot Picker')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # navigation bar for plots\n self.groupBox25 = QGroupBox('Navigation Bar')\n self.groupBox25Layout= QHBoxLayout()\n self.groupBox25.setLayout(self.groupBox25Layout)\n \n # create the first graphic\n self.groupBox3 = QGroupBox('Graphic')\n self.groupBox3Layout= QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)\n \n # label creation\n mean = str(round(df['weighted_average_vote'].mean(), 2))\n std = str(round(df['weighted_average_vote'].std(), 2))\n \n self.label = QLabel(\"The average vote for an IMDb movie is calculated by the averaging all the ratings for a movie. However, IMDb uses weighted average vote over raw average. 
\\nThis allows IMDb to weight votes differently in order to detect unusual activity, like review-bombing, which prevents users from drastically changing a movie's score. \\nThe mean weighted average vote is \" + mean + ' and the standard deviation is ' +std + '. This will be our target variable to predict.')\n self.groupBox1Layout.addWidget(self.label)\n \n # create graphic with 2 boxes\n self.fig = Figure()\n gs00 = grd.GridSpec(1, 2, width_ratios=[10,1])\n self.ax1 = self.fig.add_subplot(gs00[0])\n self.cax = self.fig.add_subplot(gs00[1])\n self.canvas = FigureCanvas(self.fig)\n \n # navigation toolbar creation\n self.toolbar = NavigationToolbar(self.canvas, self)\n \n self.groupBox25Layout.addWidget(self.toolbar)\n \n # create buttons\n self.b1 = QRadioButton(\"Distribution\")\n self.b1.toggled.connect(self.onClicked)\n\n self.b2 = QRadioButton(\"Heatmap\")\n self.b2.toggled.connect(self.onClicked)\n \n self.groupBox2Layout.addWidget(self.b1)\n self.groupBox2Layout.addWidget(self.b2)\n \n\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.canvas.updateGeometry()\n \n self.groupBox3Layout.addWidget(self.canvas)\n\n # add boxes to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox25)\n self.layout.addWidget(self.groupBox3)\n \n # main widget sizing\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n\n # button functions\n def onClicked(self):\n if self.b1.isChecked():\n self.ax1.clear()\n self.cax.set_visible(False)\n sns.histplot(data = df, x = 'weighted_average_vote', ax = self.ax1, bins = 40, kde = True)\n self.ax1.set_title('Distribution of Weighted Votes')\n sns.despine()\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n if self.b2.isChecked():\n self.ax1.clear()\n self.cax.set_visible(True)\n sns.heatmap(df[['duration', 'weighted_average_vote', 'budget_adjusted',\n 'usa_gross_income_adjusted', 'worldwide_gross_income_adjusted',\n 'date_published_year', 'date_published_month', 'date_published_day',\n 'actors_weighted_frequency', 'director_weighted_frequency',\n 'writer_weighted_frequency', 'production_company_frequency', 'title_n_words',\n 'title_ratio_long_words', 'title_ratio_vowels',\n 'title_ratio_capital_letters', 'description_n_words',\n 'description_ratio_long_words', 'description_ratio_vowels',\n 'description_ratio_capital_letters', ]].corr(), vmin = -1, vmax = 1, ax = self.ax1, cmap = 'coolwarm', cbar_ax=self.cax)\n self.ax1.set_title('Correlation Matrix of Numeric Variables')\n self.fig.tight_layout()\n self.fig.canvas.draw_idle()\n\n# Models to Try Window\nclass ModelstoTry(QMainWindow):\n\n def __init__(self):\n super(ModelstoTry, self).__init__()\n \n # create window and main widget\n self.Title = 'Models to Try'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # create first groupbox for background\n self.groupBox1 = QGroupBox('Background')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # create label for background\n self.label = QLabel('We started our modeling phase by first selecting a handful of promising models that work with regression problems.\\\n \\nOur selected models are Linear Regression, Random Forest, Gradient Boosting, Adaptive Boosting, and K-Nearest Neighbors.\\\n \\nThe out of the box Random Forest and Gradient Boosting models seem to perform best with the 2 lowest validation MSEs.')\n 
self.groupBox1Layout.addWidget(self.label)\n \n # create plot image\n self.groupBox2 = QGroupBox('MSE Plot')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # add image to plot image\n self.label_image = QLabel()\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/1. Base Model Comparison/model_comparison_base_all_features.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\1. Base Model Comparison\\\\model_comparison_base_all_features.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n self.groupBox2Layout.addWidget(self.label_image)\n \n # add boxes to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n \n # resize main widget\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n \n# First Hyperparameter window\nclass Hyp1(QMainWindow):\n\n def __init__(self):\n super(Hyp1, self).__init__()\n \n # create window and main widget\n self.Title = 'Hyperparameter Tuning and Validation Phase I'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QGridLayout(self.main_widget) \n \n # create info box\n self.groupBox1 = QGroupBox('Info')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # add text to info box\n self.label = QLabel('We selected the top 3 base models and tuned them by setting a list of \\\n \\nhyperparameters to try in GridSearch validation to see if performance increases.\\\n \\nAs seen in the Model Comparison graph, our best model still seems to be the \\\n \\nRandom Forest. Let us focus on just that model in the next phase.')\n self.groupBox1Layout.addWidget(self.label)\n \n # create plot picker with buttons\n self.groupBox15 = QGroupBox('Plot Picker')\n self.groupBox15Layout= QHBoxLayout()\n self.groupBox15.setLayout(self.groupBox15Layout)\n \n # create buttons for plots\n self.b1 = QRadioButton(\"Random Forest\")\n self.b1.toggled.connect(self.onClicked)\n\n self.b2 = QRadioButton(\"KNN\")\n self.b2.toggled.connect(self.onClicked)\n\n self.b3 = QRadioButton(\"Gradient Boosting\")\n self.b3.toggled.connect(self.onClicked)\n \n self.b4 = QRadioButton(\"Model Comparison\")\n self.b4.toggled.connect(self.onClicked)\n\n self.groupBox15Layout.addWidget(self.b4)\n self.groupBox15Layout.addWidget(self.b1)\n self.groupBox15Layout.addWidget(self.b2)\n self.groupBox15Layout.addWidget(self.b3)\n\n # create box to show plots\n self.groupBox2 = QGroupBox('Tuned Plots')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # create graph are for image\n self.label_image = QLabel()\n self.groupBox2Layout.addWidget(self.label_image)\n \n \n self.label_image2 = QLabel()\n \n # import data\n if platform == \"darwin\":\n self.all_data = pd.read_csv(get_repo_root() + '/results/2. Tuning 1/gridsearchcv_results.csv')\n elif platform == \"win32\": \n self.all_data = pd.read_csv(get_repo_root_w() + '\\\\results\\\\2. 
Tuning 1\\\\gridsearchcv_results.csv')\n \n # create grid search results box\n self.groupBox4 = QGroupBox('Gridsearch Results')\n self.groupBox4Layout= QHBoxLayout()\n self.groupBox4.setLayout(self.groupBox4Layout)\n \n # add data to the table\n NumRows = len(self.all_data.index)\n \n self.tableWidget = QTableWidget()\n self.tableWidget.setColumnCount(len(self.all_data.columns))\n self.tableWidget.setRowCount(NumRows)\n self.tableWidget.setHorizontalHeaderLabels(self.all_data.columns)\n\n for i in range(NumRows):\n for j in range(len(self.all_data.columns)):\n self.tableWidget.setItem(i, j, QTableWidgetItem(str(self.all_data.iat[i, j])))\n\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.resizeRowsToContents()\n \n self.groupBox4Layout.addWidget(self.tableWidget)\n\n self.layout.addWidget(self.groupBox1, 0, 0)\n self.layout.addWidget(self.groupBox15, 0, 1)\n self.layout.addWidget(self.groupBox2, 1, 1)\n self.layout.addWidget(self.groupBox4, 1, 0)\n \n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n \n # functions for each button\n def onClicked(self):\n if self.b1.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/2. Tuning 1/validation_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\2. Tuning 1\\\\validation_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n\n if self.b2.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/2. Tuning 1/validation_curves_knn_regressor_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\2. Tuning 1\\\\validation_curves_knn_regressor_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n \n if self.b3.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/2. Tuning 1/validation_curves_gradient_boost_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\2. Tuning 1\\\\validation_curves_gradient_boost_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n \n \n if self.b4.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/2. Tuning 1/model_comparison_tuning_model1.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\": \n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\2. 
Tuning 1\\\\model_comparison_tuning_model1.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n \n# Second Hyperparameter window\nclass Hyp2(QMainWindow):\n\n def __init__(self):\n super(Hyp2, self).__init__()\n \n # create window and main widget\n self.Title = 'Hyperparameter Tuning and Validation Phase II'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # first box for info\n self.groupBox1 = QGroupBox('Info')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # add description to box\n self.label = QLabel('We tried to tune our best model, the Random Forest model to see how much better we can make it. Our Random Forest showed signs of overfitting so we tried to set hyperparameters to regularize the model \\\n \\nsuch as increasing the number of trees, the min samples per leaf node, the max number of features per tree, and the max depth each tree can go.')\n self.groupBox1Layout.addWidget(self.label)\n \n # create buttons\n self.b1 = QRadioButton('Learning Curves')\n self.b1.toggled.connect(self.onClicked)\n\n self.b2 = QRadioButton('Validation Curves')\n self.b2.toggled.connect(self.onClicked)\n \n # create plot picker and add buttons\n self.groupBox15 = QGroupBox('Plot Picker')\n self.groupBox15Layout= QHBoxLayout()\n self.groupBox15.setLayout(self.groupBox15Layout)\n \n self.groupBox15Layout.addWidget(self.b1)\n self.groupBox15Layout.addWidget(self.b2)\n \n # create box for plots\n self.groupBox2 = QGroupBox('Curve Plots')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # add plot area to box\n self.label_image = QLabel()\n \n self.groupBox2Layout.addWidget(self.label_image)\n \n # add boxes to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox15)\n self.layout.addWidget(self.groupBox2)\n \n # resize main window\n self.setCentralWidget(self.main_widget) \n self.showMaximized()\n\n # create button function\n def onClicked(self):\n if self.b1.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/3. Tuning 2 & Model Selection/learning_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\3. Tuning 2 & Model Selection\\\\learning_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n \n if self.b2.isChecked():\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/3. Tuning 2 & Model Selection/validation_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\":\n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\3. 
Tuning 2 & Model Selection\\\\validation_curves_random_forest_tuned.png')\n self.pix2 = self.pix.scaled(1200, 800, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n\n# Model Selection window\nclass ModelSelection(QMainWindow):\n\n def __init__(self):\n super(ModelSelection, self).__init__()\n \n # create window and main widget\n self.Title = 'Model Selection'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # create box for info\n self.groupBox1 = QGroupBox('Info')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n # text for the box\n self.label = QLabel('This is our final selected model.')\n self.groupBox1Layout.addWidget(self.label)\n \n # create second box for gridsearch data\n self.groupBox2 = QGroupBox('Gridsearch Results 2')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # create image area\n self.label_image = QLabel()\n \n # import all the data and images\n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/4. Results Evaluation/most_important_features_results_eval.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n self.all_data = pd.read_csv(get_repo_root() + '/results/4. Results Evaluation/gridsearchcv_results.csv')\n elif platform == \"win32\": \n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\4. Results Evaluation\\\\most_important_features_results_eval.png')\n self.pix2 = self.pix.scaled(1000, 500, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n self.all_data = pd.read_csv(get_repo_root_w() + '\\\\results\\\\4. Results Evaluation\\\\gridsearchcv_results.csv')\n \n # add data to table and table to box\n NumRows = len(self.all_data.index) \n \n self.tableWidget = QTableWidget()\n self.tableWidget.setColumnCount(len(self.all_data.columns))\n self.tableWidget.setRowCount(NumRows)\n self.tableWidget.setHorizontalHeaderLabels(self.all_data.columns)\n\n for i in range(NumRows):\n for j in range(len(self.all_data.columns)):\n self.tableWidget.setItem(i, j, QTableWidgetItem(str(self.all_data.iat[i, j])))\n\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.resizeRowsToContents()\n \n self.groupBox2Layout.addWidget(self.tableWidget)\n \n # create third box for plot window\n self.groupBox3 = QGroupBox('Most Important Features')\n self.groupBox3Layout= QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)\n \n \n self.groupBox3Layout.addWidget(self.label_image)\n \n # add groupboxs to main window\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox3)\n \n # resize main window\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n\n# Model Results window\nclass ModelResults(QMainWindow):\n\n def __init__(self):\n super(ModelResults, self).__init__()\n \n # create window and main widget\n self.Title = 'Model and Results Evaluation'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # first group box and label\n self.groupBox1 = QGroupBox('Info')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n self.label = QLabel('After selecting our best Random Forest model, we compared it with a random model and calculated the average MSE between the two. 
Our model performed much better than the random model and was proved statistically significant using a 2-Sample T-Test with a null hypothesis that the two distributions are the same.')\n self.groupBox1Layout.addWidget(self.label)\n \n # second group box for data\n self.groupBox2 = QGroupBox('Our Model')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n \n # third group box for label\n self.groupBox3 = QGroupBox('Model vs Random')\n self.groupBox3Layout= QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)\n \n # label for third gorupbox \n self.label2 = QLabel('Our model can predict the weighted average movie IMDB rating with an average error of +- 0.93 while a random model has an average error of +- 2.9.')\n \n # fourth groupbox data\n self.groupBox4 = QGroupBox('Prediction Results')\n self.groupBox4Layout= QHBoxLayout()\n self.groupBox4.setLayout(self.groupBox4Layout)\n \n # fifth groupbox for image\n self.groupBox5 = QGroupBox('MSE: Our Model Versus Random')\n self.groupBox5Layout= QHBoxLayout()\n self.groupBox5.setLayout(self.groupBox5Layout)\n \n # create image space and import data\n self.label_image = QLabel()\n \n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/results/4. Results Evaluation/vs_random_results_eval.png')\n self.pix2 = self.pix.scaled(720,360, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n self.all_data = pd.read_csv(get_repo_root() + '/results/4. Results Evaluation/best_model_evaluation_results.csv')\n self.all_data2 = pd.read_csv(get_repo_root() + '/results/4. Results Evaluation/prediction_results.csv')\n elif platform == \"win32\": \n self.pix = QPixmap(get_repo_root_w() + '\\\\results\\\\4. Results Evaluation\\\\vs_random_results_eval.png')\n self.pix2 = self.pix.scaled(720,360, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n self.all_data = pd.read_csv(get_repo_root_w() + '\\\\results\\\\4. Results Evaluation\\\\best_model_evaluation_results.csv')\n self.all_data2 = pd.read_csv(get_repo_root_w() + '\\\\results\\\\4. 
Results Evaluation\\\\prediction_results.csv')\n \n # move data into tables \n self.all_dataHead = self.all_data.head(6) \n \n NumRows = len(self.all_dataHead.index)\n \n self.tableWidget = QTableWidget()\n self.tableWidget.setColumnCount(len(self.all_dataHead.columns))\n self.tableWidget.setRowCount(NumRows)\n self.tableWidget.setHorizontalHeaderLabels(self.all_dataHead.columns)\n\n for i in range(NumRows):\n for j in range(len(self.all_dataHead.columns)):\n self.tableWidget.setItem(i, j, QTableWidgetItem(str(self.all_dataHead.iat[i, j])))\n\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.resizeRowsToContents()\n \n \n self.all_data2Head = self.all_data2.head(10) \n \n NumRows = len(self.all_data2Head.index)\n \n self.tableWidget2 = QTableWidget()\n self.tableWidget2.setColumnCount(len(self.all_data2Head.columns))\n self.tableWidget2.setRowCount(NumRows)\n self.tableWidget2.setHorizontalHeaderLabels(self.all_data2Head.columns)\n\n for i in range(NumRows):\n for j in range(len(self.all_data2Head.columns)):\n self.tableWidget2.setItem(i, j, QTableWidgetItem(str(self.all_data2Head.iat[i, j])))\n\n self.tableWidget2.resizeColumnsToContents()\n self.tableWidget2.resizeRowsToContents()\n \n # add everything to boxes\n self.groupBox2Layout.addWidget(self.tableWidget) \n self.groupBox3Layout.addWidget(self.label2)\n self.groupBox4Layout.addWidget(self.tableWidget2) \n self.groupBox5Layout.addWidget(self.label_image) \n \n # add boxes to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox3)\n self.layout.addWidget(self.groupBox4)\n self.layout.addWidget(self.groupBox5)\n \n # resize main widget\n self.setCentralWidget(self.main_widget)\n self.showMaximized() \n\n# Prediction window\nclass predi(QMainWindow):\n def __init__(self):\n\n super(predi, self).__init__()\n \n # create main window and widget\n self.Title = 'Prediction Game'\n self.setWindowTitle(self.Title)\n self.main_widget = QWidget(self)\n self.layout = QVBoxLayout(self.main_widget) \n \n # create first description box and add text\n self.groupBox1 = QGroupBox('Description')\n self.groupBox1Layout= QHBoxLayout()\n self.groupBox1.setLayout(self.groupBox1Layout)\n \n self.label = QLabel(\"This tool will allow you to make predictions against our best model, to see who can come out on top!\\nWe will give you a list of features of a movie selected randomly from our test set and you will predict the weighted average score.\\nOur model will also predict the weighted average score, and whoever comes the closest to the real score will win! \\nSince you are presumably a human, we will give you human readable features for you to make your guess. \\nRemember, no cheating by looking up the movie online. 
And if any of the features are missing, it is because they were not in the IMDb dataset, so our model did not get them either.\")\n self.groupBox1Layout.addWidget(self.label)\n \n # create button to generate movie and add to movie\n self.groupBox15 = QGroupBox('Random Movie Generator')\n self.groupBox15Layout= QHBoxLayout()\n self.groupBox15.setLayout(self.groupBox15Layout)\n \n self.button = QPushButton('Generate', self)\n self.button.setToolTip('This is an example button')\n self.groupBox15Layout.addWidget(self.button)\n self.button.clicked.connect(self.on_click)\n \n # create table\n self.tableWidget = QTableWidget()\n self.tableWidget.setRowCount(2)\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setItem(0, 0, QTableWidgetItem(\"Duration\"))\n self.tableWidget.setItem(0, 1, QTableWidgetItem(\"Title\"))\n self.tableWidget.setItem(0, 2, QTableWidgetItem(\"Date Published\"))\n self.tableWidget.setItem(0, 3, QTableWidgetItem(\"Director\"))\n self.tableWidget.setItem(0, 4, QTableWidgetItem(\"Writer\"))\n self.tableWidget.setItem(0, 5, QTableWidgetItem(\"Production Company\"))\n self.tableWidget.setItem(0, 6, QTableWidgetItem(\"Actors\"))\n self.tableWidget.setItem(0, 7, QTableWidgetItem(\"Description\"))\n self.tableWidget.setItem(0, 8, QTableWidgetItem(\"Budget\"))\n self.tableWidget.setItem(0, 9, QTableWidgetItem(\"USA Gross Income\"))\n self.tableWidget.setItem(0, 10, QTableWidgetItem(\"Worldwide Gross Income\"))\n self.tableWidget.setItem(0, 11, QTableWidgetItem(\"Genre 1\"))\n self.tableWidget.setItem(0, 12, QTableWidgetItem(\"Genre 2\"))\n self.tableWidget.setItem(0, 13, QTableWidgetItem(\"Genre 3\"))\n self.tableWidget.setItem(0, 14, QTableWidgetItem(\"Region\"))\n \n # create box for movie features\n self.groupBox175 = QGroupBox(\"Your Movie's Features\")\n self.groupBox175Layout= QHBoxLayout()\n self.groupBox175.setLayout(self.groupBox175Layout)\n self.groupBox175Layout.addWidget(self.tableWidget)\n \n # create box for guessing\n self.groupBox2 = QGroupBox('Input your guess')\n self.groupBox2Layout= QHBoxLayout()\n self.groupBox2.setLayout(self.groupBox2Layout)\n\n # add button and area to input guess\n self.txtInputText = QLineEdit(self)\n\n self.locked = QPushButton(\"Lock In!\",self)\n self.locked.clicked.connect(self.guess)\n\n self.groupBox2Layout.addWidget(self.txtInputText)\n self.groupBox2Layout.addWidget(self.locked)\n \n # create results box\n self.groupBox3 = QGroupBox(\"Results\")\n self.groupBox3Layout= QHBoxLayout()\n self.groupBox3.setLayout(self.groupBox3Layout)\n \n self.label3 = QLabel('')\n self.groupBox3Layout.addWidget(self.label3)\n\n # add boxes to main widget\n self.layout.addWidget(self.groupBox1)\n self.layout.addWidget(self.groupBox15)\n self.layout.addWidget(self.groupBox175)\n self.layout.addWidget(self.groupBox2)\n self.layout.addWidget(self.groupBox3)\n\n # resize main widget\n self.setCentralWidget(self.main_widget) \n self.showMaximized() \n\n # create button click function (autofills random data)\n def on_click(self):\n global movie\n movie = pred.sample(n = 1)\n movie2 = movie[['duration', 'title',\n 'date_published', 'director', 'writer', 'production_company',\n 'actors', 'description', 'budget_adjusted',\n 'usa_gross_income_adjusted', 'worldwide_gross_income_adjusted',\n 'genre1', 'genre2', 'genre3', 'region']]\n movie3 = movie2.to_numpy()\n movie4 = movie3[0]\n self.tableWidget.setItem(1,0, QTableWidgetItem(str(movie4[0])))\n self.tableWidget.setItem(1,1, QTableWidgetItem(str(movie4[1])))\n self.tableWidget.setItem(1,2, 
QTableWidgetItem(str(movie4[2])))\n self.tableWidget.setItem(1,3, QTableWidgetItem(str(movie4[3])))\n self.tableWidget.setItem(1,4, QTableWidgetItem(str(movie4[4])))\n self.tableWidget.setItem(1,5, QTableWidgetItem(str(movie4[5])))\n self.tableWidget.setItem(1,6, QTableWidgetItem(str(movie4[6])))\n self.tableWidget.setItem(1,7, QTableWidgetItem(str(movie4[7])))\n self.tableWidget.setItem(1,8, QTableWidgetItem(str(movie4[8])))\n self.tableWidget.setItem(1,9, QTableWidgetItem(str(movie4[9])))\n self.tableWidget.setItem(1,10, QTableWidgetItem(str(movie4[10])))\n self.tableWidget.setItem(1,11, QTableWidgetItem(str(movie4[11])))\n self.tableWidget.setItem(1,12, QTableWidgetItem(str(movie4[12])))\n self.tableWidget.setItem(1,13, QTableWidgetItem(str(movie4[13])))\n self.tableWidget.setItem(1,14, QTableWidgetItem(str(movie4[14])))\n\n # create results data\n def guess(self):\n movie22 = movie[['Actual Rating', 'Predicted Rating']]\n movie23 = movie22.to_numpy()\n movie24 = movie23[0]\n a = self.txtInputText.text()\n b = movie24[1]\n c = movie24[0]\n if abs(c - float(a)) < abs(c - b): \n self.label3.setText(\"The results are in...\\nYou Predicted: \" + a + \"\\nOur model predicted: \" + str(b) + \"\\nThe actual weighted average vote is: \" + str(c) + \"\\nYou win!\")\n if abs(c - float(a)) > abs(c - b): \n self.label3.setText(\"The results are in...\\nYou Predicted: \" + a + \"\\nOur model predicted: \" + str(b) + \"\\nThe actual weighted average vote is: \" + str(c) +\"\\nYou lose!\")\n if abs(c - float(a)) == abs(c - b): \n self.label3.setText(\"The results are in...\\nYou Predicted: \" + a + \"\\nOur model predicted: \" + str(b) + \"\\nThe actual weighted average vote is: \" + str(c) +\"\\nIt's a tie!\")\n\n# Main Menu window\nclass Menu(QMainWindow):\n\n def __init__(self):\n\n super().__init__()\n # set size\n self.left = 400\n self.top = 200\n self.width = 1000\n self.height = 700\n\n # Title\n\n self.Title = 'Group 2 Final Project'\n\n #call intiUI to create elements for menu\n\n self.initUI()\n \n def initUI(self):\n\n \n # Creates the menu and the items\n \n \n self.setWindowTitle(self.Title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.statusBar()\n \n self.main_widget = QWidget(self)\n \n # add label to main menu\n self.label = QLabel(self)\n self.label.setText(\"Welcome to our final project! \\nWe will be using modelling to predict the IMDb score of movies. 
\\nPlease click any of the tabs above to look around.\")\n self.label.setFont(QFont(\"Times\", 20))\n self.label.adjustSize()\n self.label.setAlignment(Qt.AlignCenter)\n self.label.move(40, 100)\n \n # add image to main window\n self.label_image = QLabel(self)\n \n if platform == \"darwin\":\n self.pix = QPixmap(get_repo_root() + '/adam-kritz-individual-project/IMDb-Logo.png')\n self.pix2 = self.pix.scaled(709, 341, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n elif platform == \"win32\": \n self.pix = QPixmap(get_repo_root_w() + '\\\\adam-kritz-individual-project\\\\IMDb-Logo.png')\n self.pix2 = self.pix.scaled(709, 341, transformMode=Qt.SmoothTransformation)\n self.label_image.setPixmap(self.pix2)\n \n self.label_image.adjustSize()\n self.label_image.move(145, 300)\n \n # create menu options\n mainMenu = self.menuBar()\n \n fileMenu = mainMenu.addMenu('File')\n \n preproc = mainMenu.addMenu('EDA') \n \n model = mainMenu.addMenu('Modelling') \n \n pred = mainMenu.addMenu('Prediction Game') \n\n # create about us section \n file3Button = QAction(\"About Us\", self) \n file3Button.setStatusTip(\"Information about our project\") \n file3Button.triggered.connect(self.file3) \n \n # legal info section\n file5Button = QAction(\"IMDb Legal Information\", self) \n file5Button.setStatusTip(\"IMDb Legal Information\") \n file5Button.triggered.connect(self.file5) \n \n # link to our report section\n file2Button = QAction(\"Link to our report\", self) \n file2Button.setStatusTip(\"Here you can find the full report of our results\") \n file2Button.triggered.connect(self.file2) \n \n # link to data set section\n file4Button = QAction(\"Link to the dataset\", self) \n file4Button.setStatusTip(\"Link to the dataset on Kaggle\") \n file4Button.triggered.connect(self.file4) \n \n # exit button\n exitButton = QAction(QIcon('enter.png'), '&Exit', self)\n exitButton.setShortcut('Ctrl+Q')\n exitButton.setStatusTip('Exit application')\n exitButton.triggered.connect(self.close)\n \n # add them all to the file menu options\n fileMenu.addAction(file3Button)\n fileMenu.addAction(file5Button)\n fileMenu.addAction(file2Button)\n fileMenu.addAction(file4Button)\n fileMenu.addAction(exitButton)\n \n # preprocessing tabs\n \n preproc1Button = QAction(\"Numerical Variables\", self) \n preproc1Button.setStatusTip(\"All the numeric variables we used\") \n preproc1Button.triggered.connect(self.preproc1) \n\n preproc2Button = QAction(\"Categorical Variables\", self) \n preproc2Button.setStatusTip(\"All the categorical variables we used\") \n preproc2Button.triggered.connect(self.preproc2)\n \n preproc3Button = QAction(\"Target Variable\", self) \n preproc3Button.setStatusTip(\"The target variable\") \n preproc3Button.triggered.connect(self.preproc3)\n\n # add to preprocessing menu\n preproc.addAction(preproc1Button) \n preproc.addAction(preproc2Button)\n preproc.addAction(preproc3Button)\n \n # modelling\n \n model1button = QAction('Models to Try', self)\n model1button.setStatusTip(\"Models to Try\") \n model1button.triggered.connect(self.model1) \n \n model2button = QAction('Hyperparameter Tuning and Validation Phase I', self)\n model2button.setStatusTip(\"Hyperparameter Tuning and Validation Phase I\") \n model2button.triggered.connect(self.model2) \n \n model3button = QAction('Hyperparameter Tuning and Validation Phase II', self)\n model3button.setStatusTip(\"Hyperparameter Tuning and Validation Phase II\") \n model3button.triggered.connect(self.model3) \n \n model4button = 
QAction('Model Selection', self)\n model4button.setStatusTip(\"Model Selection\") \n model4button.triggered.connect(self.model4) \n \n model5button = QAction('Model and Results Evaluation', self)\n model5button.setStatusTip(\"Model and Results Evaluation\") \n model5button.triggered.connect(self.model5) \n\n # add to modelling menu\n model.addAction(model1button)\n model.addAction(model2button)\n model.addAction(model3button)\n model.addAction(model4button)\n model.addAction(model5button)\n\n # create prediction section\n pred1button = QAction(\"Let's Predict\", self)\n pred1button.setStatusTip(\"This tool will make a prediction for a movie\") \n pred1button.triggered.connect(self.pred1) \n \n # add to menu option\n pred.addAction(pred1button)\n \n # This line shows the windows\n \n self.dialogs = list()\n\n self.show()\n \n # open our report\n def file2(self):\n webbrowser.open('https://docs.google.com/document/d/15mzM34VmwNzyYF0Mygbi-N_v0qPVQXY8LAIWD_Nz-p8/edit?usp=sharing') # this will be our report\n \n # give info about us\n def file3(self):\n QMessageBox.about(self, \"About Us\", \"We created this project in fall 2021 as part of our Intro to Data Mining Course at George Washington University. In this project, we took Stefano Leone’s IMDb dataset on Kaggle, and used different modeling techniques to predict the weighted average vote of movies based on their features. \")\n \n # open dataset\n def file4(self):\n webbrowser.open('https://www.kaggle.com/stefanoleone992/imdb-extensive-dataset')\n \n # give legal info\n def file5(self):\n QMessageBox.about(self, \"IMDb License\", \"IMDb, IMDb.COM, and the IMDb logo are trademarks of IMDb.com, Inc. or its affiliates.\")\n \n # preprocessing open windows\n def preproc1(self):\n dialog = NumericalVars()\n self.dialogs.append(dialog) \n dialog.show() \n \n def preproc2(self):\n dialog = CategoricalVars()\n self.dialogs.append(dialog) \n dialog.show()\n \n def preproc3(self):\n dialog = TargetVar()\n self.dialogs.append(dialog) \n dialog.show()\n \n # modelling open windows\n def model1(self):\n dialog = ModelstoTry()\n self.dialogs.append(dialog) \n dialog.show()\n \n def model2(self):\n dialog = Hyp1()\n self.dialogs.append(dialog) \n dialog.show()\n \n def model3(self):\n dialog = Hyp2()\n self.dialogs.append(dialog) \n dialog.show()\n \n def model4(self):\n dialog = ModelSelection()\n self.dialogs.append(dialog) \n dialog.show()\n \n def model5(self):\n dialog = ModelResults()\n self.dialogs.append(dialog) \n dialog.show()\n \n # prediction open windws\n def pred1(self):\n dialog = predi()\n self.dialogs.append(dialog) \n dialog.show()\n \n\n\n# Application starts here\n\n\nif __name__ == \"__main__\":\n # run unzip results\n unzip_results()\n # change these to your file paths if you want to run the GUI by itself\n df = pd.read_csv(r'C:\\Users\\trash\\Desktop\\data 6103 work\\moviesdf.csv')\n pred = pd.read_csv(r'C:\\Users\\trash\\Desktop\\data 6103 work\\predictions_with_ids.csv')\n # creates the PyQt5 application\n app = QApplication(sys.argv)\n # Creates the menu\n mn = Menu()\n # create exit\n sys.exit(app.exec_())" ]
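The prediction-game portion of the record above ends with the guess-comparison logic: sample one movie from the test predictions, then compare the user's guess and the model's prediction against the actual rating. Below is a minimal standalone sketch of that comparison, assuming a frame with the same 'Actual Rating' and 'Predicted Rating' columns used in the GUI; the helper name and demo data are invented for illustration and are not part of the dumped file.

import pandas as pd

def play_round(pred_df, user_guess, random_state=None):
    """Sample one movie and report whether the user or the model was closer."""
    movie = pred_df.sample(n=1, random_state=random_state).iloc[0]
    actual = movie['Actual Rating']
    model_pred = movie['Predicted Rating']
    user_err = abs(actual - float(user_guess))
    model_err = abs(actual - model_pred)
    summary = (f"You predicted {user_guess}, the model predicted {model_pred}, "
               f"and the actual weighted average vote is {actual}. ")
    if user_err < model_err:
        return summary + "You win!"
    if user_err > model_err:
        return summary + "You lose!"
    return summary + "It's a tie!"

# Example with made-up ratings:
demo = pd.DataFrame({'Actual Rating': [7.2], 'Predicted Rating': [6.8]})
print(play_round(demo, user_guess=7.0, random_state=0))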
[ [ "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "matplotlib.figure.Figure", "pandas.read_csv", "matplotlib.gridspec.GridSpec", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg" ] ]
JoElfner/scikit-learn
[ "a538c37de8b7007250a296eddfb3bed6afabd500" ]
[ "sklearn/preprocessing/_data.py" ]
[ "# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Mathieu Blondel <mathieu@mblondel.org>\n# Olivier Grisel <olivier.grisel@ensta.org>\n# Andreas Mueller <amueller@ais.uni-bonn.de>\n# Eric Martin <eric@ericmart.in>\n# Giorgio Patrini <giorgio.patrini@anu.edu.au>\n# Eric Chang <ericchang2017@u.northwestern.edu>\n# License: BSD 3 clause\n\n\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy import stats\nfrom scipy import optimize\nfrom scipy.special import boxcox\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import check_array\nfrom ..utils.deprecation import deprecated\nfrom ..utils.extmath import row_norms\nfrom ..utils.extmath import _incremental_mean_and_var\nfrom ..utils.sparsefuncs_fast import (\n inplace_csr_row_normalize_l1,\n inplace_csr_row_normalize_l2,\n)\nfrom ..utils.sparsefuncs import (\n inplace_column_scale,\n mean_variance_axis,\n incr_mean_variance_axis,\n min_max_axis,\n)\nfrom ..utils.validation import (\n check_is_fitted,\n check_random_state,\n _check_sample_weight,\n FLOAT_DTYPES,\n)\n\nfrom ._encoders import OneHotEncoder\n\nBOUNDS_THRESHOLD = 1e-7\n\n__all__ = [\n \"Binarizer\",\n \"KernelCenterer\",\n \"MinMaxScaler\",\n \"MaxAbsScaler\",\n \"Normalizer\",\n \"OneHotEncoder\",\n \"RobustScaler\",\n \"StandardScaler\",\n \"QuantileTransformer\",\n \"PowerTransformer\",\n \"add_dummy_feature\",\n \"binarize\",\n \"normalize\",\n \"scale\",\n \"robust_scale\",\n \"maxabs_scale\",\n \"minmax_scale\",\n \"quantile_transform\",\n \"power_transform\",\n]\n\n\ndef _is_constant_feature(var, mean, n_samples):\n \"\"\"Detect if a feature is indistinguishable from a constant feature.\n\n The detection is based on its computed variance and on the theoretical\n error bounds of the '2 pass algorithm' for variance computation.\n\n See \"Algorithms for computing the sample variance: analysis and\n recommendations\", by Chan, Golub, and LeVeque.\n \"\"\"\n # In scikit-learn, variance is always computed using float64 accumulators.\n eps = np.finfo(np.float64).eps\n\n upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2\n return var <= upper_bound\n\n\ndef _handle_zeros_in_scale(scale, copy=True, constant_mask=None):\n \"\"\"Set scales of near constant features to 1.\n\n The goal is to avoid division by very small or zero values.\n\n Near constant features are detected automatically by identifying\n scales close to machine precision unless they are precomputed by\n the caller and passed with the `constant_mask` kwarg.\n\n Typically for standard scaling, the scales are the standard\n deviation while near constant features are better detected on the\n computed variances which are closer to machine precision by\n construction.\n \"\"\"\n # if we are fitting on 1D arrays, scale might be a scalar\n if np.isscalar(scale):\n if scale == 0.0:\n scale = 1.0\n return scale\n elif isinstance(scale, np.ndarray):\n if constant_mask is None:\n # Detect near constant values to avoid dividing by a very small\n # value that could lead to suprising results and numerical\n # stability issues.\n constant_mask = scale < 10 * np.finfo(scale.dtype).eps\n\n if copy:\n # New array to avoid side-effects\n scale = scale.copy()\n scale[constant_mask] = 1.0\n return scale\n\n\ndef scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):\n \"\"\"Standardize a dataset along any axis.\n\n Center to the mean and component wise scale to unit variance.\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n Parameters\n 
----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to center and scale.\n\n axis : int, default=0\n axis used to compute the means and standard deviations along. If 0,\n independently standardize each feature, otherwise (if 1) standardize\n each sample.\n\n with_mean : bool, default=True\n If True, center the data before scaling.\n\n with_std : bool, default=True\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : bool, default=True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSC matrix and if axis is 1).\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_mean=False` (in that case, only variance scaling will be\n performed on the features of the CSC matrix) or to call `X.toarray()`\n if he/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSC matrix.\n\n NaNs are treated as missing values: disregarded to compute the statistics,\n and maintained during the data transformation.\n\n We use a biased estimator for the standard deviation, equivalent to\n `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to\n affect model performance.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n .. warning:: Risk of data leak\n\n Do not use :func:`~sklearn.preprocessing.scale` unless you know\n what you are doing. A common mistake is to apply it to the entire data\n *before* splitting into training and test sets. This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.StandardScaler` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.\n\n See Also\n --------\n StandardScaler : Performs scaling to unit variance using the Transformer\n API (e.g. 
as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n \"\"\" # noqa\n X = check_array(\n X,\n accept_sparse=\"csc\",\n copy=copy,\n ensure_2d=False,\n estimator=\"the scale function\",\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n if sparse.issparse(X):\n if with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` instead\"\n \" See docstring for motivation and alternatives.\"\n )\n if axis != 0:\n raise ValueError(\n \"Can only scale sparse matrix on axis=0, got axis=%d\" % axis\n )\n if with_std:\n _, var = mean_variance_axis(X, axis=0)\n var = _handle_zeros_in_scale(var, copy=False)\n inplace_column_scale(X, 1 / np.sqrt(var))\n else:\n X = np.asarray(X)\n if with_mean:\n mean_ = np.nanmean(X, axis)\n if with_std:\n scale_ = np.nanstd(X, axis)\n # Xr is a view on the original array that enables easy use of\n # broadcasting on the axis in which we are interested in\n Xr = np.rollaxis(X, axis)\n if with_mean:\n Xr -= mean_\n mean_1 = np.nanmean(Xr, axis=0)\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n if not np.allclose(mean_1, 0):\n warnings.warn(\n \"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\"\n )\n Xr -= mean_1\n if with_std:\n scale_ = _handle_zeros_in_scale(scale_, copy=False)\n Xr /= scale_\n if with_mean:\n mean_2 = np.nanmean(Xr, axis=0)\n # If mean_2 is not 'close to zero', it comes from the fact that\n # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even\n # if mean_1 was close to zero. The problem is thus essentially\n # due to the lack of precision of mean_. A solution is then to\n # subtract the mean again:\n if not np.allclose(mean_2, 0):\n warnings.warn(\n \"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0. \"\n )\n Xr -= mean_2\n return X\n\n\nclass MinMaxScaler(TransformerMixin, BaseEstimator):\n \"\"\"Transform features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, e.g. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n Parameters\n ----------\n feature_range : tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n copy : bool, default=True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array).\n\n clip : bool, default=False\n Set to True to clip transformed values of held-out data to\n provided `feature range`.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n min_ : ndarray of shape (n_features,)\n Per feature adjustment for minimum. Equivalent to\n ``min - X.min(axis=0) * self.scale_``\n\n scale_ : ndarray of shape (n_features,)\n Per feature relative scaling of the data. 
Equivalent to\n ``(max - min) / (X.max(axis=0) - X.min(axis=0))``\n\n .. versionadded:: 0.17\n *scale_* attribute.\n\n data_min_ : ndarray of shape (n_features,)\n Per feature minimum seen in the data\n\n .. versionadded:: 0.17\n *data_min_*\n\n data_max_ : ndarray of shape (n_features,)\n Per feature maximum seen in the data\n\n .. versionadded:: 0.17\n *data_max_*\n\n data_range_ : ndarray of shape (n_features,)\n Per feature range ``(data_max_ - data_min_)`` seen in the data\n\n .. versionadded:: 0.17\n *data_range_*\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n n_samples_seen_ : int\n The number of samples processed by the estimator.\n It will be reset on new calls to fit, but increments across\n ``partial_fit`` calls.\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n minmax_scale : Equivalent function without the estimator API.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n Examples\n --------\n >>> from sklearn.preprocessing import MinMaxScaler\n >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]\n >>> scaler = MinMaxScaler()\n >>> print(scaler.fit(data))\n MinMaxScaler()\n >>> print(scaler.data_max_)\n [ 1. 18.]\n >>> print(scaler.transform(data))\n [[0. 0. ]\n [0.25 0.25]\n [0.5 0.5 ]\n [1. 1. ]]\n >>> print(scaler.transform([[2, 2]]))\n [[1.5 0. ]]\n \"\"\"\n\n def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):\n self.feature_range = feature_range\n self.copy = copy\n self.clip = clip\n\n def _reset(self):\n \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n __init__ parameters are not touched.\n \"\"\"\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, \"scale_\"):\n del self.scale_\n del self.min_\n del self.n_samples_seen_\n del self.data_min_\n del self.data_max_\n del self.data_range_\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y)\n\n def partial_fit(self, X, y=None):\n \"\"\"Online computation of min and max on X for later scaling.\n\n All of X is processed as a single batch. This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\n \"Minimum of desired feature range must be smaller than maximum. 
Got %s.\"\n % str(feature_range)\n )\n\n if sparse.issparse(X):\n raise TypeError(\n \"MinMaxScaler does not support sparse input. \"\n \"Consider using MaxAbsScaler instead.\"\n )\n\n first_pass = not hasattr(self, \"n_samples_seen_\")\n X = self._validate_data(\n X,\n reset=first_pass,\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n data_min = np.nanmin(X, axis=0)\n data_max = np.nanmax(X, axis=0)\n\n if first_pass:\n self.n_samples_seen_ = X.shape[0]\n else:\n data_min = np.minimum(self.data_min_, data_min)\n data_max = np.maximum(self.data_max_, data_max)\n self.n_samples_seen_ += X.shape[0]\n\n data_range = data_max - data_min\n self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(\n data_range, copy=True\n )\n self.min_ = feature_range[0] - data_min * self.scale_\n self.data_min_ = data_min\n self.data_max_ = data_max\n self.data_range_ = data_range\n return self\n\n def transform(self, X):\n \"\"\"Scale features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed.\n\n Returns\n -------\n Xt : ndarray of shape (n_samples, n_features)\n Transformed data.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(\n X,\n copy=self.copy,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n reset=False,\n )\n\n X *= self.scale_\n X += self.min_\n if self.clip:\n np.clip(X, self.feature_range[0], self.feature_range[1], out=X)\n return X\n\n def inverse_transform(self, X):\n \"\"\"Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed. It cannot be sparse.\n\n Returns\n -------\n Xt : ndarray of shape (n_samples, n_features)\n Transformed data.\n \"\"\"\n check_is_fitted(self)\n\n X = check_array(\n X, copy=self.copy, dtype=FLOAT_DTYPES, force_all_finite=\"allow-nan\"\n )\n\n X -= self.min_\n X /= self.scale_\n return X\n\n def _more_tags(self):\n return {\"allow_nan\": True}\n\n\ndef minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):\n \"\"\"Transform features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by (when ``axis=0``)::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n The transformation is calculated as (when ``axis=0``)::\n\n X_scaled = scale * X + min - X.min(axis=0) * scale\n where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n .. versionadded:: 0.17\n *minmax_scale* function interface\n to :class:`~sklearn.preprocessing.MinMaxScaler`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n feature_range : tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n axis : int, default=0\n Axis used to scale along. 
If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : bool, default=True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n\n Returns\n -------\n X_tr : ndarray of shape (n_samples, n_features)\n The transformed data.\n\n .. warning:: Risk of data leak\n\n Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know\n what you are doing. A common mistake is to apply it to the entire data\n *before* splitting into training and test sets. This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.MinMaxScaler` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`.\n\n See Also\n --------\n MinMaxScaler : Performs scaling to a given range using the Transformer\n API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n Notes\n -----\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\" # noqa\n # Unlike the scaler object, this function allows 1d input.\n # If copy is required, it will be done inside the scaler object.\n X = check_array(\n X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, force_all_finite=\"allow-nan\"\n )\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n return X\n\n\nclass StandardScaler(TransformerMixin, BaseEstimator):\n \"\"\"Standardize features by removing the mean and scaling to unit variance.\n\n The standard score of a sample `x` is calculated as:\n\n z = (x - u) / s\n\n where `u` is the mean of the training samples or zero if `with_mean=False`,\n and `s` is the standard deviation of the training samples or one if\n `with_std=False`.\n\n Centering and scaling happen independently on each feature by computing\n the relevant statistics on the samples in the training set. Mean and\n standard deviation are then stored to be used on later data using\n :meth:`transform`.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators: they might behave badly if the\n individual features do not more or less look like standard normally\n distributed data (e.g. Gaussian with 0 mean and unit variance).\n\n For instance many elements used in the objective function of\n a learning algorithm (such as the RBF kernel of Support Vector\n Machines or the L1 and L2 regularizers of linear models) assume that\n all features are centered around 0 and have variance in the same\n order. 
If a feature has a variance that is orders of magnitude larger\n that others, it might dominate the objective function and make the\n estimator unable to learn from other features correctly as expected.\n\n This scaler can also be applied to sparse CSR or CSC matrices by passing\n `with_mean=False` to avoid breaking the sparsity structure of the data.\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n Parameters\n ----------\n copy : bool, default=True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n with_mean : bool, default=True\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_std : bool, default=True\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n Attributes\n ----------\n scale_ : ndarray of shape (n_features,) or None\n Per feature relative scaling of the data to achieve zero mean and unit\n variance. Generally this is calculated using `np.sqrt(var_)`. If a\n variance is zero, we can't achieve unit variance, and the data is left\n as-is, giving a scaling factor of 1. `scale_` is equal to `None`\n when `with_std=False`.\n\n .. versionadded:: 0.17\n *scale_*\n\n mean_ : ndarray of shape (n_features,) or None\n The mean value for each feature in the training set.\n Equal to ``None`` when ``with_mean=False``.\n\n var_ : ndarray of shape (n_features,) or None\n The variance for each feature in the training set. Used to compute\n `scale_`. Equal to ``None`` when ``with_std=False``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_samples_seen_ : int or ndarray of shape (n_features,)\n The number of samples processed by the estimator for each feature.\n If there are no missing samples, the ``n_samples_seen`` will be an\n integer, otherwise it will be an array of dtype int. If\n `sample_weights` are used it will be a float (if no missing data)\n or an array of dtype float that sums the weights seen so far.\n Will be reset on new calls to fit, but increments across\n ``partial_fit`` calls.\n\n See Also\n --------\n scale : Equivalent function without the estimator API.\n\n :class:`~sklearn.decomposition.PCA` : Further removes the linear\n correlation across features with 'whiten=True'.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n We use a biased estimator for the standard deviation, equivalent to\n `numpy.std(x, ddof=0)`. 
Note that the choice of `ddof` is unlikely to\n affect model performance.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n Examples\n --------\n >>> from sklearn.preprocessing import StandardScaler\n >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]\n >>> scaler = StandardScaler()\n >>> print(scaler.fit(data))\n StandardScaler()\n >>> print(scaler.mean_)\n [0.5 0.5]\n >>> print(scaler.transform(data))\n [[-1. -1.]\n [-1. -1.]\n [ 1. 1.]\n [ 1. 1.]]\n >>> print(scaler.transform([[2, 2]]))\n [[3. 3.]]\n \"\"\" # noqa\n\n def __init__(self, *, copy=True, with_mean=True, with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy = copy\n\n def _reset(self):\n \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n __init__ parameters are not touched.\n \"\"\"\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, \"scale_\"):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_\n\n def fit(self, X, y=None, sample_weight=None):\n \"\"\"Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y, sample_weight)\n\n def partial_fit(self, X, y=None, sample_weight=None):\n \"\"\"\n Online computation of mean and std on X for later scaling.\n\n All of X is processed as a single batch. This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n The algorithm for incremental mean and std is given in Equation 1.5a,b\n in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. \"Algorithms\n for computing the sample variance: Analysis and recommendations.\"\n The American Statistician 37.3 (1983): 242-247:\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample.\n\n .. versionadded:: 0.24\n parameter *sample_weight* support to StandardScaler.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n first_call = not hasattr(self, \"n_samples_seen_\")\n X = self._validate_data(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n reset=first_call,\n )\n n_features = X.shape[1]\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n\n # Even in the case of `with_mean=False`, we update the mean anyway\n # This is needed for the incremental computation of the var\n # See incr_mean_variance_axis and _incremental_mean_variance_axis\n\n # if n_samples_seen_ is an integer (i.e. 
no missing values), we need to\n # transform it to a NumPy array of shape (n_features,) required by\n # incr_mean_variance_axis and _incremental_variance_axis\n dtype = np.int64 if sample_weight is None else X.dtype\n if not hasattr(self, \"n_samples_seen_\"):\n self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)\n elif np.size(self.n_samples_seen_) == 1:\n self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1])\n self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False)\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\"\n )\n sparse_constructor = (\n sparse.csr_matrix if X.format == \"csr\" else sparse.csc_matrix\n )\n\n if self.with_std:\n # First pass\n if not hasattr(self, \"scale_\"):\n self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis(\n X, axis=0, weights=sample_weight, return_sum_weights=True\n )\n # Next passes\n else:\n (\n self.mean_,\n self.var_,\n self.n_samples_seen_,\n ) = incr_mean_variance_axis(\n X,\n axis=0,\n last_mean=self.mean_,\n last_var=self.var_,\n last_n=self.n_samples_seen_,\n weights=sample_weight,\n )\n # We force the mean and variance to float64 for large arrays\n # See https://github.com/scikit-learn/scikit-learn/pull/12338\n self.mean_ = self.mean_.astype(np.float64, copy=False)\n self.var_ = self.var_.astype(np.float64, copy=False)\n else:\n self.mean_ = None # as with_mean must be False for sparse\n self.var_ = None\n weights = _check_sample_weight(sample_weight, X)\n sum_weights_nan = weights @ sparse_constructor(\n (np.isnan(X.data), X.indices, X.indptr), shape=X.shape\n )\n self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype(\n dtype\n )\n else:\n # First pass\n if not hasattr(self, \"scale_\"):\n self.mean_ = 0.0\n if self.with_std:\n self.var_ = 0.0\n else:\n self.var_ = None\n\n if not self.with_mean and not self.with_std:\n self.mean_ = None\n self.var_ = None\n self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)\n\n else:\n self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var(\n X,\n self.mean_,\n self.var_,\n self.n_samples_seen_,\n sample_weight=sample_weight,\n )\n\n # for backward-compatibility, reduce n_samples_seen_ to an integer\n # if the number of samples is the same for each feature (i.e. 
no\n        # missing values)\n        if np.ptp(self.n_samples_seen_) == 0:\n            self.n_samples_seen_ = self.n_samples_seen_[0]\n\n        if self.with_std:\n            # Extract the list of near constant features on the raw variances,\n            # before taking the square root.\n            constant_mask = _is_constant_feature(\n                self.var_, self.mean_, self.n_samples_seen_\n            )\n            self.scale_ = _handle_zeros_in_scale(\n                np.sqrt(self.var_), copy=False, constant_mask=constant_mask\n            )\n        else:\n            self.scale_ = None\n\n        return self\n\n    def transform(self, X, copy=None):\n        \"\"\"Perform standardization by centering and scaling.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix} of shape (n_samples, n_features)\n            The data used to scale along the features axis.\n        copy : bool, default=None\n            Copy the input X or not.\n\n        Returns\n        -------\n        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n            Transformed array.\n        \"\"\"\n        check_is_fitted(self)\n\n        copy = copy if copy is not None else self.copy\n        X = self._validate_data(\n            X,\n            reset=False,\n            accept_sparse=\"csr\",\n            copy=copy,\n            estimator=self,\n            dtype=FLOAT_DTYPES,\n            force_all_finite=\"allow-nan\",\n        )\n\n        if sparse.issparse(X):\n            if self.with_mean:\n                raise ValueError(\n                    \"Cannot center sparse matrices: pass `with_mean=False` \"\n                    \"instead. See docstring for motivation and alternatives.\"\n                )\n            if self.scale_ is not None:\n                inplace_column_scale(X, 1 / self.scale_)\n        else:\n            if self.with_mean:\n                X -= self.mean_\n            if self.with_std:\n                X /= self.scale_\n        return X\n\n    def inverse_transform(self, X, copy=None):\n        \"\"\"Scale back the data to the original representation.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix} of shape (n_samples, n_features)\n            The data used to scale along the features axis.\n        copy : bool, default=None\n            Copy the input X or not.\n\n        Returns\n        -------\n        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n            Transformed array.\n        \"\"\"\n        check_is_fitted(self)\n\n        copy = copy if copy is not None else self.copy\n        X = check_array(\n            X,\n            accept_sparse=\"csr\",\n            copy=copy,\n            estimator=self,\n            dtype=FLOAT_DTYPES,\n            force_all_finite=\"allow-nan\",\n        )\n\n        if sparse.issparse(X):\n            if self.with_mean:\n                raise ValueError(\n                    \"Cannot uncenter sparse matrices: pass `with_mean=False` \"\n                    \"instead. See docstring for motivation and alternatives.\"\n                )\n            if self.scale_ is not None:\n                inplace_column_scale(X, self.scale_)\n        else:\n            if self.with_std:\n                X *= self.scale_\n            if self.with_mean:\n                X += self.mean_\n        return X\n\n    def _more_tags(self):\n        return {\"allow_nan\": True, \"preserves_dtype\": [np.float64, np.float32]}\n\n\nclass MaxAbsScaler(TransformerMixin, BaseEstimator):\n    \"\"\"Scale each feature by its maximum absolute value.\n\n    This estimator scales and translates each feature individually such\n    that the maximal absolute value of each feature in the\n    training set will be 1.0. It does not shift/center the data, and\n    thus does not destroy any sparsity.\n\n    This scaler can also be applied to sparse CSR or CSC matrices.\n\n    .. versionadded:: 0.17\n\n    Parameters\n    ----------\n    copy : bool, default=True\n        Set to False to perform inplace scaling and avoid a copy (if the input\n        is already a numpy array).\n\n    Attributes\n    ----------\n    scale_ : ndarray of shape (n_features,)\n        Per feature relative scaling of the data.\n\n        .. versionadded:: 0.17\n           *scale_* attribute.\n\n    max_abs_ : ndarray of shape (n_features,)\n        Per feature maximum absolute value.\n\n    n_features_in_ : int\n        Number of features seen during :term:`fit`.\n\n        .. 
versionadded:: 0.24\n\n    feature_names_in_ : ndarray of shape (`n_features_in_`,)\n        Names of features seen during :term:`fit`. Defined only when `X`\n        has feature names that are all strings.\n\n        .. versionadded:: 1.0\n\n    n_samples_seen_ : int\n        The number of samples processed by the estimator. Will be reset on\n        new calls to fit, but increments across ``partial_fit`` calls.\n\n    See Also\n    --------\n    maxabs_scale : Equivalent function without the estimator API.\n\n    Notes\n    -----\n    NaNs are treated as missing values: disregarded in fit, and maintained in\n    transform.\n\n    For a comparison of the different scalers, transformers, and normalizers,\n    see :ref:`examples/preprocessing/plot_all_scaling.py\n    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n    Examples\n    --------\n    >>> from sklearn.preprocessing import MaxAbsScaler\n    >>> X = [[ 1., -1.,  2.],\n    ...      [ 2.,  0.,  0.],\n    ...      [ 0.,  1., -1.]]\n    >>> transformer = MaxAbsScaler().fit(X)\n    >>> transformer\n    MaxAbsScaler()\n    >>> transformer.transform(X)\n    array([[ 0.5, -1. ,  1. ],\n           [ 1. ,  0. ,  0. ],\n           [ 0. ,  1. , -0.5]])\n    \"\"\"\n\n    def __init__(self, *, copy=True):\n        self.copy = copy\n\n    def _reset(self):\n        \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n        __init__ parameters are not touched.\n        \"\"\"\n\n        # Checking one attribute is enough, because they are all set together\n        # in partial_fit\n        if hasattr(self, \"scale_\"):\n            del self.scale_\n            del self.n_samples_seen_\n            del self.max_abs_\n\n    def fit(self, X, y=None):\n        \"\"\"Compute the maximum absolute value to be used for later scaling.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix} of shape (n_samples, n_features)\n            The data used to compute the per-feature minimum and maximum\n            used for later scaling along the features axis.\n\n        y : None\n            Ignored.\n\n        Returns\n        -------\n        self : object\n            Fitted scaler.\n        \"\"\"\n        # Reset internal state before fitting\n        self._reset()\n        return self.partial_fit(X, y)\n\n    def partial_fit(self, X, y=None):\n        \"\"\"\n        Online computation of max absolute value of X for later scaling.\n\n        All of X is processed as a single batch. 
This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n first_pass = not hasattr(self, \"n_samples_seen_\")\n X = self._validate_data(\n X,\n reset=first_pass,\n accept_sparse=(\"csr\", \"csc\"),\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)\n max_abs = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n max_abs = np.nanmax(np.abs(X), axis=0)\n\n if first_pass:\n self.n_samples_seen_ = X.shape[0]\n else:\n max_abs = np.maximum(self.max_abs_, max_abs)\n self.n_samples_seen_ += X.shape[0]\n\n self.max_abs_ = max_abs\n self.scale_ = _handle_zeros_in_scale(max_abs, copy=True)\n return self\n\n def transform(self, X):\n \"\"\"Scale the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data that should be scaled.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=self.copy,\n reset=False,\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n if sparse.issparse(X):\n inplace_column_scale(X, 1.0 / self.scale_)\n else:\n X /= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data that should be transformed back.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n \"\"\"\n check_is_fitted(self)\n X = check_array(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=self.copy,\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n if sparse.issparse(X):\n inplace_column_scale(X, self.scale_)\n else:\n X *= self.scale_\n return X\n\n def _more_tags(self):\n return {\"allow_nan\": True}\n\n\ndef maxabs_scale(X, *, axis=0, copy=True):\n \"\"\"Scale each feature to the [-1, 1] range without breaking the sparsity.\n\n This estimator scales each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data.\n\n axis : int, default=0\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : bool, default=True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n .. warning:: Risk of data leak\n\n Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know what\n you are doing. A common mistake is to apply it to the entire data\n *before* splitting into training and test sets. 
This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.MaxAbsScaler` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`.\n\n See Also\n --------\n MaxAbsScaler : Performs scaling to the [-1, 1] range using\n the Transformer API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n Notes\n -----\n NaNs are treated as missing values: disregarded to compute the statistics,\n and maintained during the data transformation.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\" # noqa\n # Unlike the scaler object, this function allows 1d input.\n\n # If copy is required, it will be done inside the scaler object.\n X = check_array(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=False,\n ensure_2d=False,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = MaxAbsScaler(copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n return X\n\n\nclass RobustScaler(TransformerMixin, BaseEstimator):\n \"\"\"Scale features using statistics that are robust to outliers.\n\n This Scaler removes the median and scales the data according to\n the quantile range (defaults to IQR: Interquartile Range).\n The IQR is the range between the 1st quartile (25th quantile)\n and the 3rd quartile (75th quantile).\n\n Centering and scaling happen independently on each feature by\n computing the relevant statistics on the samples in the training\n set. Median and interquartile range are then stored to be used on\n later data using the ``transform`` method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators. Typically this is done by removing the mean\n and scaling to unit variance. However, outliers can often influence the\n sample mean / variance in a negative way. In such cases, the median and\n the interquartile range often give better results.\n\n .. versionadded:: 0.17\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n Parameters\n ----------\n with_centering : bool, default=True\n If True, center the data before scaling.\n This will cause ``transform`` to raise an exception when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_scaling : bool, default=True\n If True, scale the data to interquartile range.\n\n quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \\\n default=(25.0, 75.0), == (1st quantile, 3rd quantile), == IQR\n Quantile range used to calculate ``scale_``.\n\n .. versionadded:: 0.18\n\n copy : bool, default=True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n unit_variance : bool, default=False\n If True, scale data so that normally distributed features have a\n variance of 1. 
In general, if the difference between the x-values of\n ``q_max`` and ``q_min`` for a standard normal distribution is greater\n than 1, the dataset will be scaled down. If less than 1, the dataset\n will be scaled up.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n center_ : array of floats\n The median value for each feature in the training set.\n\n scale_ : array of floats\n The (scaled) interquartile range for each feature in the training set.\n\n .. versionadded:: 0.17\n *scale_* attribute.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Examples\n --------\n >>> from sklearn.preprocessing import RobustScaler\n >>> X = [[ 1., -2., 2.],\n ... [ -2., 1., 3.],\n ... [ 4., 1., -2.]]\n >>> transformer = RobustScaler().fit(X)\n >>> transformer\n RobustScaler()\n >>> transformer.transform(X)\n array([[ 0. , -2. , 0. ],\n [-1. , 0. , 0.4],\n [ 1. , 0. , -1.6]])\n\n See Also\n --------\n robust_scale : Equivalent function without the estimator API.\n\n :class:`~sklearn.decomposition.PCA`\n Further removes the linear correlation across features with\n 'whiten=True'.\n\n Notes\n -----\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n https://en.wikipedia.org/wiki/Median\n https://en.wikipedia.org/wiki/Interquartile_range\n \"\"\"\n\n def __init__(\n self,\n *,\n with_centering=True,\n with_scaling=True,\n quantile_range=(25.0, 75.0),\n copy=True,\n unit_variance=False,\n ):\n self.with_centering = with_centering\n self.with_scaling = with_scaling\n self.quantile_range = quantile_range\n self.unit_variance = unit_variance\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the median and quantiles to be used for scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to compute the median and quantiles\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n # at fit, convert sparse matrices to csc for optimized computation of\n # the quantiles\n X = self._validate_data(\n X,\n accept_sparse=\"csc\",\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n q_min, q_max = self.quantile_range\n if not 0 <= q_min <= q_max <= 100:\n raise ValueError(\"Invalid quantile range: %s\" % str(self.quantile_range))\n\n if self.with_centering:\n if sparse.issparse(X):\n raise ValueError(\n \"Cannot center sparse matrices: use `with_centering=False`\"\n \" instead. 
See docstring for motivation and alternatives.\"\n )\n self.center_ = np.nanmedian(X, axis=0)\n else:\n self.center_ = None\n\n if self.with_scaling:\n quantiles = []\n for feature_idx in range(X.shape[1]):\n if sparse.issparse(X):\n column_nnz_data = X.data[\n X.indptr[feature_idx] : X.indptr[feature_idx + 1]\n ]\n column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)\n column_data[: len(column_nnz_data)] = column_nnz_data\n else:\n column_data = X[:, feature_idx]\n\n quantiles.append(np.nanpercentile(column_data, self.quantile_range))\n\n quantiles = np.transpose(quantiles)\n\n self.scale_ = quantiles[1] - quantiles[0]\n self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)\n if self.unit_variance:\n adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0)\n self.scale_ = self.scale_ / adjust\n else:\n self.scale_ = None\n\n return self\n\n def transform(self, X):\n \"\"\"Center and scale the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the specified axis.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=self.copy,\n estimator=self,\n dtype=FLOAT_DTYPES,\n reset=False,\n force_all_finite=\"allow-nan\",\n )\n\n if sparse.issparse(X):\n if self.with_scaling:\n inplace_column_scale(X, 1.0 / self.scale_)\n else:\n if self.with_centering:\n X -= self.center_\n if self.with_scaling:\n X /= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The rescaled data to be transformed back.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n \"\"\"\n check_is_fitted(self)\n X = check_array(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=self.copy,\n estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n\n if sparse.issparse(X):\n if self.with_scaling:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_scaling:\n X *= self.scale_\n if self.with_centering:\n X += self.center_\n return X\n\n def _more_tags(self):\n return {\"allow_nan\": True}\n\n\ndef robust_scale(\n X,\n *,\n axis=0,\n with_centering=True,\n with_scaling=True,\n quantile_range=(25.0, 75.0),\n copy=True,\n unit_variance=False,\n):\n \"\"\"Standardize a dataset along any axis\n\n Center to the median and component wise scale\n according to the interquartile range.\n\n Read more in the :ref:`User Guide <preprocessing_scaler>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_sample, n_features)\n The data to center and scale.\n\n axis : int, default=0\n axis used to compute the medians and IQR along. If 0,\n independently scale each feature, otherwise (if 1) scale\n each sample.\n\n with_centering : bool, default=True\n If True, center the data before scaling.\n\n with_scaling : bool, default=True\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0\n default=(25.0, 75.0), == (1st quantile, 3rd quantile), == IQR\n Quantile range used to calculate ``scale_``.\n\n .. 
versionadded:: 0.18\n\n copy : bool, default=True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n unit_variance : bool, default=False\n If True, scale data so that normally distributed features have a\n variance of 1. In general, if the difference between the x-values of\n ``q_max`` and ``q_min`` for a standard normal distribution is greater\n than 1, the dataset will be scaled down. If less than 1, the dataset\n will be scaled up.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_centering=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n .. warning:: Risk of data leak\n\n Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know\n what you are doing. A common mistake is to apply it to the entire data\n *before* splitting into training and test sets. This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.RobustScaler` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`.\n\n See Also\n --------\n RobustScaler : Performs centering and scaling using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n \"\"\"\n X = check_array(\n X,\n accept_sparse=(\"csr\", \"csc\"),\n copy=False,\n ensure_2d=False,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = RobustScaler(\n with_centering=with_centering,\n with_scaling=with_scaling,\n quantile_range=quantile_range,\n unit_variance=unit_variance,\n copy=copy,\n )\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n return X\n\n\ndef normalize(X, norm=\"l2\", *, axis=1, copy=True, return_norm=False):\n \"\"\"Scale input vectors individually to unit norm (vector length).\n\n Read more in the :ref:`User Guide <preprocessing_normalization>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : {'l1', 'l2', 'max'}, default='l2'\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : {0, 1}, default=1\n axis used to normalize the data along. 
If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : bool, default=True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n return_norm : bool, default=False\n whether to return the computed norms\n\n Returns\n -------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Normalized input X.\n\n norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )\n An array of norms along given axis for X.\n When X is sparse, a NotImplementedError will be raised\n for norm 'l1' or 'l2'.\n\n See Also\n --------\n Normalizer : Performs normalization using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n\n Notes\n -----\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n \"\"\"\n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = \"csc\"\n elif axis == 1:\n sparse_format = \"csr\"\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(\n X,\n accept_sparse=sparse_format,\n copy=copy,\n estimator=\"the normalize function\",\n dtype=FLOAT_DTYPES,\n )\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if return_norm and norm in (\"l1\", \"l2\"):\n raise NotImplementedError(\n \"return_norm=True is not implemented \"\n \"for sparse matrices with norm 'l1' \"\n \"or norm 'l2'\"\n )\n if norm == \"l1\":\n inplace_csr_row_normalize_l1(X)\n elif norm == \"l2\":\n inplace_csr_row_normalize_l2(X)\n elif norm == \"max\":\n mins, maxes = min_max_axis(X, 1)\n norms = np.maximum(abs(mins), maxes)\n norms_elementwise = norms.repeat(np.diff(X.indptr))\n mask = norms_elementwise != 0\n X.data[mask] /= norms_elementwise[mask]\n else:\n if norm == \"l1\":\n norms = np.abs(X).sum(axis=1)\n elif norm == \"l2\":\n norms = row_norms(X)\n elif norm == \"max\":\n norms = np.max(abs(X), axis=1)\n norms = _handle_zeros_in_scale(norms, copy=False)\n X /= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n if return_norm:\n return X, norms\n else:\n return X\n\n\nclass Normalizer(TransformerMixin, BaseEstimator):\n \"\"\"Normalize samples individually to unit norm.\n\n Each sample (i.e. each row of the data matrix) with at least one\n non zero component is rescaled independently of other samples so\n that its norm (l1, l2 or inf) equals one.\n\n This transformer is able to work both with dense numpy arrays and\n scipy.sparse matrix (use CSR format if you want to avoid the burden of\n a copy / conversion).\n\n Scaling inputs to unit norms is a common operation for text\n classification or clustering for instance. For instance the dot\n product of two l2-normalized TF-IDF vectors is the cosine similarity\n of the vectors and is the base similarity metric for the Vector\n Space Model commonly used by the Information Retrieval community.\n\n Read more in the :ref:`User Guide <preprocessing_normalization>`.\n\n Parameters\n ----------\n norm : {'l1', 'l2', 'max'}, default='l2'\n The norm to use to normalize each non zero sample. 
If norm='max'\n is used, values will be rescaled by the maximum of the absolute\n values.\n\n copy : bool, default=True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix).\n\n Attributes\n ----------\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Notes\n -----\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n See Also\n --------\n normalize : Equivalent function without the estimator API.\n\n Examples\n --------\n >>> from sklearn.preprocessing import Normalizer\n >>> X = [[4, 1, 2, 2],\n ... [1, 3, 9, 3],\n ... [5, 7, 5, 1]]\n >>> transformer = Normalizer().fit(X) # fit does nothing.\n >>> transformer\n Normalizer()\n >>> transformer.transform(X)\n array([[0.8, 0.2, 0.4, 0.4],\n [0.1, 0.3, 0.9, 0.3],\n [0.5, 0.7, 0.5, 0.1]])\n \"\"\"\n\n def __init__(self, norm=\"l2\", *, copy=True):\n self.norm = norm\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to estimate the normalization parameters.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n \"\"\"\n self._validate_data(X, accept_sparse=\"csr\")\n return self\n\n def transform(self, X, copy=None):\n \"\"\"Scale each non zero row of X to unit norm\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to normalize, row by row. 
scipy.sparse matrices should be\n in CSR format to avoid an un-necessary copy.\n\n copy : bool, default=None\n Copy the input X or not.\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Transformed array.\n \"\"\"\n copy = copy if copy is not None else self.copy\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n return normalize(X, norm=self.norm, axis=1, copy=copy)\n\n def _more_tags(self):\n return {\"stateless\": True}\n\n\ndef binarize(X, *, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or scipy.sparse matrix.\n\n Read more in the :ref:`User Guide <preprocessing_binarization>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR or CSC format to avoid an\n un-necessary copy.\n\n threshold : float, default=0.0\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : bool, default=True\n set to False to perform inplace binarization and avoid a copy\n (if the input is already a numpy array or a scipy.sparse CSR / CSC\n matrix and if axis is 1).\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n See Also\n --------\n Binarizer : Performs binarization using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n \"\"\"\n X = check_array(X, accept_sparse=[\"csr\", \"csc\"], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError(\"Cannot binarize a sparse matrix with threshold < 0\")\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return X\n\n\nclass Binarizer(TransformerMixin, BaseEstimator):\n \"\"\"Binarize data (set feature values to 0 or 1) according to a threshold.\n\n Values greater than the threshold map to 1, while values less than\n or equal to the threshold map to 0. With the default threshold of 0,\n only positive values map to 1.\n\n Binarization is a common operation on text count data where the\n analyst can decide to only consider the presence or absence of a\n feature rather than a quantified number of occurrences for instance.\n\n It can also be used as a pre-processing step for estimators that\n consider boolean random variables (e.g. modelled using the Bernoulli\n distribution in a Bayesian setting).\n\n Read more in the :ref:`User Guide <preprocessing_binarization>`.\n\n Parameters\n ----------\n threshold : float, default=0.0\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : bool, default=True\n Set to False to perform inplace binarization and avoid a copy (if\n the input is already a numpy array or a scipy.sparse CSR matrix).\n\n Attributes\n ----------\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n    See Also\n    --------\n    binarize : Equivalent function without the estimator API.\n    KBinsDiscretizer : Bin continuous data into intervals.\n    OneHotEncoder : Encode categorical features as a one-hot numeric array.\n\n    Notes\n    -----\n    If the input is a sparse matrix, only the non-zero values are subject\n    to update by the Binarizer class.\n\n    This estimator is stateless (besides constructor parameters), the\n    fit method does nothing but is useful when used in a pipeline.\n\n    Examples\n    --------\n    >>> from sklearn.preprocessing import Binarizer\n    >>> X = [[ 1., -1.,  2.],\n    ...      [ 2.,  0.,  0.],\n    ...      [ 0.,  1., -1.]]\n    >>> transformer = Binarizer().fit(X)  # fit does nothing.\n    >>> transformer\n    Binarizer()\n    >>> transformer.transform(X)\n    array([[1., 0., 1.],\n           [1., 0., 0.],\n           [0., 1., 0.]])\n    \"\"\"\n\n    def __init__(self, *, threshold=0.0, copy=True):\n        self.threshold = threshold\n        self.copy = copy\n\n    def fit(self, X, y=None):\n        \"\"\"Do nothing and return the estimator unchanged.\n\n        This method is just there to implement the usual API and hence\n        work in pipelines.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix} of shape (n_samples, n_features)\n            The data.\n\n        y : None\n            Ignored.\n\n        Returns\n        -------\n        self : object\n            Fitted transformer.\n        \"\"\"\n        self._validate_data(X, accept_sparse=\"csr\")\n        return self\n\n    def transform(self, X, copy=None):\n        \"\"\"Binarize each element of X.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix} of shape (n_samples, n_features)\n            The data to binarize, element by element.\n            scipy.sparse matrices should be in CSR format to avoid an\n            un-necessary copy.\n\n        copy : bool\n            Copy the input X or not.\n\n        Returns\n        -------\n        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n            Transformed array.\n        \"\"\"\n        copy = copy if copy is not None else self.copy\n        # TODO: This should be refactored because binarize also calls\n        # check_array\n        X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], copy=copy, reset=False)\n        return binarize(X, threshold=self.threshold, copy=False)\n\n    def _more_tags(self):\n        return {\"stateless\": True}\n\n\nclass KernelCenterer(TransformerMixin, BaseEstimator):\n    r\"\"\"Center an arbitrary kernel matrix :math:`K`.\n\n    Let us define a kernel :math:`K` such that:\n\n    .. math::\n        K(X, Y) = \\phi(X) . \\phi(Y)^{T}\n\n    :math:`\\phi(X)` is a function mapping of rows of :math:`X` to a\n    Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`.\n\n    This class allows computing :math:`\\tilde{K}(X, Y)` such that:\n\n    .. math::\n        \\tilde{K}(X, Y) = \\tilde{\\phi}(X) . \\tilde{\\phi}(Y)^{T}\n\n    :math:`\\tilde{\\phi}(X)` is the centered mapped data in the Hilbert\n    space.\n\n    `KernelCenterer` centers the features without explicitly computing the\n    mapping :math:`\\phi(\\cdot)`. Working with centered kernels is sometimes\n    expected when dealing with algebraic computations such as eigendecomposition\n    for :class:`~sklearn.decomposition.KernelPCA` for instance.\n\n    Read more in the :ref:`User Guide <kernel_centering>`.\n\n    Attributes\n    ----------\n    K_fit_rows_ : ndarray of shape (n_samples,)\n        Average of each column of kernel matrix.\n\n    K_fit_all_ : float\n        Average of kernel matrix.\n\n    n_features_in_ : int\n        Number of features seen during :term:`fit`.\n\n        .. versionadded:: 0.24\n\n    feature_names_in_ : ndarray of shape (`n_features_in_`,)\n        Names of features seen during :term:`fit`. Defined only when `X`\n        has feature names that are all strings.\n\n        .. 
versionadded:: 1.0\n\n See Also\n --------\n sklearn.kernel_approximation.Nystroem : Approximate a kernel map\n using a subset of the training data.\n\n References\n ----------\n .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.\n \"Nonlinear component analysis as a kernel eigenvalue problem.\"\n Neural computation 10.5 (1998): 1299-1319.\n <https://www.mlpack.org/papers/kpca.pdf>`_\n\n Examples\n --------\n >>> from sklearn.preprocessing import KernelCenterer\n >>> from sklearn.metrics.pairwise import pairwise_kernels\n >>> X = [[ 1., -2., 2.],\n ... [ -2., 1., 3.],\n ... [ 4., 1., -2.]]\n >>> K = pairwise_kernels(X, metric='linear')\n >>> K\n array([[ 9., 2., -2.],\n [ 2., 14., -13.],\n [ -2., -13., 21.]])\n >>> transformer = KernelCenterer().fit(K)\n >>> transformer\n KernelCenterer()\n >>> transformer.transform(K)\n array([[ 5., 0., -5.],\n [ 0., 14., -14.],\n [ -5., -14., 19.]])\n \"\"\"\n\n def __init__(self):\n # Needed for backported inspect.signature compatibility with PyPy\n pass\n\n def fit(self, K, y=None):\n \"\"\"Fit KernelCenterer.\n\n Parameters\n ----------\n K : ndarray of shape (n_samples, n_samples)\n Kernel matrix.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n K = self._validate_data(K, dtype=FLOAT_DTYPES)\n\n if K.shape[0] != K.shape[1]:\n raise ValueError(\n \"Kernel matrix must be a square matrix.\"\n \" Input is a {}x{} matrix.\".format(K.shape[0], K.shape[1])\n )\n\n n_samples = K.shape[0]\n self.K_fit_rows_ = np.sum(K, axis=0) / n_samples\n self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples\n return self\n\n def transform(self, K, copy=True):\n \"\"\"Center kernel matrix.\n\n Parameters\n ----------\n K : ndarray of shape (n_samples1, n_samples2)\n Kernel matrix.\n\n copy : bool, default=True\n Set to False to perform inplace computation.\n\n Returns\n -------\n K_new : ndarray of shape (n_samples1, n_samples2)\n Returns the instance itself.\n \"\"\"\n check_is_fitted(self)\n\n K = self._validate_data(K, copy=copy, dtype=FLOAT_DTYPES, reset=False)\n\n K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis]\n\n K -= self.K_fit_rows_\n K -= K_pred_cols\n K += self.K_fit_all_\n\n return K\n\n def _more_tags(self):\n return {\"pairwise\": True}\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"Attribute `_pairwise` was deprecated in \"\n \"version 0.24 and will be removed in 1.1.\"\n )\n @property\n def _pairwise(self):\n return True\n\n\ndef add_dummy_feature(X, value=1.0):\n \"\"\"Augment dataset with an additional dummy feature.\n\n This is useful for fitting an intercept term with implementations which\n cannot otherwise fit it directly.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data.\n\n value : float\n Value to use for the dummy feature.\n\n Returns\n -------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)\n Same data with dummy feature added as first column.\n\n Examples\n --------\n >>> from sklearn.preprocessing import add_dummy_feature\n >>> add_dummy_feature([[0, 1], [1, 0]])\n array([[1., 0., 1.],\n [1., 1., 0.]])\n \"\"\"\n X = check_array(X, accept_sparse=[\"csc\", \"csr\", \"coo\"], dtype=FLOAT_DTYPES)\n n_samples, n_features = X.shape\n shape = (n_samples, n_features + 1)\n if sparse.issparse(X):\n if sparse.isspmatrix_coo(X):\n # Shift columns to the right.\n col = X.col + 1\n # Column indices of dummy feature are 0 
everywhere.\n col = np.concatenate((np.zeros(n_samples), col))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n row = np.concatenate((np.arange(n_samples), X.row))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.full(n_samples, value), X.data))\n return sparse.coo_matrix((data, (row, col)), shape)\n elif sparse.isspmatrix_csc(X):\n # Shift index pointers since we need to add n_samples elements.\n indptr = X.indptr + n_samples\n # indptr[0] must be 0.\n indptr = np.concatenate((np.array([0]), indptr))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n indices = np.concatenate((np.arange(n_samples), X.indices))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.full(n_samples, value), X.data))\n return sparse.csc_matrix((data, indices, indptr), shape)\n else:\n klass = X.__class__\n return klass(add_dummy_feature(X.tocoo(), value))\n else:\n return np.hstack((np.full((n_samples, 1), value), X))\n\n\nclass QuantileTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Transform features using quantiles information.\n\n This method transforms the features to follow a uniform or a normal\n distribution. Therefore, for a given feature, this transformation tends\n to spread out the most frequent values. It also reduces the impact of\n (marginal) outliers: this is therefore a robust preprocessing scheme.\n\n The transformation is applied on each feature independently. First an\n estimate of the cumulative distribution function of a feature is\n used to map the original values to a uniform distribution. The obtained\n values are then mapped to the desired output distribution using the\n associated quantile function. Features values of new/unseen data that fall\n below or above the fitted range will be mapped to the bounds of the output\n distribution. Note that this transform is non-linear. It may distort linear\n correlations between variables measured at the same scale but renders\n variables measured at different scales more directly comparable.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n .. versionadded:: 0.19\n\n Parameters\n ----------\n n_quantiles : int, default=1000 or n_samples\n Number of quantiles to be computed. It corresponds to the number\n of landmarks used to discretize the cumulative distribution function.\n If n_quantiles is larger than the number of samples, n_quantiles is set\n to the number of samples as a larger number of quantiles does not give\n a better approximation of the cumulative distribution function\n estimator.\n\n output_distribution : {'uniform', 'normal'}, default='uniform'\n Marginal distribution for the transformed data. The choices are\n 'uniform' (default) or 'normal'.\n\n ignore_implicit_zeros : bool, default=False\n Only applies to sparse matrices. If True, the sparse entries of the\n matrix are discarded to compute the quantile statistics. If False,\n these entries are treated as zeros.\n\n subsample : int, default=1e5\n Maximum number of samples used to estimate the quantiles for\n computational efficiency. 
Note that the subsampling procedure may\n differ for value-identical sparse and dense matrices.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for subsampling and smoothing\n noise.\n Please see ``subsample`` for more details.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`\n\n copy : bool, default=True\n Set to False to perform inplace transformation and avoid a copy (if the\n input is already a numpy array).\n\n Attributes\n ----------\n n_quantiles_ : int\n The actual number of quantiles used to discretize the cumulative\n distribution function.\n\n quantiles_ : ndarray of shape (n_quantiles, n_features)\n The values corresponding the quantiles of reference.\n\n references_ : ndarray of shape (n_quantiles, )\n Quantiles of references.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import QuantileTransformer\n >>> rng = np.random.RandomState(0)\n >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)\n >>> qt = QuantileTransformer(n_quantiles=10, random_state=0)\n >>> qt.fit_transform(X)\n array([...])\n\n See Also\n --------\n quantile_transform : Equivalent function without the estimator API.\n PowerTransformer : Perform mapping to a normal distribution using a power\n transform.\n StandardScaler : Perform standardization that is faster, but less robust\n to outliers.\n RobustScaler : Perform robust standardization that removes the influence\n of outliers but does not put outliers and inliers on the same scale.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\"\n\n def __init__(\n self,\n *,\n n_quantiles=1000,\n output_distribution=\"uniform\",\n ignore_implicit_zeros=False,\n subsample=int(1e5),\n random_state=None,\n copy=True,\n ):\n self.n_quantiles = n_quantiles\n self.output_distribution = output_distribution\n self.ignore_implicit_zeros = ignore_implicit_zeros\n self.subsample = subsample\n self.random_state = random_state\n self.copy = copy\n\n def _dense_fit(self, X, random_state):\n \"\"\"Compute percentiles for dense matrices.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The data used to scale along the features axis.\n \"\"\"\n if self.ignore_implicit_zeros:\n warnings.warn(\n \"'ignore_implicit_zeros' takes effect only with\"\n \" sparse matrix. 
This parameter has no effect.\"\n )\n\n n_samples, n_features = X.shape\n references = self.references_ * 100\n\n self.quantiles_ = []\n for col in X.T:\n if self.subsample < n_samples:\n subsample_idx = random_state.choice(\n n_samples, size=self.subsample, replace=False\n )\n col = col.take(subsample_idx, mode=\"clip\")\n self.quantiles_.append(np.nanpercentile(col, references))\n self.quantiles_ = np.transpose(self.quantiles_)\n # Due to floating-point precision error in `np.nanpercentile`,\n # make sure that quantiles are monotonically increasing.\n # Upstream issue in numpy:\n # https://github.com/numpy/numpy/issues/14685\n self.quantiles_ = np.maximum.accumulate(self.quantiles_)\n\n def _sparse_fit(self, X, random_state):\n \"\"\"Compute percentiles for sparse matrices.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n The data used to scale along the features axis. The sparse matrix\n needs to be nonnegative. If a sparse matrix is provided,\n it will be converted into a sparse ``csc_matrix``.\n \"\"\"\n n_samples, n_features = X.shape\n references = self.references_ * 100\n\n self.quantiles_ = []\n for feature_idx in range(n_features):\n column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]]\n if len(column_nnz_data) > self.subsample:\n column_subsample = self.subsample * len(column_nnz_data) // n_samples\n if self.ignore_implicit_zeros:\n column_data = np.zeros(shape=column_subsample, dtype=X.dtype)\n else:\n column_data = np.zeros(shape=self.subsample, dtype=X.dtype)\n column_data[:column_subsample] = random_state.choice(\n column_nnz_data, size=column_subsample, replace=False\n )\n else:\n if self.ignore_implicit_zeros:\n column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)\n else:\n column_data = np.zeros(shape=n_samples, dtype=X.dtype)\n column_data[: len(column_nnz_data)] = column_nnz_data\n\n if not column_data.size:\n # if no nnz, an error will be raised for computing the\n # quantiles. Force the quantiles to be zeros.\n self.quantiles_.append([0] * len(references))\n else:\n self.quantiles_.append(np.nanpercentile(column_data, references))\n self.quantiles_ = np.transpose(self.quantiles_)\n # due to floating-point precision error in `np.nanpercentile`,\n # make sure the quantiles are monotonically increasing\n # Upstream issue in numpy:\n # https://github.com/numpy/numpy/issues/14685\n self.quantiles_ = np.maximum.accumulate(self.quantiles_)\n\n def fit(self, X, y=None):\n \"\"\"Compute the quantiles used for transforming.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n \"\"\"\n if self.n_quantiles <= 0:\n raise ValueError(\n \"Invalid value for 'n_quantiles': %d. \"\n \"The number of quantiles must be at least one.\"\n % self.n_quantiles\n )\n\n if self.subsample <= 0:\n raise ValueError(\n \"Invalid value for 'subsample': %d. \"\n \"The number of subsamples must be at least one.\"\n % self.subsample\n )\n\n if self.n_quantiles > self.subsample:\n raise ValueError(\n \"The number of quantiles cannot be greater than\"\n \" the number of samples used. 
Got {} quantiles\"\n \" and {} samples.\".format(self.n_quantiles, self.subsample)\n )\n\n X = self._check_inputs(X, in_fit=True, copy=False)\n n_samples = X.shape[0]\n\n if self.n_quantiles > n_samples:\n warnings.warn(\n \"n_quantiles (%s) is greater than the total number \"\n \"of samples (%s). n_quantiles is set to \"\n \"n_samples.\" % (self.n_quantiles, n_samples)\n )\n self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))\n\n rng = check_random_state(self.random_state)\n\n # Create the quantiles of reference\n self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)\n if sparse.issparse(X):\n self._sparse_fit(X, rng)\n else:\n self._dense_fit(X, rng)\n\n return self\n\n def _transform_col(self, X_col, quantiles, inverse):\n \"\"\"Private function to transform a single feature.\"\"\"\n\n output_distribution = self.output_distribution\n\n if not inverse:\n lower_bound_x = quantiles[0]\n upper_bound_x = quantiles[-1]\n lower_bound_y = 0\n upper_bound_y = 1\n else:\n lower_bound_x = 0\n upper_bound_x = 1\n lower_bound_y = quantiles[0]\n upper_bound_y = quantiles[-1]\n # for inverse transform, match a uniform distribution\n with np.errstate(invalid=\"ignore\"): # hide NaN comparison warnings\n if output_distribution == \"normal\":\n X_col = stats.norm.cdf(X_col)\n # else output distribution is already a uniform distribution\n\n # find index for lower and higher bounds\n with np.errstate(invalid=\"ignore\"): # hide NaN comparison warnings\n if output_distribution == \"normal\":\n lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x\n upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x\n if output_distribution == \"uniform\":\n lower_bounds_idx = X_col == lower_bound_x\n upper_bounds_idx = X_col == upper_bound_x\n\n isfinite_mask = ~np.isnan(X_col)\n X_col_finite = X_col[isfinite_mask]\n if not inverse:\n # Interpolate in one direction and in the other and take the\n # mean. This is in case of repeated values in the features\n # and hence repeated quantiles\n #\n # If we don't do this, only one extreme of the duplicated is\n # used (the upper when we do ascending, and the\n # lower for descending). We take the mean of these two\n X_col[isfinite_mask] = 0.5 * (\n np.interp(X_col_finite, quantiles, self.references_)\n - np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1])\n )\n else:\n X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles)\n\n X_col[upper_bounds_idx] = upper_bound_y\n X_col[lower_bounds_idx] = lower_bound_y\n # for forward transform, match the output distribution\n if not inverse:\n with np.errstate(invalid=\"ignore\"): # hide NaN comparison warnings\n if output_distribution == \"normal\":\n X_col = stats.norm.ppf(X_col)\n # find the value to clip the data to avoid mapping to\n # infinity. 
Clip such that the inverse transform will be\n # consistent\n clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))\n clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1)))\n X_col = np.clip(X_col, clip_min, clip_max)\n # else output distribution is uniform and the ppf is the\n # identity function so we let X_col unchanged\n\n return X_col\n\n def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):\n \"\"\"Check inputs before fit and transform.\"\"\"\n X = self._validate_data(\n X,\n reset=in_fit,\n accept_sparse=\"csc\",\n copy=copy,\n dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\",\n )\n # we only accept positive sparse matrix when ignore_implicit_zeros is\n # false and that we call fit or transform.\n with np.errstate(invalid=\"ignore\"): # hide NaN comparison warnings\n if (\n not accept_sparse_negative\n and not self.ignore_implicit_zeros\n and (sparse.issparse(X) and np.any(X.data < 0))\n ):\n raise ValueError(\n \"QuantileTransformer only accepts non-negative sparse matrices.\"\n )\n\n # check the output distribution\n if self.output_distribution not in (\"normal\", \"uniform\"):\n raise ValueError(\n \"'output_distribution' has to be either 'normal'\"\n \" or 'uniform'. Got '{}' instead.\".format(self.output_distribution)\n )\n\n return X\n\n def _transform(self, X, inverse=False):\n \"\"\"Forward and inverse transform.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The data used to scale along the features axis.\n\n inverse : bool, default=False\n If False, apply forward transform. If True, apply\n inverse transform.\n\n Returns\n -------\n X : ndarray of shape (n_samples, n_features)\n Projected data.\n \"\"\"\n\n if sparse.issparse(X):\n for feature_idx in range(X.shape[1]):\n column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])\n X.data[column_slice] = self._transform_col(\n X.data[column_slice], self.quantiles_[:, feature_idx], inverse\n )\n else:\n for feature_idx in range(X.shape[1]):\n X[:, feature_idx] = self._transform_col(\n X[:, feature_idx], self.quantiles_[:, feature_idx], inverse\n )\n\n return X\n\n def transform(self, X):\n \"\"\"Feature-wise transformation of the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The projected data.\n \"\"\"\n check_is_fitted(self)\n X = self._check_inputs(X, in_fit=False, copy=self.copy)\n\n return self._transform(X, inverse=False)\n\n def inverse_transform(self, X):\n \"\"\"Back-projection to the original space.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. 
Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of (n_samples, n_features)\n The projected data.\n \"\"\"\n check_is_fitted(self)\n X = self._check_inputs(\n X, in_fit=False, accept_sparse_negative=True, copy=self.copy\n )\n\n return self._transform(X, inverse=True)\n\n def _more_tags(self):\n return {\"allow_nan\": True}\n\n\ndef quantile_transform(\n X,\n *,\n axis=0,\n n_quantiles=1000,\n output_distribution=\"uniform\",\n ignore_implicit_zeros=False,\n subsample=int(1e5),\n random_state=None,\n copy=True,\n):\n \"\"\"Transform features using quantiles information.\n\n This method transforms the features to follow a uniform or a normal\n distribution. Therefore, for a given feature, this transformation tends\n to spread out the most frequent values. It also reduces the impact of\n (marginal) outliers: this is therefore a robust preprocessing scheme.\n\n The transformation is applied on each feature independently. First an\n estimate of the cumulative distribution function of a feature is\n used to map the original values to a uniform distribution. The obtained\n values are then mapped to the desired output distribution using the\n associated quantile function. Features values of new/unseen data that fall\n below or above the fitted range will be mapped to the bounds of the output\n distribution. Note that this transform is non-linear. It may distort linear\n correlations between variables measured at the same scale but renders\n variables measured at different scales more directly comparable.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to transform.\n\n axis : int, default=0\n Axis used to compute the means and standard deviations along. If 0,\n transform each feature, otherwise (if 1) transform each sample.\n\n n_quantiles : int, default=1000 or n_samples\n Number of quantiles to be computed. It corresponds to the number\n of landmarks used to discretize the cumulative distribution function.\n If n_quantiles is larger than the number of samples, n_quantiles is set\n to the number of samples as a larger number of quantiles does not give\n a better approximation of the cumulative distribution function\n estimator.\n\n output_distribution : {'uniform', 'normal'}, default='uniform'\n Marginal distribution for the transformed data. The choices are\n 'uniform' (default) or 'normal'.\n\n ignore_implicit_zeros : bool, default=False\n Only applies to sparse matrices. If True, the sparse entries of the\n matrix are discarded to compute the quantile statistics. If False,\n these entries are treated as zeros.\n\n subsample : int, default=1e5\n Maximum number of samples used to estimate the quantiles for\n computational efficiency. Note that the subsampling procedure may\n differ for value-identical sparse and dense matrices.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for subsampling and smoothing\n noise.\n Please see ``subsample`` for more details.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`\n\n copy : bool, default=True\n Set to False to perform inplace transformation and avoid a copy (if the\n input is already a numpy array). 
If True, a copy of `X` is transformed,\n leaving the original `X` unchanged\n\n ..versionchanged:: 0.23\n The default value of `copy` changed from False to True in 0.23.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import quantile_transform\n >>> rng = np.random.RandomState(0)\n >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)\n >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)\n array([...])\n\n See Also\n --------\n QuantileTransformer : Performs quantile-based scaling using the\n Transformer API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n power_transform : Maps data to a normal distribution using a\n power transformation.\n scale : Performs standardization that is faster, but less robust\n to outliers.\n robust_scale : Performs robust standardization that removes the influence\n of outliers but does not put outliers and inliers on the same scale.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n .. warning:: Risk of data leak\n\n Do not use :func:`~sklearn.preprocessing.quantile_transform` unless\n you know what you are doing. A common mistake is to apply it\n to the entire data *before* splitting into training and\n test sets. This will bias the model evaluation because\n information would have leaked from the test set to the\n training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.QuantileTransformer` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking:`pipe = make_pipeline(QuantileTransformer(),\n LogisticRegression())`.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\"\n n = QuantileTransformer(\n n_quantiles=n_quantiles,\n output_distribution=output_distribution,\n subsample=subsample,\n ignore_implicit_zeros=ignore_implicit_zeros,\n random_state=random_state,\n copy=copy,\n )\n if axis == 0:\n return n.fit_transform(X)\n elif axis == 1:\n return n.fit_transform(X.T).T\n else:\n raise ValueError(\n \"axis should be either equal to 0 or 1. Got axis={}\".format(axis)\n )\n\n\nclass PowerTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Apply a power transform featurewise to make data more Gaussian-like.\n\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, PowerTransformer supports the Box-Cox transform and the\n Yeo-Johnson transform. The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n .. versionadded:: 0.20\n\n Parameters\n ----------\n method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'\n The power transform method. 
Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n standardize : bool, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : bool, default=True\n Set to False to perform inplace computation during transformation.\n\n Attributes\n ----------\n lambdas_ : ndarray of float of shape (n_features,)\n The parameters of the power transformation for the selected features.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import PowerTransformer\n >>> pt = PowerTransformer()\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(pt.fit(data))\n PowerTransformer()\n >>> print(pt.lambdas_)\n [ 1.386... -3.100...]\n >>> print(pt.transform(data))\n [[-1.316... -0.707...]\n [ 0.209... -0.707...]\n [ 1.106... 1.414...]]\n\n See Also\n --------\n power_transform : Equivalent function without the estimator API.\n\n QuantileTransformer : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. 
Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n \"\"\"\n\n def __init__(self, method=\"yeo-johnson\", *, standardize=True, copy=True):\n self.method = method\n self.standardize = standardize\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Estimate the optimal parameter lambda for each feature.\n\n The optimal lambda parameter for minimizing skewness is estimated on\n each feature independently using maximum likelihood.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to estimate the optimal transformation parameters.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted transformer.\n \"\"\"\n self._fit(X, y=y, force_transform=False)\n return self\n\n def fit_transform(self, X, y=None):\n return self._fit(X, y, force_transform=True)\n\n def _fit(self, X, y=None, force_transform=False):\n X = self._check_input(X, in_fit=True, check_positive=True, check_method=True)\n\n if not self.copy and not force_transform: # if call from fit()\n X = X.copy() # force copy so that fit does not change X inplace\n\n optim_function = {\n \"box-cox\": self._box_cox_optimize,\n \"yeo-johnson\": self._yeo_johnson_optimize,\n }[self.method]\n with np.errstate(invalid=\"ignore\"): # hide NaN warnings\n self.lambdas_ = np.array([optim_function(col) for col in X.T])\n\n if self.standardize or force_transform:\n transform_function = {\n \"box-cox\": boxcox,\n \"yeo-johnson\": self._yeo_johnson_transform,\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid=\"ignore\"): # hide NaN warnings\n X[:, i] = transform_function(X[:, i], lmbda)\n\n if self.standardize:\n self._scaler = StandardScaler(copy=False)\n if force_transform:\n X = self._scaler.fit_transform(X)\n else:\n self._scaler.fit(X)\n\n return X\n\n def transform(self, X):\n \"\"\"Apply the power transform to each feature using the fitted lambdas.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n Returns\n -------\n X_trans : ndarray of shape (n_samples, n_features)\n The transformed data.\n \"\"\"\n check_is_fitted(self)\n X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True)\n\n transform_function = {\n \"box-cox\": boxcox,\n \"yeo-johnson\": self._yeo_johnson_transform,\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid=\"ignore\"): # hide NaN warnings\n X[:, i] = transform_function(X[:, i], lmbda)\n\n if self.standardize:\n X = self._scaler.transform(X)\n\n return X\n\n def inverse_transform(self, X):\n \"\"\"Apply the inverse power transformation using the fitted lambdas.\n\n The inverse of the Box-Cox transformation is given by::\n\n if lambda_ == 0:\n X = exp(X_trans)\n else:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_)\n\n The inverse of the Yeo-Johnson transformation is given by::\n\n if X >= 0 and lambda_ == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda_ != 0:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1\n elif X < 0 and lambda_ != 2:\n X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))\n elif X < 0 and lambda_ == 2:\n X = 1 - exp(-X_trans)\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The transformed data.\n\n Returns\n -------\n X : ndarray of shape (n_samples, n_features)\n The original data.\n \"\"\"\n check_is_fitted(self)\n X = self._check_input(X, in_fit=False, 
check_shape=True)\n\n if self.standardize:\n X = self._scaler.inverse_transform(X)\n\n inv_fun = {\n \"box-cox\": self._box_cox_inverse_tranform,\n \"yeo-johnson\": self._yeo_johnson_inverse_transform,\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid=\"ignore\"): # hide NaN warnings\n X[:, i] = inv_fun(X[:, i], lmbda)\n\n return X\n\n def _box_cox_inverse_tranform(self, x, lmbda):\n \"\"\"Return inverse-transformed input x following Box-Cox inverse\n transform with parameter lambda.\n \"\"\"\n if lmbda == 0:\n x_inv = np.exp(x)\n else:\n x_inv = (x * lmbda + 1) ** (1 / lmbda)\n\n return x_inv\n\n def _yeo_johnson_inverse_transform(self, x, lmbda):\n \"\"\"Return inverse-transformed input x following Yeo-Johnson inverse\n transform with parameter lambda.\n \"\"\"\n x_inv = np.zeros_like(x)\n pos = x >= 0\n\n # when x >= 0\n if abs(lmbda) < np.spacing(1.0):\n x_inv[pos] = np.exp(x[pos]) - 1\n else: # lmbda != 0\n x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1\n\n # when x < 0\n if abs(lmbda - 2) > np.spacing(1.0):\n x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda))\n else: # lmbda == 2\n x_inv[~pos] = 1 - np.exp(-x[~pos])\n\n return x_inv\n\n def _yeo_johnson_transform(self, x, lmbda):\n \"\"\"Return transformed input x following Yeo-Johnson transform with\n parameter lambda.\n \"\"\"\n\n out = np.zeros_like(x)\n pos = x >= 0 # binary mask\n\n # when x >= 0\n if abs(lmbda) < np.spacing(1.0):\n out[pos] = np.log1p(x[pos])\n else: # lmbda != 0\n out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda\n\n # when x < 0\n if abs(lmbda - 2) > np.spacing(1.0):\n out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)\n else: # lmbda == 2\n out[~pos] = -np.log1p(-x[~pos])\n\n return out\n\n def _box_cox_optimize(self, x):\n \"\"\"Find and return optimal lambda parameter of the Box-Cox transform by\n MLE, for observed data x.\n\n We here use scipy builtins which uses the brent optimizer.\n \"\"\"\n # the computation of lambda is influenced by NaNs so we need to\n # get rid of them\n _, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)\n\n return lmbda\n\n def _yeo_johnson_optimize(self, x):\n \"\"\"Find and return optimal lambda parameter of the Yeo-Johnson\n transform by MLE, for observed data x.\n\n Like for Box-Cox, MLE is done via the brent optimizer.\n \"\"\"\n\n def _neg_log_likelihood(lmbda):\n \"\"\"Return the negative log likelihood of the observed data x as a\n function of lambda.\"\"\"\n x_trans = self._yeo_johnson_transform(x, lmbda)\n n_samples = x.shape[0]\n\n loglike = -n_samples / 2 * np.log(x_trans.var())\n loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()\n\n return -loglike\n\n # the computation of lambda is influenced by NaNs so we need to\n # get rid of them\n x = x[~np.isnan(x)]\n # choosing bracket -2, 2 like for boxcox\n return optimize.brent(_neg_log_likelihood, brack=(-2, 2))\n\n def _check_input(\n self, X, in_fit, check_positive=False, check_shape=False, check_method=False\n ):\n \"\"\"Validate the input before fit and transform.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n\n in_fit : bool\n Whether or not `_check_input` is called from `fit` or other\n methods, e.g. 
`predict`, `transform`, etc.\n\n check_positive : bool, default=False\n If True, check that all data is positive and non-zero (only if\n ``self.method=='box-cox'``).\n\n check_shape : bool, default=False\n If True, check that n_features matches the length of self.lambdas_\n\n check_method : bool, default=False\n If True, check that the transformation method is valid.\n \"\"\"\n X = self._validate_data(\n X,\n ensure_2d=True,\n dtype=FLOAT_DTYPES,\n copy=self.copy,\n force_all_finite=\"allow-nan\",\n reset=in_fit,\n )\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"All-NaN (slice|axis) encountered\")\n if check_positive and self.method == \"box-cox\" and np.nanmin(X) <= 0:\n raise ValueError(\n \"The Box-Cox transformation can only be \"\n \"applied to strictly positive data\"\n )\n\n if check_shape and not X.shape[1] == len(self.lambdas_):\n raise ValueError(\n \"Input data has a different number of features \"\n \"than fitting data. Should have {n}, data has {m}\".format(\n n=len(self.lambdas_), m=X.shape[1]\n )\n )\n\n valid_methods = (\"box-cox\", \"yeo-johnson\")\n if check_method and self.method not in valid_methods:\n raise ValueError(\n \"'method' must be one of {}, got {} instead.\".format(\n valid_methods, self.method\n )\n )\n\n return X\n\n def _more_tags(self):\n return {\"allow_nan\": True}\n\n\ndef power_transform(X, method=\"yeo-johnson\", *, standardize=True, copy=True):\n \"\"\"\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, power_transform supports the Box-Cox transform and the\n Yeo-Johnson transform. The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'\n The power transform method. Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n .. versionchanged:: 0.23\n The default value of the `method` parameter changed from\n 'box-cox' to 'yeo-johnson' in 0.23.\n\n standardize : bool, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : bool, default=True\n Set to False to perform inplace computation during transformation.\n\n Returns\n -------\n X_trans : ndarray of shape (n_samples, n_features)\n The transformed data.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import power_transform\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(power_transform(data, method='box-cox'))\n [[-1.332... -0.707...]\n [ 0.256... -0.707...]\n [ 1.076... 1.414...]]\n\n .. warning:: Risk of data leak.\n Do not use :func:`~sklearn.preprocessing.power_transform` unless you\n know what you are doing. A common mistake is to apply it to the entire\n data *before* splitting into training and test sets. 
This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.PowerTransformer` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),\n LogisticRegression())`.\n\n See Also\n --------\n PowerTransformer : Equivalent transformation with the\n Transformer API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n quantile_transform : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n \"\"\"\n pt = PowerTransformer(method=method, standardize=standardize, copy=copy)\n return pt.fit_transform(X)\n" ]
[ [ "numpy.minimum", "numpy.rollaxis", "numpy.exp", "numpy.finfo", "numpy.nanmean", "numpy.sign", "numpy.size", "numpy.warnings.catch_warnings", "numpy.full", "numpy.zeros_like", "scipy.optimize.brent", "scipy.sparse.isspmatrix_csc", "numpy.warnings.filterwarnings", "numpy.nanpercentile", "numpy.interp", "numpy.nanmin", "numpy.log1p", "numpy.transpose", "numpy.arange", "numpy.sqrt", "numpy.nanmax", "scipy.stats.norm.cdf", "numpy.nanmedian", "numpy.nanstd", "scipy.sparse.issparse", "scipy.sparse.coo_matrix", "numpy.array", "numpy.zeros", "scipy.sparse.csc_matrix", "numpy.diff", "numpy.allclose", "numpy.isscalar", "numpy.power", "numpy.clip", "scipy.sparse.isspmatrix_coo", "numpy.spacing", "numpy.logical_not", "numpy.isnan", "scipy.stats.norm.ppf", "numpy.asarray", "numpy.errstate", "numpy.sum", "numpy.maximum.accumulate", "numpy.any", "numpy.ptp", "numpy.abs", "numpy.repeat", "numpy.linspace", "numpy.maximum" ] ]
LangwenH/AlphaPose-for-Mice-Behavior
[ "357923f5993a521507fe7359fa763d2b5d2493f7" ]
[ "video_demo.py" ]
[ "import torch\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport torchvision.transforms as transforms\r\n\r\nimport torch.nn as nn\r\nimport torch.utils.data\r\nimport numpy as np\r\nfrom opt import opt\r\n\r\nfrom dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco\r\nfrom yolo.util import write_results, dynamic_write_results\r\nfrom SPPE.src.main_fast_inference import *\r\n\r\nimport os\r\nimport sys\r\nfrom tqdm import tqdm\r\nimport time\r\nfrom fn import getTime\r\nimport cv2\r\n\r\nfrom pPose_nms import pose_nms, write_json\r\n\r\nargs = opt\r\nargs.dataset = 'coco'\r\nif not args.sp:\r\n torch.multiprocessing.set_start_method('forkserver', force=True)\r\n torch.multiprocessing.set_sharing_strategy('file_system')\r\n\r\nif __name__ == \"__main__\":\r\n videofile = args.video\r\n mode = args.mode\r\n if not os.path.exists(args.outputpath):\r\n os.mkdir(args.outputpath)\r\n \r\n if not len(videofile):\r\n raise IOError('Error: must contain --video')\r\n\r\n # Load input video\r\n data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()\r\n (fourcc,fps,frameSize) = data_loader.videoinfo()\r\n\r\n # Load detection loader\r\n print('Loading YOLO model..')\r\n sys.stdout.flush()\r\n det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()\r\n det_processor = DetectionProcessor(det_loader).start()\r\n \r\n # Load pose model\r\n pose_dataset = Mscoco()\r\n if args.fast_inference:\r\n pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)\r\n else:\r\n pose_model = InferenNet(4 * 1 + 1, pose_dataset)\r\n pose_model.cuda()\r\n pose_model.eval()\r\n\r\n runtime_profile = {\r\n 'dt': [],\r\n 'pt': [],\r\n 'pn': []\r\n }\r\n\r\n # Data writer\r\n save_path = os.path.join(args.outputpath, 'AlphaPose_'+videofile.split('/')[-1].split('.')[0]+'.avi')\r\n writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()\r\n\r\n im_names_desc = tqdm(range(data_loader.length()))\r\n batchSize = args.posebatch\r\n for i in im_names_desc:\r\n start_time = getTime()\r\n with torch.no_grad():\r\n (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()\r\n if orig_img is None:\r\n break\r\n if boxes is None or boxes.nelement() == 0:\r\n writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])\r\n continue\r\n\r\n ckpt_time, det_time = getTime(start_time)\r\n runtime_profile['dt'].append(det_time)\r\n # Pose Estimation\r\n \r\n datalen = inps.size(0)\r\n leftover = 0\r\n if (datalen) % batchSize:\r\n leftover = 1\r\n num_batches = datalen // batchSize + leftover\r\n hm = []\r\n for j in range(num_batches):\r\n inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)].cuda()\r\n hm_j = pose_model(inps_j)\r\n hm.append(hm_j)\r\n hm = torch.cat(hm)\r\n ckpt_time, pose_time = getTime(ckpt_time)\r\n runtime_profile['pt'].append(pose_time)\r\n\r\n hm = hm.cpu().data\r\n writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])\r\n\r\n ckpt_time, post_time = getTime(ckpt_time)\r\n runtime_profile['pn'].append(post_time)\r\n\r\n if args.profile:\r\n # TQDM\r\n im_names_desc.set_description(\r\n 'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(\r\n dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))\r\n )\r\n\r\n print('===========================> Finish Model Running.')\r\n if (args.save_img or args.save_video) and not args.vis_fast:\r\n 
print('===========================> Rendering remaining images in the queue...')\r\n print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')\r\n while(writer.running()):\r\n pass\r\n writer.stop()\r\n final_result = writer.results()\r\n write_json(final_result, args.outputpath)\r\n" ]
[ [ "torch.cat", "torch.no_grad", "torch.multiprocessing.set_start_method", "numpy.mean", "torch.multiprocessing.set_sharing_strategy" ] ]
BhaveshJP25/RSNA
[ "48d85faf82651b1ae4fdcd829ce2d4978a858d3f" ]
[ "2DNet/src/dataset/dataset.py" ]
[ "import torch.utils.data as data\nimport torch\nimport albumentations\nimport cv2\nimport numpy as np\nimport random\nimport math\nfrom settings import train_png_dir\n\ndef generate_transforms(image_size):\n IMAGENET_SIZE = image_size\n\n train_transform = albumentations.Compose([\n albumentations.Resize(IMAGENET_SIZE, IMAGENET_SIZE),\n albumentations.Normalize(mean=(0.456, 0.456, 0.456), std=(0.224, 0.224, 0.224), max_pixel_value=255.0, p=1.0)\n ])\n\n val_transform = albumentations.Compose([\n albumentations.Resize(IMAGENET_SIZE, IMAGENET_SIZE),\n albumentations.Normalize(mean=(0.456, 0.456, 0.456), std=(0.224, 0.224, 0.224), max_pixel_value=255.0, p=1.0)\n ])\n\n return train_transform, val_transform\n\ndef generate_random_list(length):\n new_list = []\n\n for i in range(length):\n if i <= length/2:\n weight = int(i/4)\n else:\n weight = int((length - i)/4)\n weight = np.max([1, weight])\n new_list += [i]*weight\n\n return new_list \n\n\nclass RSNA_Dataset_train_by_study_context(data.Dataset):\n def __init__(self,\n df = None,\n name_list = None,\n transform = None\n ):\n self.df = df[df['study_instance_uid'].isin(name_list)]\n self.name_list = name_list\n self.transform = transform\n\n def __getitem__(self, idx):\n study_name = self.name_list[idx % len(self.name_list)]\n study_train_df = self.df[self.df['study_instance_uid']==study_name]\n print(study_train_df.head())\n study_index = random.choice(generate_random_list(study_train_df.shape[0]-1))\n\n slice_id = study_name + '_' + str(study_index)\n filename = study_train_df[study_train_df['slice_id']==slice_id]['filename'].values[0]\n\n if study_index == (study_train_df.shape[0]-1):\n filename_up = filename\n else:\n slice_id_up = study_name + '_' + str(study_index+1)\n filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]\n\n if study_index == 0:\n filename_down = filename\n else:\n slice_id_down = study_name + '_' + str(study_index-1)\n filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]\n\n # print(train_png_dir)\n # print(\"\\n\")\n # print(filename)\n image = cv2.imread(train_png_dir + filename, 0)\n image = cv2.resize(image, (512, 512))\n image_up = cv2.imread(train_png_dir + filename_up, 0)\n image_up = cv2.resize(image_up, (512, 512))\n image_down = cv2.imread(train_png_dir + filename_down, 0)\n image_down = cv2.resize(image_down, (512, 512))\n\n image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)\n label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any': 'subdural'].values)\n\n if random.random() < 0.5:\n image_cat = cv2.cvtColor(image_cat, cv2.COLOR_BGR2RGB)\n\n image_cat = aug_image(image_cat, is_infer=False)\n \n if self.transform is not None:\n augmented = self.transform(image=image_cat)\n image_cat = augmented['image'].transpose(2, 0, 1)\n\n # print(label)\n # exit(0)\n\n return image_cat, label\n\n def __len__(self):\n return len(self.name_list) * 4\n\n\nclass RSNA_Dataset_val_by_study_context(data.Dataset):\n def __init__(self,\n df = None,\n name_list = None,\n transform = None\n ):\n self.df = df\n self.name_list = name_list\n self.transform = transform\n\n def __getitem__(self, idx):\n \n filename = self.name_list[idx % len(self.name_list)]\n filename_train_df = self.df[self.df['filename']==filename]\n study_name = filename_train_df['study_instance_uid'].values[0]\n study_index = int(filename_train_df['slice_id'].values[0].split('_')[-1])\n 
study_train_df = self.df[self.df['study_instance_uid']==study_name]\n\n if study_index == (study_train_df.shape[0]-1):\n filename_up = filename\n else:\n slice_id_up = study_name + '_' + str(study_index+1)\n filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]\n\n if study_index == 0:\n filename_down = filename\n else:\n slice_id_down = study_name + '_' + str(study_index-1)\n filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]\n\n image = cv2.imread(train_png_dir + filename, 0)\n image = cv2.resize(image, (512, 512))\n image_up = cv2.imread(train_png_dir + filename_up, 0)\n image_up = cv2.resize(image_up, (512, 512))\n image_down = cv2.imread(train_png_dir + filename_down, 0)\n image_down = cv2.resize(image_down, (512, 512))\n\n image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)\n label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any':'subdural'].values)\n image_cat = aug_image(image_cat, is_infer=True)\n\n if self.transform is not None:\n augmented = self.transform(image=image_cat)\n image_cat = augmented['image'].transpose(2, 0, 1)\n\n return image_cat, label\n\n def __len__(self):\n return len(self.name_list)\n\ndef randomHorizontalFlip(image, u=0.5):\n if np.random.random() < u:\n image = cv2.flip(image, 1)\n return image\n\ndef randomVerticleFlip(image, u=0.5):\n if np.random.random() < u:\n image = cv2.flip(image, 0)\n return image\n\ndef randomRotate90(image, u=0.5):\n if np.random.random() < u:\n image[:,:,0:3] = np.rot90(image[:,:,0:3])\n return image\n\n#===================================================origin=============================================================\ndef random_cropping(image, ratio=0.8, is_random = True):\n height, width, _ = image.shape\n target_h = int(height*ratio)\n target_w = int(width*ratio)\n\n if is_random:\n start_x = random.randint(0, width - target_w)\n start_y = random.randint(0, height - target_h)\n else:\n start_x = ( width - target_w ) // 2\n start_y = ( height - target_h ) // 2\n\n zeros = image[start_y:start_y+target_h,start_x:start_x+target_w,:]\n zeros = cv2.resize(zeros ,(width,height))\n return zeros\n\ndef cropping(image, ratio=0.8, code = 0):\n height, width, _ = image.shape\n target_h = int(height*ratio)\n target_w = int(width*ratio)\n\n if code==0:\n start_x = ( width - target_w ) // 2\n start_y = ( height - target_h ) // 2\n\n elif code == 1:\n start_x = 0\n start_y = 0\n\n elif code == 2:\n start_x = width - target_w\n start_y = 0\n\n elif code == 3:\n start_x = 0\n start_y = height - target_h\n\n elif code == 4:\n start_x = width - target_w\n start_y = height - target_h\n\n elif code == -1:\n return image\n\n zeros = image[start_y:start_y+target_h,start_x:start_x+target_w,:]\n zeros = cv2.resize(zeros ,(width,height))\n return zeros\n\ndef random_erasing(img, probability=0.5, sl=0.02, sh=0.4, r1=0.3):\n if random.uniform(0, 1) > probability:\n return img\n\n for attempt in range(100):\n area = img.shape[0] * img.shape[1]\n\n target_area = random.uniform(sl, sh) * area\n aspect_ratio = random.uniform(r1, 1 / r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < img.shape[1] and h < img.shape[0]:\n x1 = random.randint(0, img.shape[0] - h)\n y1 = random.randint(0, img.shape[1] - w)\n if img.shape[2] == 3:\n img[x1:x1 + h, y1:y1 + w, :] = 0.0\n else:\n print('!!!!!!!! 
random_erasing dim wrong!!!!!!!!!!!')\n return\n\n return img\n return img\n\ndef randomShiftScaleRotate(image,\n shift_limit=(-0.0, 0.0),\n scale_limit=(-0.0, 0.0),\n rotate_limit=(-0.0, 0.0),\n aspect_limit=(-0.0, 0.0),\n borderMode=cv2.BORDER_CONSTANT, u=0.5):\n\n if np.random.random() < u:\n height, width, channel = image.shape\n\n angle = np.random.uniform(rotate_limit[0], rotate_limit[1])\n scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])\n aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])\n sx = scale * aspect / (aspect ** 0.5)\n sy = scale / (aspect ** 0.5)\n dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)\n dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)\n\n cc = np.math.cos(angle / 180 * np.math.pi) * sx\n ss = np.math.sin(angle / 180 * np.math.pi) * sy\n rotate_matrix = np.array([[cc, -ss], [ss, cc]])\n\n box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])\n box1 = box0 - np.array([width / 2, height / 2])\n box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])\n\n box0 = box0.astype(np.float32)\n box1 = box1.astype(np.float32)\n mat = cv2.getPerspectiveTransform(box0, box1)\n image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,\n borderValue=(\n 0, 0,\n 0,))\n return image\n\n\ndef aug_image(image, is_infer=False):\n if is_infer:\n image = randomHorizontalFlip(image, u=0)\n image = np.asarray(image)\n image = cropping(image, ratio=0.8, code=0)\n return image\n\n else:\n image = randomHorizontalFlip(image)\n height, width, _ = image.shape\n image = randomShiftScaleRotate(image,\n shift_limit=(-0.1, 0.1),\n scale_limit=(-0.1, 0.1),\n aspect_limit=(-0.1, 0.1),\n rotate_limit=(-30, 30))\n\n image = cv2.resize(image, (width, height))\n image = random_erasing(image, probability=0.5, sl=0.02, sh=0.4, r1=0.3)\n\n ratio = random.uniform(0.6,0.99)\n image = random_cropping(image, ratio=ratio, is_random=True)\n return image\n\n\ndef generate_dataset_loader(df_all, c_train, train_transform, train_batch_size, c_val, val_transform, val_batch_size, workers):\n train_dataset = RSNA_Dataset_train_by_study_context(df_all, c_train, train_transform)\n val_dataset = RSNA_Dataset_val_by_study_context(df_all, c_val, val_transform)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=train_batch_size, \n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n drop_last=True)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=val_batch_size, \n shuffle=False,\n num_workers=workers,\n pin_memory=True,\n drop_last=False)\n\n return train_loader, val_loader\n\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.rot90", "numpy.array", "numpy.dot", "numpy.asarray", "torch.FloatTensor", "numpy.random.uniform", "numpy.math.cos", "torch.utils.data.DataLoader", "numpy.math.sin", "numpy.random.random" ] ]
sebasmos/Spacenet7TRDP
[ "03b5819321108017f8f8c2d359264c8e18d9e38a" ]
[ "Segmentation/baseline/src/sn7_baseline_postproc_funcs.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 25 14:11:02 2020\n\n@author: avanetten\n\"\"\"\n\nfrom shapely.ops import cascaded_union\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\nimport multiprocessing\nimport pandas as pd\nimport numpy as np\nimport skimage.io\nimport tqdm\nimport glob\nimport math\nimport gdal\nimport time\nimport os\n\nimport solaris as sol\nfrom solaris.utils.core import _check_gdf_load\nfrom solaris.raster.image import create_multiband_geotiff \n\n\ndef map_wrapper(x):\n '''For multi-threading'''\n return x[0](*(x[1:]))\n \n \ndef multithread_polys(param):\n '''Simple wrapper around mask_to_poly_geojson() for multiprocessing\n # https://solaris.readthedocs.io/en/latest/_modules/solaris/vector/mask.html#mask_to_poly_geojson\n # mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n # output_path=None, output_type='geojson', min_area=40,\n # bg_threshold=0, do_transform=None, simplify=False,\n # tolerance=0.5, **kwargs)\n '''\n \n [pred_image, min_area, output_path_pred, output_type, \n bg_threshold, simplify] = param\n print(\"output_pred:\", os.path.basename(output_path_pred))\n sol.vector.mask.mask_to_poly_geojson(pred_image, \n min_area=min_area, \n output_path=output_path_pred,\n output_type=output_type,\n bg_threshold=bg_threshold,\n simplify=simplify)\n \n \ndef calculate_iou(pred_poly, test_data_GDF):\n \"\"\"Get the best intersection over union for a predicted polygon.\n Adapted from: https://github.com/CosmiQ/solaris/blob/master/solaris/eval/iou.py, but\n keeps index of test_data_GDF\n \n Arguments\n ---------\n pred_poly : :py:class:`shapely.Polygon`\n Prediction polygon to test.\n test_data_GDF : :py:class:`geopandas.GeoDataFrame`\n GeoDataFrame of ground truth polygons to test ``pred_poly`` against.\n Returns\n -------\n iou_GDF : :py:class:`geopandas.GeoDataFrame`\n A subset of ``test_data_GDF`` that overlaps ``pred_poly`` with an added\n column ``iou_score`` which indicates the intersection over union value.\n \"\"\"\n\n # Fix bowties and self-intersections\n if not pred_poly.is_valid:\n pred_poly = pred_poly.buffer(0.0)\n\n precise_matches = test_data_GDF[test_data_GDF.intersects(pred_poly)]\n\n iou_row_list = []\n for idx, row in precise_matches.iterrows():\n # Load ground truth polygon and check exact iou\n test_poly = row.geometry\n # Ignore invalid polygons for now\n if pred_poly.is_valid and test_poly.is_valid:\n intersection = pred_poly.intersection(test_poly).area\n union = pred_poly.union(test_poly).area\n # Calculate iou\n iou_score = intersection / float(union)\n gt_idx = idx\n else:\n iou_score = 0\n gt_idx = -1\n row['iou_score'] = iou_score\n row['gt_idx'] = gt_idx\n iou_row_list.append(row)\n\n iou_GDF = gpd.GeoDataFrame(iou_row_list)\n return iou_GDF\n\n \ndef track_footprint_identifiers(json_dir, out_dir,\n min_iou=0.25, iou_field='iou_score', id_field='Id',\n reverse_order=False, \n verbose=True, super_verbose=False):\n '''\n Track footprint identifiers in the deep time stack.\n We need to track the global gdf instead of just the gdf of t-1.\n '''\n \n os.makedirs(out_dir, exist_ok=True)\n \n # set columns for master gdf\n gdf_master_columns = [id_field, iou_field, 'area', 'geometry']\n\n json_files = sorted([f\n for f in os.listdir(os.path.join(json_dir))\n if f.endswith('.geojson') and os.path.exists(os.path.join(json_dir, f))])\n # start at the end and work backwards?\n if reverse_order:\n json_files = json_files[::-1]\n\n # check if only partical matching has been 
done (this will cause errors)\n out_files_tmp = sorted([z for z in os.listdir(out_dir) if z.endswith('.geojson')])\n if len(out_files_tmp) > 0:\n if len(out_files_tmp) != len(json_files):\n raise Exception(\"\\nError in:\", out_dir, \"with N =\", len(out_files_tmp), \n \"files, need to purge this folder and restart matching!\\n\")\n return\n elif len(out_files_tmp) == len(json_files):\n print(\"\\nDir:\", os.path.basename(out_dir), \"N files:\", len(json_files), \n \"directory matching completed, skipping...\")\n return\n else:\n print(\"\\nMatching json_dir: \", os.path.basename(json_dir), \"N json:\", len(json_files))\n \n gdf_dict = {}\n for j, f in enumerate(json_files):\n \n name_root = f.split('.')[0]\n json_path = os.path.join(json_dir, f)\n output_path = os.path.join(out_dir, f)\n \n if verbose and ((j % 1) == 0):\n print(\" \", j, \"/\", len(json_files), \"for\", os.path.basename(json_dir), \"=\", name_root)\n\n # gdf\n gdf_now = gpd.read_file(json_path)\n # drop value if it exists\n gdf_now = gdf_now.drop(columns=['value'])\n # get area\n gdf_now['area'] = gdf_now['geometry'].area\n # initialize iou, id\n gdf_now[iou_field] = -1\n gdf_now[id_field] = -1\n # sort by reverse area\n gdf_now.sort_values(by=['area'], ascending=False, inplace=True)\n gdf_now = gdf_now.reset_index(drop=True)\n # reorder columns (if needed)\n gdf_now = gdf_now[gdf_master_columns] \n id_set = set([])\n \n if verbose:\n print(\"\\n\")\n print(\"\", j, \"file_name:\", f)\n print(\" \", \"gdf_now.columns:\", gdf_now.columns)\n \n if j == 0:\n # Establish initial footprints at Epoch0\n # set id\n gdf_now[id_field] = gdf_now.index.values\n gdf_now[iou_field] = 0\n n_new = len(gdf_now)\n n_matched = 0\n id_set = set(gdf_now[id_field].values)\n gdf_master_Out = gdf_now.copy(deep=True)\n # gdf_dict[f] = gdf_now\n else:\n # match buildings in epochT to epochT-1\n # see: https://github.com/CosmiQ/solaris/blob/master/solaris/eval/base.py\n # print(\"gdf_master;\", gdf_dict['master']) #gdf_master)\n gdf_master_Out = gdf_dict['master'].copy(deep=True)\n gdf_master_Edit = gdf_dict['master'].copy(deep=True)\n \n if verbose:\n print(\" len gdf_now:\", len(gdf_now), \"len(gdf_master):\", len(gdf_master_Out),\n \"max master id:\", np.max(gdf_master_Out[id_field]))\n print(\" gdf_master_Edit.columns:\", gdf_master_Edit.columns)\n \n new_id = np.max(gdf_master_Edit[id_field]) + 1\n # if verbose:\n # print(\"new_id:\", new_id)\n idx = 0\n n_new = 0\n n_matched = 0\n for pred_idx, pred_row in gdf_now.iterrows():\n if verbose:\n if (idx % 1000) == 0:\n print(\" \", name_root, idx, \"/\", len(gdf_now))\n if super_verbose:\n # print(\" \", i, j, idx, \"/\", len(gdf_now))\n print(\" \", idx, \"/\", len(gdf_now))\n idx += 1\n pred_poly = pred_row.geometry\n # if super_verbose:\n # print(\" pred_poly.exterior.coords:\", list(pred_poly.exterior.coords))\n \n # get iou overlap\n iou_GDF = calculate_iou(pred_poly, gdf_master_Edit)\n # iou_GDF = iou.calculate_iou(pred_poly, gdf_master_Edit)\n # print(\"iou_GDF:\", iou_GDF)\n \n # Get max iou\n if not iou_GDF.empty:\n max_iou_row = iou_GDF.loc[iou_GDF['iou_score'].idxmax(axis=0, skipna=True)]\n # sometimes we are get an erroneous id of 0, caused by nan area,\n # so check for this\n max_area = max_iou_row.geometry.area\n if max_area == 0 or math.isnan(max_area):\n # print(\"nan area!\", max_iou_row, \"returning...\")\n raise Exception(\"\\n Nan area!:\", max_iou_row, \"returning...\")\n return\n \n id_match = max_iou_row[id_field]\n if id_match in id_set:\n print(\"Already seen id! 
returning...\")\n raise Exception(\"\\n Already seen id!\", id_match, \"returning...\")\n return\n \n # print(\"iou_GDF:\", iou_GDF)\n if max_iou_row['iou_score'] >= min_iou:\n if super_verbose:\n print(\" pred_idx:\", pred_idx, \"match_id:\", max_iou_row[id_field],\n \"max iou:\", max_iou_row['iou_score'])\n # we have a successful match, so set iou, and id\n gdf_now.loc[pred_row.name, iou_field] = max_iou_row['iou_score']\n gdf_now.loc[pred_row.name, id_field] = id_match\n # drop matched polygon in ground truth\n gdf_master_Edit = gdf_master_Edit.drop(max_iou_row.name, axis=0)\n n_matched += 1\n # # update gdf_master geometry?\n # # Actually let's leave the geometry the same so it doesn't move around...\n # gdf_master_Out.at[max_iou_row['gt_idx'], 'geometry'] = pred_poly\n # gdf_master_Out.at[max_iou_row['gt_idx'], 'area'] = pred_poly.area\n # gdf_master_Out.at[max_iou_row['gt_idx'], iou_field] = max_iou_row['iou_score']\n \n else:\n # no match, \n if super_verbose:\n print(\" Minimal match! - pred_idx:\", pred_idx, \"match_id:\",\n max_iou_row[id_field], \"max iou:\", max_iou_row['iou_score'])\n print(\" Using new id:\", new_id)\n if (new_id in id_set) or (new_id == 0):\n raise Exception(\"trying to add an id that already exists, returning!\")\n return\n gdf_now.loc[pred_row.name, iou_field] = 0\n gdf_now.loc[pred_row.name, id_field] = new_id\n id_set.add(new_id)\n # update master, cols = [id_field, iou_field, 'area', 'geometry']\n gdf_master_Out.loc[new_id] = [new_id, 0, pred_poly.area, pred_poly]\n new_id += 1\n n_new += 1\n \n else:\n # no match (same exact code as right above)\n if super_verbose:\n print(\" pred_idx:\", pred_idx, \"no overlap, new_id:\", new_id)\n if (new_id in id_set) or (new_id == 0):\n raise Exception(\"trying to add an id that already exists, returning!\")\n return\n gdf_now.loc[pred_row.name, iou_field] = 0\n gdf_now.loc[pred_row.name, id_field] = new_id\n id_set.add(new_id)\n # update master, cols = [id_field, iou_field, 'area', 'geometry']\n gdf_master_Out.loc[new_id] = [new_id, 0, pred_poly.area, pred_poly]\n new_id += 1\n n_new += 1\n \n # print(\"gdf_now:\", gdf_now)\n gdf_dict[f] = gdf_now\n gdf_dict['master'] = gdf_master_Out\n\n # save!\n if len(gdf_now) > 0:\n gdf_now.to_file(output_path, driver=\"GeoJSON\")\n else:\n print(\"Empty dataframe, writing empty gdf\", output_path)\n open(output_path, 'a').close()\n\n if verbose:\n print(\" \", \"N_new, N_matched:\", n_new, n_matched)\n \n return \n \n\ndef sn7_convert_geojsons_to_csv(json_dirs, output_csv_path, population='proposal'):\n '''\n Convert jsons to csv\n Population is either \"ground\" or \"proposal\" \n '''\n \n first_file = True # switch that will be turned off once we process the first file\n for json_dir in tqdm.tqdm(json_dirs):\n json_files = sorted(glob.glob(os.path.join(json_dir, '*.geojson')))\n for json_file in tqdm.tqdm(json_files):\n try:\n df = gpd.read_file(json_file)\n except (fiona.errors.DriverError):\n message = '! Invalid dataframe for %s' % json_file\n print(message)\n continue\n #raise Exception(message)\n if population == 'ground':\n file_name_col = df.image_fname.apply(lambda x: os.path.splitext(x)[0])\n elif population == 'proposal':\n file_name_col = os.path.splitext(os.path.basename(json_file))[0]\n else:\n raise Exception('! Invalid population')\n df = gpd.GeoDataFrame({\n 'filename': file_name_col,\n 'id': df.Id.astype(int),\n 'geometry': df.geometry,\n })\n if len(df) == 0:\n message = '! 
Empty dataframe for %s' % json_file\n print(message)\n #raise Exception(message)\n\n if first_file:\n net_df = df\n first_file = False\n else:\n net_df = net_df.append(df)\n \n net_df.to_csv(output_csv_path, index=False)\n return net_df" ]
[ [ "numpy.max" ] ]
jukedl/tensorflow
[ "57f08cbd1fe217a4befe8ab650598acf6458f965" ]
[ "tensorflow/python/compat/compat.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\n\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 6, 27)\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibiltiy, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. 
Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n global _FORWARD_COMPATIBILITY_HORIZON\n try:\n old_compat_date = _FORWARD_COMPATIBILITY_HORIZON\n _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)\n yield\n finally:\n _FORWARD_COMPATIBILITY_HORIZON = old_compat_date\n" ]
[ [ "tensorflow.python.util.tf_export.tf_export" ] ]
TriceHelix/ASMAGAN
[ "6e2b5b587f88f641fdcc05a81cf5f0b4d6a9f3e1", "6e2b5b587f88f641fdcc05a81cf5f0b4d6a9f3e1" ]
[ "utilities/LossUtili.py", "export_scripts/export_gen_onnx.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#############################################################\n# File: LossUtili.py\n# Created Date: Thursday October 10th 2019\n# Author: Chen Xuanhong\n# Email: chenxuanhongzju@outlook.com\n# Last Modified: Monday, 14th October 2019 5:19:31 pm\n# Modified By: Chen Xuanhong\n# Copyright (c) 2019 Shanghai Jiao Tong University\n#############################################################\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef __classificationLoss__(logit, target):\n \"\"\"Compute binary cross entropy loss.\"\"\"\n return F.binary_cross_entropy_with_logits(logit, target, reduction='sum')/logit.size(0)\n\ndef __hingeLoss__(logit, label):\n return nn.ReLU()(1.0 - label * logit).mean()# / logit.size(0)\n\ndef getClassifierLoss(classificationLossType):\n if classificationLossType == \"hinge\":\n return __hingeLoss__\n elif classificationLossType == \"cross-entropy\":\n return __classificationLoss__\n\ndef gradientPenalty(y, x):\n \"\"\"Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.\"\"\"\n weight = torch.ones(y.size()).cuda()\n dydx = torch.autograd.grad(outputs=y,\n inputs=x,\n grad_outputs=weight,\n retain_graph=True,\n create_graph=True,\n only_inputs=True)[0]\n\n dydx = dydx.view(dydx.size(0), -1)\n dydx_l2norm = (torch.sum(dydx**2, dim=1)).sqrt()\n return ((dydx_l2norm-1)**2).mean()\n\ndef gradientPenaltyWithRelu(fakeImages, realImages):\n \"\"\"Compute gradient penalty: (max(0,L2_norm(dy/dx) - 1))**2.\"\"\"\n weight = torch.ones(fakeImages.size()).cuda()\n dydx = torch.autograd.grad(outputs=fakeImages,\n inputs=realImages,\n grad_outputs=weight,\n retain_graph=True,\n create_graph=True,\n only_inputs=True)[0]\n\n dydx = dydx.view(dydx.size(0), -1)\n dydx_l2norm = (torch.sum(dydx**2, dim=1)).sqrt()\n return (nn.ReLU()(dydx_l2norm-1)**2).mean()", "import os\nimport json\nimport torch\nimport torch.onnx\n\nclass Exporter(object):\n def __init__(self, config):\n self.config = config\n\n def export(self):\n save_path_gen = \"exports/{}_generator.onnx\".format(self.config[\"version\"])\n exports_dir = \"exports\"\n save_path_condition_labels = \"{}/{}_condition-labels.json\".format(exports_dir, self.config[\"version\"])\n\n print(\"Loading trained model...\")\n\n package = __import__(self.config[\"com_base\"]+self.config[\"gScriptName\"], fromlist=True)\n GClass = getattr(package, 'Generator')\n \n Gen = GClass(self.config[\"GConvDim\"], self.config[\"GKS\"], self.config[\"resNum\"], len(self.config[\"selectedStyleDir\"]))\n if self.config[\"cuda\"] >=0:\n Gen = Gen.cuda()\n \n checkpoint = torch.load(self.config[\"ckp_name\"])\n Gen.load_state_dict(checkpoint['g_model'])\n Gen.eval() # set to inference mode\n\n print(\"Exporting condition labels tensor as JSON...\")\n\n batch_size = self.config[\"batchSize\"]\n n_class = len(self.config[\"selectedStyleDir\"])\n\n condition_labels = torch.ones((n_class, batch_size, 1)).int()\n for i in range(n_class):\n condition_labels[i,:,:] = condition_labels[i,:,:]*i\n\n if not os.path.exists(exports_dir):\n os.makedirs(exports_dir)\n with open(save_path_condition_labels, 'w') as f:\n json.dump(condition_labels.cpu().numpy().tolist(), f) # dump tensor as json before cuda alloc\n\n if self.config[\"cuda\"] >=0:\n condition_labels = condition_labels.cuda()\n\n print(\"Exporting Generator as ONNX model...\")\n\n dummy_input = torch.randn(1, 3, 1024, 1024, requires_grad=True)\n if self.config[\"cuda\"] >=0:\n dummy_input = dummy_input.cuda()\n\n opset = 
self.config[\"onnxOpset\"]\n dynamic_axes_mapping = {'input_img' : {2 : 'iheight', 3 : 'iwidth'},\n 'output_img' : {2 : 'oheight', 3 : 'owidth'}}\n\n # Export the model\n torch.onnx.export(Gen, # model being run\n (dummy_input, condition_labels[0, 0, :]), # model input\n save_path_gen, # where to save the model\n export_params = True, # store the trained parameter weights inside the model file\n opset_version = opset, # the ONNX version to export the model to\n do_constant_folding = True, # whether to execute constant folding for optimization\n input_names = ['input_img', 'input_style'], # the model's input names\n output_names = ['output_img'] # the model's output names\n #dynamic_axes = dynamic_axes_mapping # dynamic input size/shape\n )\n \n print(\"Finished exporting Generator!\")\n" ]
[ [ "torch.nn.functional.binary_cross_entropy_with_logits", "torch.autograd.grad", "torch.nn.ReLU", "torch.sum" ], [ "torch.onnx.export", "torch.randn", "torch.load", "torch.ones" ] ]
Azbesciak/RadonTransform
[ "5a3b722ae4515c31c74f38323fa183da60a3e701" ]
[ "dicom_reader.py" ]
[ "import pydicom\nfrom pydicom.data import get_testdata_files\nimport matplotlib.pyplot as plt\n# get some test data\n# filename = get_testdata_files(\"CT_small.dcm\")[0]\nfilename = \"my_dicom.dic\"\nds = pydicom.dcmread(filename)\npixel_bytes = ds.pixel_array\nprint(ds.PatientName)\n\nprint(ds[0x10,0x10].value)\n\nds.PatientID = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nds.SeriesNumber = 5\nprint(ds)\nds[0x10,0x10].value = 'Test'\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 8))\nax.imshow(pixel_bytes, cmap=plt.cm.Greys_r)\nplt.show()\npydicom.filewriter.write_file(\"my_dicom.dic\", ds) # extension required\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
TongheYing/ML-Au
[ "0790b0fb44612595c336e3cf2fda3287b7797a2a", "0790b0fb44612595c336e3cf2fda3287b7797a2a" ]
[ "local/pso/pso_sp.py", "dataset-ML/N20/vasp/read_static.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport numpy as np\nimport copy\nfrom local.pso.pso_atoms import PsoAtoms\nfrom local.pso.Pre_Relax import PreRelax\nfrom scipy.optimize import linear_sum_assignment\n\n\nclass Pso(object):\n \"\"\"Pso object.\n\n The Pso object contains the main process of PSO.\n It must contains a list of atoms objects for optimization,a parameters\n dictionary and a calculator object for dft calculation.\n The main Pso equation:v(t+1)=w*v(t)+c1*r1*(pbest-x)+c2*r2*(gbest-x)\n The default value of this parameters is:WEIGHT=0.6,c1=c2=2,r1 and r2 is two\n seperately generated random numbers is range [0,1].\n NPAR:number of particles.It increases with the size of the system.Default\n value is 20.\n EDIFF:If the energy difference between the neighbour gbest is lower than\n it,the Pso_evolution will stop.The lower,the more precise.\n Set this parameters' value by a pso_init file or using set_parameters().\n \"\"\"\n\n def __init__(self, atoms_list=None, parameters=None, calculator=None):\n\n self.set_atoms_list(atoms_list)\n self.default_parameters={'UNF':1.,'c1':2.0,'c2':2.0,'COC':0.7298,'NPAR':150,'EDIFF':1e-4,'LSS':3,'ELIR':0.4,'WEIGHT':0.9,'GER':1,'PREX':1,'FIX2':[0],'PATH':0.01,'VMAX':0.5,'SCO':0.05,'DCO':2.,'ZMAX':[1.,1.,1],'ZMIN':[0.,0.,0.],'VAC':[0.,0.,10.],'PBC':[0,0,0],'CUO':[0.8,1.6],'LPAIR':1,'LPRELAX':1}\n if parameters is None:#Use the default value\n parameters=self.default_parameters\n if 'pso_init' in os.listdir(os.getcwd()):\n from local.pso.Read_Pso_Init import PSO_INIT\n\n self.pso_init=PSO_INIT()\n tmp=self.default_parameters\n for key in tmp.keys():\n if key in self.pso_init.parameters.keys():\n tmp[key]=self.pso_init.parameters[key]\n parameters=tmp\n self.set_parameters(parameters)\n\n if calculator is None:\n from local.pso.nnmodel import Model\n nnmodel = Model()\n self.set_calc(nnmodel)\n self.set_calc(calculator)\n\n def set_calc(self,calculator):\n self._calc=calculator\n\n def get_calc(self):\n return self._calc\n\n def set_atoms_list(self,atoms_list=None):\n self._atoms_list=[]\n if atoms_list!=None:\n for atoms in atoms_list:\n self.add_atoms(atoms)\n\n def add_atoms(self,atoms=None):\n if not isinstance(atoms,PsoAtoms):\n raise TypeError('The given atoms is not a PsoAtoms object.')\n self._atoms_list.append(atoms)\n\n def del_atoms(self,atoms=None):\n self._atoms_list.remove(atoms)\n\n def get_atoms(self):\n return self._atoms\n\n def set_parameters(self,parameters):\n if not isinstance(parameters,dict):\n raise ValueError('The given format is not a dictionary')\n a=set(parameters.keys())\n b=set(self.default_parameters.keys())\n if not a<=b:\n raise ValueError\n c=self.default_parameters\n for key in parameters.keys():\n c[key]=parameters[key]\n self._parameters=c\n\n def get_parameters(self):\n return copy.deepcopy(self._parameters)\n\n\ndef pso_evo(pso=None, natoms=None):\n from local.pso.rand_stru import RandStru\n\n \"It is the main function of Pso.\"\n import os\n import shutil\n\n def write_pso_data():\n filename='pso_data_%03d'%GER\n atoms=pso._atoms_list[0]\n f=open(filename,'w')\n f.write('System\\n')\n f.write(atoms.get_system_name())\n f.write('\\n')\n f.write('Subs Atom\\n')\n for elem in atoms._subs_elements:\n f.write(elem+' ')\n f.write('\\n')\n for num in atoms._subs_numbers:\n f.write('%3d '%num)\n f.write('\\n')\n f.write('Abso Atom\\n')\n for elem in atoms._abso_elements:\n f.write(elem+' ')\n f.write('\\n')\n for num in atoms._abso_numbers:\n f.write('%3d '%num)\n f.write('\\n')\n 
f.write('Lattice Constant %.16f\\n'%atoms.get_lattice_constant())\n f.write('Cell\\n')\n for i in atoms.get_cell():\n for j in i:\n f.write('%.16f '%j)\n f.write('\\n')\n f.write('Pbc ')\n for i in atoms.get_pbc():\n f.write('%d '%i)\n f.write('\\n')\n f.write('Subs Mass\\n')\n for mass in atoms.get_subs_masses():\n f.write('%.4f '%mass)\n f.write('\\n')\n f.write('Abso Mass\\n')\n for mass in atoms.get_abso_masses():\n f.write('%.4f '%mass)\n f.write('\\n')\n f.write('Subs Radius\\n')\n for radius in atoms.get_subs_radius():\n f.write('%.4f '%radius)\n f.write('\\n')\n f.write('Abso Radius\\n')\n for radius in atoms.get_abso_radius():\n f.write('%.4f '%radius)\n f.write('\\n')\n f.write('Constraints\\n')\n for i in atoms.get_constraints():\n for j in i:\n f.write('%d '%j)\n f.write('\\n')\n f.write('Subs Structure\\n')\n for atom in atoms.get_subs_positions():\n for cord in atom:\n f.write('%.16f '%cord)\n f.write('\\n')\n f.write('Pso Parameters\\n')\n for key in pso._parameters.keys():\n f.write('%s '%key)\n value=pso._parameters[key]\n if isinstance(value,list):\n for i in value:\n f.write('%.8f '%i)\n f.write('\\n')\n else:\n f.write('%.8f \\n'%pso._parameters[key])\n f.write('Calculator '+pso._calc._name+'\\n')\n f.write('!!!!!!!!!!!!!!!!!!!!!!!\\n')\n f.write(' Generation %d\\n'%GER)\n f.write('\\n')\n if GER==1:\n f.write('Last Gbest %.16f\\n'%0)\n else:\n f.write('Last Gbest %.16f\\n'%gbest[1])\n f.write('\\n&&&&&&&&&&&&&&& Number of Eliminated structures &&&&&&&&&&&&&&&\\n')\n for i in elim_list:\n f.write('%2d '%i)\n f.write('\\n\\n')\n for i1,atoms in enumerate(pso._atoms_list):\n stru=atoms.get_abso_positions()\n f.write('----------Particle %d----------\\n'%i1)\n f.write('Positions\\n')\n for atom in stru:\n for cord in atom:\n f.write('%.16f '%cord)\n f.write('\\n')\n f.write('Velocities\\n')\n for i2 in range(len(stru)):\n for v in velocities[i1,i2,:]:\n f.write('%.16f '%v)\n f.write('\\n')\n f.write(' *******************************\\n')\n f.close()\n\n def new_velocity(atoms,v,pbest,gbest,lpair):\n from math import e\n v1,v2=[],[]\n c1,c2=pso._parameters['c1'],pso._parameters['c2']\n x=pso._parameters['COC']\n w=pso._parameters['WEIGHT']\n r1=np.random.rand()\n r2=np.random.rand()\n w = 0.4 + 0.5 / GER\n if np.abs(pbest[0]-gbest[1])<=5e-2:\n w=0.25\n f2.write('x %.3f; w %.3f; c1 %.3f; c2 %.3f; r1 %.3f; r2 %.3f\\n'%(x,w,c1,c2,r1,r2))\n f2.write('Last Velocities\\n')\n for i in v:\n for j in i:\n f2.write('%.16f '%j)\n f2.write('\\n')\n temp=0\n stru=atoms.get_abso_positions(cord_mod='r')\n pbest=pbest[1].get_abso_positions(cord_mod='r')\n gbest=gbest[2].get_abso_positions(cord_mod='r')\n f2.write('Pbest\\n')\n for n in atoms.get_abso_numbers():\n dist=[atoms.get_distance(atom1,atom2)[0] for atom1 in stru[temp:temp+n] for atom2 in pbest[temp:temp+n]]\n dist=np.array(dist)\n dist=dist.reshape(n,n)\n for i in dist:\n for j in i:\n f2.write('%.16f '%j)\n f2.write('\\n')\n if lpair:\n path1=get_pair(dist)[0]\n else:\n path1=range(len(dist))\n for i in path1:\n f2.write('%d '%i)\n f2.write('\\n')\n for i,j in enumerate(path1):\n v1.append(atoms.get_distance(pbest[temp+j],stru[temp+i])[1])\n temp+=n\n v1=np.array(v1)\n f2.write('v1\\n')\n for i in v1:\n for j in i:\n f2.write('%.16f '%j)\n f2.write('\\n')\n temp=0\n f2.write('Gbest\\n')\n for n in atoms.get_abso_numbers():\n dist=[atoms.get_distance(atom1,atom2)[0] for atom1 in stru[temp:temp+n] for atom2 in gbest[temp:temp+n] ]\n dist=np.array(dist)\n dist=dist.reshape(n,n)\n for i in dist:\n for j in i:\n f2.write('%.16f '%j)\n 
f2.write('\\n')\n if lpair:\n path1=get_pair(dist)[0]\n else:\n path1=range(len(dist))\n for i in path1:\n f2.write('%d '%i)\n f2.write('\\n')\n for i,j in enumerate(path1):\n v2.append(atoms.get_distance(gbest[temp+j],stru[temp+i])[1])\n temp+=n\n v2=np.array(v2)\n f2.write('v2\\n')\n for i in v2:\n for j in i:\n f2.write('%.16f '%j)\n f2.write('\\n')\n f2.write('\\n')\n new_velo=x*(c1*r1*v1+c2*r2*v2)+w*v\n return new_velo\n\n init_dir=os.getcwd()\n pbest=[]\n gbest=[0,1e5,None]\n\n # initialization, generate the initial velocity randomly in the range [-0.1,0.1]\n if pso is not None:\n if not isinstance(pso, Pso):\n raise ValueError('NO Pso Object')\n ediff=pso._parameters['EDIFF']\n npar=pso._parameters['NPAR']\n c1=pso._parameters['c1']\n c2=pso._parameters['c2']\n unf=pso._parameters['UNF']\n coc=pso._parameters['COC']\n lss=pso._parameters['LSS']\n elir=pso._parameters['ELIR']\n GER=pso._parameters['GER']\n vmax=pso._parameters['VMAX']\n vac=pso._parameters['VAC']\n dis_cutoff=pso._parameters['DCO']\n lprelax=pso._parameters['LPRELAX']\n lpair=pso._parameters['LPAIR']\n ntsubs=len(pso._atoms_list[0]._subs_symbols)\n ntabso=len(pso._atoms_list[0]._abso_symbols)\n f0 = open('pso_data','a')\n f0.write('%s\\n' % pso._atoms_list[0].get_system_name())\n f0.write('Parameters\\n')\n for key in pso._parameters.keys():\n f0.write('%s '%key)\n value = pso._parameters[key]\n if isinstance(value, list):\n for i in pso._parameters[key]:\n f0.write('%.3f ' % i)\n f0.write('\\n')\n else:\n f0.write('%.3f\\n' % pso._parameters[key])\n f0.write(\"--------Substrate's Atoms' Positions:--------\\n\")\n for atom in pso._atoms_list[0].get_subs_positions():\n for cord in atom:\n f0.write('%.16f '%cord)\n f0.write('\\n')\n f0.write('**********************************\\n')\n velocities = vmax-2*vmax*np.random.rand(npar, ntabso, 3)\n dirname = 'pso_'+'001'\n if lprelax:\n PreRelax(pso, filename='pre_relax_001', dirt=init_dir)\n if dirname in os.listdir(os.getcwd()):shutil.rmtree(dirname)\n os.mkdir(dirname)\n os.chdir(dirname)\n for n, atoms in enumerate(pso._atoms_list):\n dirname1 = dirname+'_'+'%04d'%n\n if dirname1 in os.listdir(os.getcwd()):shutil.rmtree(dirname1)\n os.mkdir(dirname1)\n os.chdir(dirname1)\n pso._calc.sp_run(atoms=atoms, input_dir=init_dir)\n os.chdir('..')\n write_pso_data()\n os.chdir('..')\n else:\n # Read information from pso_data\n from local.pso.Read_Pso_Init import PSO_INIT\n pso_init=PSO_INIT()\n GER=pso_init.parameters['GER']\n\n updated = pso_init.parameters['UPDATED']\n f0=open('pso_data','a')\n os.chdir('pso_%03d'%GER)\n pso,last_gbest,last_elim=read_data(filename='pso_data_%03d'%GER)\n ediff=pso._parameters['EDIFF']\n npar=pso._parameters['NPAR']\n c1=pso._parameters['c1']\n c2=pso._parameters['c2']\n unf=pso._parameters['UNF']\n coc=pso._parameters['COC']\n lss=pso._parameters['LSS']\n elir=pso._parameters['ELIR']\n vmax=pso._parameters['VMAX']\n vac=pso._parameters['VAC']\n zmax=pso._parameters['ZMAX']\n zmin=pso._parameters['ZMIN']\n sim_cutoff=pso._parameters['SCO']\n dis_cutoff=pso._parameters['DCO']\n lprelax=pso._parameters['LPRELAX']\n lpair=pso._parameters['LPAIR']\n gen =pso._parameters['GER']\n ntsubs=len(pso._atoms_list[0]._subs_symbols)\n ntabso=len(pso._atoms_list[0]._abso_symbols)\n\n # Main Loop\n # Read information from result\n os.chdir('..')\n os.chdir('pso_%03d'%GER)\n dir_list=os.listdir(os.getcwd())\n numofupdate = 0\n numofinit = 0\n numoftest = 0\n\n for n in range(int(npar)):\n print(\"it's a {0} particle\".format(n))\n dirname = 
'pso_%03d'%GER+'_%04d'%n\n os.chdir(dirname)\n\n if not updated:\n from local.optimize_cluster import structure_optimization\n from local.error_indicator import read_trajectory\n if n == 0:\n if 'update_' + str(int(GER)).zfill(3) in os.listdir(\"../\"):\n shutil.rmtree('../update_' + str(int(GER)).zfill(3))\n os.mkdir('../update_' + str(int(GER)).zfill(3))\n\n if 'updating_' + str(int(GER)).zfill(3) in os.listdir('../'):\n os.system('rm -rf ../updating_' + str(int(GER)).zfill(3))\n if 'test_store_' + str(int(GER)).zfill(3) in os.listdir('../'):\n os.system('rm -rf ../test_store_' + str(int(GER)).zfill(3))\n\n structure_optimization(filename='POSCAR', gen=GER, natoms=natoms)\n numofupdate, numofinit, numoftest = \\\n read_trajectory(gen=GER, prev_update=numofupdate, prev_init=numofinit, prev_test=numoftest, natoms=natoms)\n os.system('cp ../../../clustertut/ase_calcs/optimization.poscar ./POSCAR_pbest')\n os.system('cp ./POSCAR_pbest ../update_' + str(int(GER)).zfill(3) + '/POSCAR_' + str(n).zfill(4))\n\n energy=pso._calc.get_energy(updated=updated, num=n, gen=GER)\n pso._atoms_list[n].set_atoms_energy(energy)\n pi = [energy, pso._atoms_list[0].copy()]\n abso_stru=pso._calc.get_stru(pi[1])\n pi[1].set_abso_positions(abso_stru, cord_mod='d')\n pbest.append(pi)\n os.chdir('..')\n\n\n if updated:\n energies=[i[0] for i in pbest]\n energies=np.array(energies)\n energy_sort=np.argsort(energies)\n gbest=[energies.argmin(),energies.min(),pbest[energies.argmin()][1].copy()]\n velocities=[atoms.get_atoms_velo() for atoms in pso._atoms_list]\n velocities=np.array(velocities)\n filename='pso_data_%03d'%GER\n f2=open(filename,'r')\n f3=open('tmp','w')\n count=0\n for line in f2:\n if 'Last Gbest' in line:\n f3.write('Energies sort list\\n')\n for i in energy_sort:\n f3.write('%d %.16f\\n'%(i,energies[i]))\n if ' *******************************' in line:\n f3.write('Pbest Positions and Energy\\n')\n for i in pbest[count][1].get_abso_positions():\n for j in i:\n f3.write('%.16f '%j)\n f3.write('\\n')\n f3.write('Pbest Free Energy %.16f\\n'%pbest[count][0])\n count+=1\n f3.write(line)\n\n f3.write('----------Gbest Positions and Energy----------\\n')\n f3.write('Gbest Positions\\n')\n for i in gbest[2].get_abso_positions():\n for j in i:\n f3.write('%.16f '%j)\n f3.write('\\n')\n f3.write('Gbest Free Energy %.16f\\n'%gbest[1])\n f3.write('Gbest Number %d\\n'%gbest[0])\n f3.close()\n f2.close()\n os.rename('tmp',filename)\n os.chdir(init_dir)\n if np.abs(gbest[1]-last_gbest)>=np.abs(ediff):\n GER+=1\n pso._parameters['GER']+=1\n # Update Swarm\n f2=open('velocities_%03d'%GER,'w')\n for n,atoms in enumerate(pso._atoms_list):\n f2.write('*************** Particle %d ***************\\n'%n)\n velocities[n]=new_velocity(atoms,velocities[n],pbest[n],gbest,lpair)\n f2.close()\n #eliminate the high energy structures,and substitute them by new random structures.\n neli=int(elir*npar)\n elim_list=energy_sort[-neli:]\n surv_list=energy_sort[:-neli]\n elim_list=list(elim_list)\n\n surv_list=list(surv_list)\n\n surv_list.reverse()\n tmp1=[]\n tmp2=[]\n #if one structure is both in last_elim and elim,we do not eliminate it and keep it oen generation!\n for n in elim_list:\n if n in last_elim:\n for m in surv_list:\n if m not in last_elim:\n surv_list.remove(m)\n tmp1.append(m)\n tmp2.append(n)\n break\n if m==surv_list[-1]:\n tmp1.append(n)\n else:\n tmp1.append(n)\n elim_list=tmp1\n surv_list.extend(tmp2)\n for n in elim_list:\n atoms=pso._atoms_list[n]\n RandStru(atoms, natoms=natoms)\n 
velocities[n]=vmax-vmax*2*np.random.rand(ntabso,3)\n for n in surv_list:\n atoms=pso._atoms_list[n]\n stru=atoms.get_abso_positions(cord_mod='r')\n stru=stru+velocities[n]\n atoms.set_abso_positions(stru,cord_mod='r')\n #Evaluate Swarm\n if lprelax:\n PreRelax(pso,filename='pre_relax_%03d'%GER,dirt=init_dir)\n f0.write('Generation %d\\n'%GER)\n dirname='pso_'+'%03d'%GER\n if dirname in os.listdir(os.getcwd()):shutil.rmtree(dirname)\n os.mkdir(dirname)\n os.chdir(dirname)\n for n,atoms in enumerate(pso._atoms_list):\n dirname1=dirname+'_'+'%04d'%n\n if dirname1 in os.listdir(os.getcwd()):shutil.rmtree(dirname1)\n os.mkdir(dirname1)\n os.chdir(dirname1)\n pso._calc.sp_run(atoms=atoms,input_dir=init_dir)\n os.chdir('..')\n temp = pso._atoms_list\n write_pso_data()\n print('Done!')\n os.chdir('..')\n else: # energy converge\n print('COMPLETED!')\n f0.write('\\n\\n***************Energy Converged!***************\\n')\n return gbest[2]\n f0.close()\n\n\ndef read_data(filename='pso_data_001'):\n # from vasp import Vasp\n from local.pso.nnmodel import Model\n 'read information from pso-data file and return a Pso object'\n f=open(filename,'r')\n print(filename)\n data=[line.strip() for line in f ]\n count=0\n last_elim=[]\n last_gbest=0.\n for n,line in enumerate(data):\n if 'System' in line:\n system_name=data[n+1]\n continue\n if 'Subs Atom' in line:\n subs_elements=data[n+1].split()\n subs_numbers=[int(i) for i in data[n+2].split()]\n nsubs=sum(subs_numbers)\n continue\n if 'Abso Atom' in line:\n abso_elements=data[n+1].split()\n abso_numbers=[int(i) for i in data[n+2].split()]\n nabso=sum(abso_numbers)\n continue\n if 'Lattice Constant' in line:\n lattice_constant=float(line.split()[2])\n continue\n if 'Cell' in line:\n cell=[float(j) for i in range(3) for j in data[n+i+1].split()]\n continue\n if 'Pbc' in line:\n pbc=[int(i) for i in line.split()[1:]]\n if 'Subs Mass' in line:\n subs_masses=[float(i) for i in data[n+1].split()]\n continue\n if 'Abso Mass' in line:\n abso_masses=[float(i) for i in data[n+1].split()]\n continue\n if 'Subs Radius' in line:\n subs_radius=[float(i) for i in data[n+1].split()]\n continue\n if 'Abso Radius' in line:\n abso_radius=[float(i) for i in data[n+1].split()]\n continue\n if 'Constraints' in line:\n const=[int(i) for i in data[n+1].split()]\n continue\n if 'Subs Structure' in line:\n subs_positions=[float(j) for i in range(nsubs) for j in data[n+1+i].split()]\n continue\n if 'Parameters' in line:\n parameters={}\n for i in data[n+1:]:\n if 'Calculator' in i:\n calc=i\n break\n key=i.split()[0]\n a=[float(j) for j in i.split()[1:]]\n if len(a)==1 and key!='FIX2':a=a[0]\n parameters[key]=a\n cot=1\n atoms_list=[]\n while cot<=parameters['NPAR']:\n subs_symbols=[]\n abso_symbols=[]\n for i,elem in enumerate(subs_elements):\n subs_symbols.extend([elem]*subs_numbers[i])\n for i,elem in enumerate(abso_elements):\n abso_symbols.extend([elem]*abso_numbers[i])\n atoms=PsoAtoms(name=system_name,subs_symbols=subs_symbols, abso_symbols=abso_symbols,subs_positions=subs_positions, subs_masses=subs_masses,abso_masses=abso_masses,\n subs_radius=subs_radius,abso_radius=abso_radius,cell=cell,\n lattice_constant=lattice_constant,constraints=const,pbc=pbc)\n atoms_list.append(atoms)\n cot+=1\n nnmodel = Model(lattice_parameter=lattice_constant)\n pso=Pso(atoms_list=atoms_list,calculator=nnmodel,parameters=parameters)\n print('Pso Object Done!')\n print(len(pso._atoms_list))\n if 'Last Gbest' in line:\n last_gbest=float(line.split()[2])\n continue\n if 'Number of Eliminated 
structures' in line:\n last_elim=[int(i) for i in data[n+1].split()]\n continue\n if 'Positions' in line and 'Pbest' not in line and 'Gbest' not in line:\n abso_positions=[float(j) for i in range(nabso) for j in data[n+1+i].split()]\n pso._atoms_list[count].set_abso_positions(abso_positions)\n continue\n if 'Velocities' in line:\n velocities=[float(j) for i in range(nabso) for j in data[n+1+i].split()]\n pso._atoms_list[count].set_atoms_velo(velocities)\n count+=1\n continue\n\n #print abso_elements,abso_numbers,abso_symbols\n return pso,last_gbest,last_elim\n\n\ndef get_pair(dist):\n\n row,col=linear_sum_assignment(dist)\n return col,dist[row,col].sum()\n", "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"read energy and forces from 50 static calculation structure\"\"\"\nimport os\nimport sys\nimport numpy as np\n\ndef write_id_prop(filename=\"id_prop.csv\", index=None, force=None, energy=None):\n row, col = force.shape[0], force.shape[1]\n force_str = \"\"\n # print(\"row, col=\", row, col)\n with open(filename, \"a\") as f:\n for i in range(row):\n for j in range(col):\n force_str = force_str + \",\" + str(force[i][j])\n f.write(index+\",\"+str(energy)+force_str+'\\n')\n\n\ndef read_static():\n # title, lattice_constant, a, elements, numofatoms, selective_dynamics, \\\n # selective_flags, direct_ac, atom_pos = read_poscar('N'+str(n)+'-static/' + 'N'+str(n)+'-static/0000/POSCAR')\n with open('0000/POSCAR', 'r') as f:\n data = [line.strip() for line in f.readlines()]\n numofatoms = int(data[6])\n # atom_force = np.zeros_like(atom_pos)\n atom_force = np.zeros((numofatoms, 3))\n for i in range(150):\n str_i = str(i).zfill(4)\n # filename = 'N'+str(n)+'-static/' + 'N'+str(n)+'-static/' + str_i + '/OUTCAR'\n filename = str_i + '/OUTCAR'\n with open (filename, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\n for j, items in enumerate(data):\n if 'TOTAL-FORCE' in items:\n energy = float(data[j + 14 + int(numofatoms)].split()[6])\n print(\"energy=\", float(data[j + 12 + int(numofatoms)].split()[4]))\n for bias in range(int(numofatoms)):\n content = data[j+2+bias].split()\n for idx in range(len(content)):\n if idx >= 3:\n atom_force[bias][idx - 3] = float(content[idx])\n # print(\"force=\", atom_force[bias][idx - 3])\n else:\n pass\n # write_id_prop('N'+str(n)+'-static/' + 'N'+str(n)+'-static/' + 'id_prop.csv', str_i, atom_force, energy)\n write_id_prop('id_prop.csv', str_i, atom_force, energy)\n else:\n pass\n" ]
[ [ "numpy.array", "numpy.random.rand", "scipy.optimize.linear_sum_assignment", "numpy.argsort", "numpy.abs" ], [ "numpy.zeros" ] ]
loicdiridollou/pandas-loic
[ "ccb36cc8f1eeed53dea321ee7381602a6957de54" ]
[ "pandas/tests/indexes/test_indexing.py" ]
[ "\"\"\"\ntest_indexing tests the following Index methods:\n __getitem__\n get_loc\n get_value\n __contains__\n take\n where\n get_indexer\n get_indexer_for\n slice_locs\n asof_locs\n\nThe corresponding tests.indexes.[index_type].test_indexing files\ncontain tests for the corresponding methods specific to those Index subclasses.\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import InvalidIndexError\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n NaT,\n PeriodIndex,\n RangeIndex,\n Series,\n TimedeltaIndex,\n)\nimport pandas._testing as tm\nfrom pandas.core.api import (\n Float64Index,\n Int64Index,\n UInt64Index,\n)\n\n\nclass TestTake:\n def test_take_invalid_kwargs(self, index):\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n index.take(indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n index.take(indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n index.take(indices, mode=\"clip\")\n\n def test_take(self, index):\n indexer = [4, 3, 0, 2]\n if len(index) < 5:\n # not enough elements; ignore\n return\n\n result = index.take(indexer)\n expected = index[indexer]\n assert result.equals(expected)\n\n if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n msg = r\"'(.*Index)' object has no attribute 'freq'\"\n with pytest.raises(AttributeError, match=msg):\n index.freq\n\n def test_take_minus1_without_fill(self, index):\n # -1 does not get treated as NA unless allow_fill=True is passed\n if len(index) == 0:\n # Test is not applicable\n return\n\n result = index.take([0, 0, -1])\n\n expected = index.take([0, 0, len(index) - 1])\n tm.assert_index_equal(result, expected)\n\n\nclass TestContains:\n @pytest.mark.parametrize(\n \"index,val\",\n [\n (Index([0, 1, 2]), 2),\n (Index([0, 1, \"2\"]), \"2\"),\n (Index([0, 1, 2, np.inf, 4]), 4),\n (Index([0, 1, 2, np.nan, 4]), 4),\n (Index([0, 1, 2, np.inf]), np.inf),\n (Index([0, 1, 2, np.nan]), np.nan),\n ],\n )\n def test_index_contains(self, index, val):\n assert val in index\n\n @pytest.mark.parametrize(\n \"index,val\",\n [\n (Index([0, 1, 2]), \"2\"),\n (Index([0, 1, \"2\"]), 2),\n (Index([0, 1, 2, np.inf]), 4),\n (Index([0, 1, 2, np.nan]), 4),\n (Index([0, 1, 2, np.inf]), np.nan),\n (Index([0, 1, 2, np.nan]), np.inf),\n # Checking if np.inf in Int64Index should not cause an OverflowError\n # Related to GH 16957\n (Int64Index([0, 1, 2]), np.inf),\n (Int64Index([0, 1, 2]), np.nan),\n (UInt64Index([0, 1, 2]), np.inf),\n (UInt64Index([0, 1, 2]), np.nan),\n ],\n )\n def test_index_not_contains(self, index, val):\n assert val not in index\n\n @pytest.mark.parametrize(\n \"index,val\", [(Index([0, 1, \"2\"]), 0), (Index([0, 1, \"2\"]), \"2\")]\n )\n def test_mixed_index_contains(self, index, val):\n # GH#19860\n assert val in index\n\n @pytest.mark.parametrize(\n \"index,val\", [(Index([0, 1, \"2\"]), \"1\"), (Index([0, 1, \"2\"]), 2)]\n )\n def test_mixed_index_not_contains(self, index, val):\n # GH#19860\n assert val not in index\n\n def test_contains_with_float_index(self):\n # GH#22085\n integer_index = Int64Index([0, 1, 2, 3])\n uinteger_index = UInt64Index([0, 1, 2, 3])\n float_index = Float64Index([0.1, 1.1, 2.2, 3.3])\n\n for index in (integer_index, uinteger_index):\n assert 1.1 not in index\n assert 1.0 in index\n assert 1 in index\n\n assert 1.1 
in float_index\n assert 1.0 not in float_index\n assert 1 not in float_index\n\n def test_contains_requires_hashable_raises(self, index):\n if isinstance(index, MultiIndex):\n return # TODO: do we want this to raise?\n\n msg = \"unhashable type: 'list'\"\n with pytest.raises(TypeError, match=msg):\n [] in index\n\n msg = \"|\".join(\n [\n r\"unhashable type: 'dict'\",\n r\"must be real number, not dict\",\n r\"an integer is required\",\n r\"\\{\\}\",\n r\"pandas\\._libs\\.interval\\.IntervalTree' is not iterable\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n {} in index._engine\n\n\nclass TestGetValue:\n @pytest.mark.parametrize(\n \"index\", [\"string\", \"int\", \"datetime\", \"timedelta\"], indirect=True\n )\n def test_get_value(self, index):\n # TODO(2.0): can remove once get_value deprecation is enforced GH#19728\n values = np.random.randn(100)\n value = index[67]\n\n with pytest.raises(AttributeError, match=\"has no attribute '_values'\"):\n # Index.get_value requires a Series, not an ndarray\n with tm.assert_produces_warning(FutureWarning):\n index.get_value(values, value)\n\n with tm.assert_produces_warning(FutureWarning):\n result = index.get_value(Series(values, index=values), value)\n tm.assert_almost_equal(result, values[67])\n\n\nclass TestGetLoc:\n def test_get_loc_non_hashable(self, index):\n # MultiIndex and Index raise TypeError, others InvalidIndexError\n\n with pytest.raises((TypeError, InvalidIndexError), match=\"slice\"):\n index.get_loc(slice(0, 1))\n\n def test_get_loc_generator(self, index):\n\n exc = KeyError\n if isinstance(\n index,\n (DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, IntervalIndex),\n ):\n # TODO: make these more consistent?\n exc = InvalidIndexError\n with pytest.raises(exc, match=\"generator object\"):\n # MultiIndex specifically checks for generator; others for scalar\n index.get_loc(x for x in range(5))\n\n\nclass TestGetIndexer:\n def test_get_indexer_base(self, index):\n\n if index._index_as_unique:\n expected = np.arange(index.size, dtype=np.intp)\n actual = index.get_indexer(index)\n tm.assert_numpy_array_equal(expected, actual)\n else:\n msg = \"Reindexing only valid with uniquely valued Index objects\"\n with pytest.raises(InvalidIndexError, match=msg):\n index.get_indexer(index)\n\n with pytest.raises(ValueError, match=\"Invalid fill method\"):\n index.get_indexer(index, method=\"invalid\")\n\n def test_get_indexer_consistency(self, index):\n # See GH#16819\n\n if index._index_as_unique:\n indexer = index.get_indexer(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n else:\n msg = \"Reindexing only valid with uniquely valued Index objects\"\n with pytest.raises(InvalidIndexError, match=msg):\n index.get_indexer(index[0:2])\n\n indexer, _ = index.get_indexer_non_unique(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n\n\nclass TestConvertSliceIndexer:\n def test_convert_almost_null_slice(self, index):\n # slice with None at both ends, but not step\n\n key = slice(None, None, \"foo\")\n\n if isinstance(index, IntervalIndex):\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n with pytest.raises(ValueError, match=msg):\n index._convert_slice_indexer(key, \"loc\")\n else:\n msg = \"'>=' not supported between instances of 'str' and 'int'\"\n with pytest.raises(TypeError, match=msg):\n index._convert_slice_indexer(key, \"loc\")\n\n\nclass TestPutmask:\n def test_putmask_with_wrong_mask(self, index):\n # GH#18368\n if 
not len(index):\n return\n\n fill = index[0]\n\n msg = \"putmask: mask and data must be the same size\"\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) + 1, np.bool_), fill)\n\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) - 1, np.bool_), fill)\n\n with pytest.raises(ValueError, match=msg):\n index.putmask(\"foo\", fill)\n\n\n@pytest.mark.parametrize(\n \"idx\", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index([\"a\", \"b\", \"c\"])]\n)\ndef test_getitem_deprecated_float(idx):\n # https://github.com/pandas-dev/pandas/issues/34191\n\n with tm.assert_produces_warning(FutureWarning):\n result = idx[1.0]\n\n expected = idx[1]\n assert result == expected\n\n\ndef test_maybe_cast_slice_bound_kind_deprecated(index):\n if not len(index):\n return\n\n with tm.assert_produces_warning(FutureWarning):\n # passed as keyword\n index._maybe_cast_slice_bound(index[0], \"left\", kind=\"loc\")\n\n with tm.assert_produces_warning(FutureWarning):\n # pass as positional\n index._maybe_cast_slice_bound(index[0], \"left\", \"loc\")\n\n\n@pytest.mark.parametrize(\n \"idx,target,expected\",\n [\n ([np.nan, \"var1\", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)),\n (\n [np.nan, \"var1\", np.nan],\n [np.nan, \"var1\"],\n np.array([0, 2, 1], dtype=np.intp),\n ),\n (\n np.array([np.nan, \"var1\", np.nan], dtype=object),\n [np.nan],\n np.array([0, 2], dtype=np.intp),\n ),\n (\n DatetimeIndex([\"2020-08-05\", NaT, NaT]),\n [NaT],\n np.array([1, 2], dtype=np.intp),\n ),\n ([\"a\", \"b\", \"a\", np.nan], [np.nan], np.array([3], dtype=np.intp)),\n (\n np.array([\"b\", np.nan, float(\"NaN\"), \"b\"], dtype=object),\n Index([np.nan], dtype=object),\n np.array([1, 2], dtype=np.intp),\n ),\n ],\n)\ndef test_get_indexer_non_unique_multiple_nans(idx, target, expected):\n # GH 35392\n axis = Index(idx)\n actual = axis.get_indexer_for(target)\n tm.assert_numpy_array_equal(actual, expected)\n" ]
[ [ "pandas.core.api.UInt64Index", "pandas.Index", "numpy.array", "pandas.DatetimeIndex", "pandas._testing.assert_produces_warning", "pandas.core.api.Float64Index", "numpy.random.randn", "pandas._testing.assert_almost_equal", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "pandas.Series", "pandas._testing.assert_index_equal", "pandas.core.api.Int64Index" ] ]
Octavian-ai/shortest-path
[ "7baef8d4cad13297fa2d08b5ac0f19f06bb708e3" ]
[ "macgraph/train.py" ]
[ "\nimport tensorflow as tf\nfrom tensorflow.python import debug as tf_debug\nfrom collections import namedtuple\n\nfrom .estimator import get_estimator\nfrom .input import gen_input_fn\nfrom .args import *\n\n# Make TF be quiet\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"2\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n\ndef train(args):\n\n\t# So I don't frigging forget what caused working models\n\tsave_args(args)\n\t\n\n\tif args[\"use_tf_debug\"]:\n\t\thooks = [tf_debug.LocalCLIDebugHook()]\n\telse:\n\t\thooks = []\n\n\n\ttrain_size = sum(1 for _ in tf.python_io.tf_record_iterator(args[\"train_input_path\"]))\n\ttf.logging.info(f\"Training on {train_size} records\")\n\n\t# ----------------------------------------------------------------------------------\n\n\t\n\n\ttraining_segments = []\n\tTrainingSegment = namedtuple('TrainingSegment', ['args', 'max_steps'])\n\n\tif args[\"use_curriculum\"]:\n\t\tassert args[\"train_max_steps\"] is not None, \"Curriculum training requires --train-max-steps\"\n\n\t\tseg_steps = args[\"train_max_steps\"] / float(args[\"max_decode_iterations\"])\n\n\t\tfor i in range(1, args[\"max_decode_iterations\"]+1):\n\n\t\t\tseg_args = {**args}\n\t\t\tseg_args[\"filter_output_class\"] = [str(j) for j in list(range(i+1))]\n\t\t\ttotal_seg_steps = i*seg_steps*1000\n\n\t\t\t\n\t\t\ttraining_segments.append(TrainingSegment(seg_args, total_seg_steps))\n\n\telse:\n\t\ttraining_segments.append(TrainingSegment(args, args[\"train_max_steps\"]*1000 if args[\"train_max_steps\"] is not None else None))\n\n\n\tfor i in training_segments:\n\n\t\ttf.logging.info(f\"Begin training segment {i.max_steps} {i.args['filter_output_class']}\")\n\n\t\testimator = get_estimator(i.args)\n\n\t\ttrain_spec = tf.estimator.TrainSpec(\n\t\t\tinput_fn=gen_input_fn(i.args, \"train\"), \n\t\t\tmax_steps=int(i.max_steps),\n\t\t\thooks=hooks)\n\t\t\n\t\teval_spec = tf.estimator.EvalSpec(\n\t\t\tinput_fn=gen_input_fn(i.args, \"eval\"),\n\t\t\tthrottle_secs=i.args[\"eval_every\"])\n\n\t\ttf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\n\nif __name__ == \"__main__\":\n\targs = get_args()\n\n\t# DO IT!\n\ttrain(args)\n\n\n\n" ]
[ [ "tensorflow.python.debug.LocalCLIDebugHook", "tensorflow.python_io.tf_record_iterator", "tensorflow.logging.info", "tensorflow.estimator.train_and_evaluate" ] ]
Vibhu-Agarwal/pandas
[ "9c0f6a8d703b6bee48918f2c5d16418a7ff736e3", "bb43726e1f52a0ddee45fcf485690719f262870d" ]
[ "asv_bench/benchmarks/series_methods.py", "pandas/core/generic.py" ]
[ "from datetime import datetime\n\nimport numpy as np\nimport pandas.util.testing as tm\nfrom pandas import Series, date_range, NaT\n\n\nclass SeriesConstructor(object):\n\n params = [None, 'dict']\n param_names = ['data']\n\n def setup(self, data):\n self.idx = date_range(start=datetime(2015, 10, 26),\n end=datetime(2016, 1, 1),\n freq='50s')\n dict_data = dict(zip(self.idx, range(len(self.idx))))\n self.data = None if data is None else dict_data\n\n def time_constructor(self, data):\n Series(data=self.data, index=self.idx)\n\n\nclass IsIn(object):\n\n params = ['int64', 'uint64', 'object']\n param_names = ['dtype']\n\n def setup(self, dtype):\n self.s = Series(np.random.randint(1, 10, 100000)).astype(dtype)\n self.values = [1, 2]\n\n def time_isin(self, dtypes):\n self.s.isin(self.values)\n\n\nclass IsInFloat64(object):\n\n def setup(self):\n self.small = Series([1, 2], dtype=np.float64)\n self.many_different_values = np.arange(10**6, dtype=np.float64)\n self.few_different_values = np.zeros(10**7, dtype=np.float64)\n self.only_nans_values = np.full(10**7, np.nan, dtype=np.float64)\n\n def time_isin_many_different(self):\n # runtime is dominated by creation of the lookup-table\n self.small.isin(self.many_different_values)\n\n def time_isin_few_different(self):\n # runtime is dominated by creation of the lookup-table\n self.small.isin(self.few_different_values)\n\n def time_isin_nan_values(self):\n # runtime is dominated by creation of the lookup-table\n self.small.isin(self.few_different_values)\n\n\nclass IsInForObjects(object):\n\n def setup(self):\n self.s_nans = Series(np.full(10**4, np.nan)).astype(np.object)\n self.vals_nans = np.full(10**4, np.nan).astype(np.object)\n self.s_short = Series(np.arange(2)).astype(np.object)\n self.s_long = Series(np.arange(10**5)).astype(np.object)\n self.vals_short = np.arange(2).astype(np.object)\n self.vals_long = np.arange(10**5).astype(np.object)\n # because of nans floats are special:\n self.s_long_floats = Series(np.arange(10**5,\n dtype=np.float)).astype(np.object)\n self.vals_long_floats = np.arange(10**5,\n dtype=np.float).astype(np.object)\n\n def time_isin_nans(self):\n # if nan-objects are different objects,\n # this has the potential to trigger O(n^2) running time\n self.s_nans.isin(self.vals_nans)\n\n def time_isin_short_series_long_values(self):\n # running time dominated by the preprocessing\n self.s_short.isin(self.vals_long)\n\n def time_isin_long_series_short_values(self):\n # running time dominated by look-up\n self.s_long.isin(self.vals_short)\n\n def time_isin_long_series_long_values(self):\n # no dominating part\n self.s_long.isin(self.vals_long)\n\n def time_isin_long_series_long_values_floats(self):\n # no dominating part\n self.s_long_floats.isin(self.vals_long_floats)\n\n\nclass NSort(object):\n\n params = ['first', 'last', 'all']\n param_names = ['keep']\n\n def setup(self, keep):\n self.s = Series(np.random.randint(1, 10, 100000))\n\n def time_nlargest(self, keep):\n self.s.nlargest(3, keep=keep)\n\n def time_nsmallest(self, keep):\n self.s.nsmallest(3, keep=keep)\n\n\nclass Dropna(object):\n\n params = ['int', 'datetime']\n param_names = ['dtype']\n\n def setup(self, dtype):\n N = 10**6\n data = {'int': np.random.randint(1, 10, N),\n 'datetime': date_range('2000-01-01', freq='S', periods=N)}\n self.s = Series(data[dtype])\n if dtype == 'datetime':\n self.s[np.random.randint(1, N, 100)] = NaT\n\n def time_dropna(self, dtype):\n self.s.dropna()\n\n\nclass Map(object):\n\n params = ['dict', 'Series']\n param_names = 
'mapper'\n\n def setup(self, mapper):\n map_size = 1000\n map_data = Series(map_size - np.arange(map_size))\n self.map_data = map_data if mapper == 'Series' else map_data.to_dict()\n self.s = Series(np.random.randint(0, map_size, 10000))\n\n def time_map(self, mapper):\n self.s.map(self.map_data)\n\n\nclass Clip(object):\n params = [50, 1000, 10**5]\n param_names = ['n']\n\n def setup(self, n):\n self.s = Series(np.random.randn(n))\n\n def time_clip(self, n):\n self.s.clip(0, 1)\n\n\nclass ValueCounts(object):\n\n params = ['int', 'uint', 'float', 'object']\n param_names = ['dtype']\n\n def setup(self, dtype):\n self.s = Series(np.random.randint(0, 1000, size=100000)).astype(dtype)\n\n def time_value_counts(self, dtype):\n self.s.value_counts()\n\n\nclass Dir(object):\n\n def setup(self):\n self.s = Series(index=tm.makeStringIndex(10000))\n\n def time_dir_strings(self):\n dir(self.s)\n\n\nclass SeriesGetattr(object):\n # https://github.com/pandas-dev/pandas/issues/19764\n def setup(self):\n self.s = Series(1,\n index=date_range(\"2012-01-01\", freq='s',\n periods=int(1e6)))\n\n def time_series_datetimeindex_repr(self):\n getattr(self.s, 'a', None)\n\n\nfrom .pandas_vb_common import setup # noqa: F401\n", "# pylint: disable=W0231,E1101\nimport collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nfrom textwrap import dedent\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._libs import Timestamp, iNaT, properties\nimport pandas.compat as compat\nfrom pandas.compat import (\n cPickle as pkl, isidentifier, lrange, lzip, map, set_function_name,\n string_types, to_str, zip)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n Appender, Substitution, rewrite_axis_style_signature)\nfrom pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs\n\nfrom pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask\nfrom pandas.core.dtypes.common import (\n ensure_int64, ensure_object, is_bool, is_bool_dtype,\n is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like,\n is_extension_array_dtype, is_integer, is_list_like, is_number,\n is_numeric_dtype, is_object_dtype, is_period_arraylike, is_re_compilable,\n is_scalar, is_timedelta64_dtype, pandas_dtype)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import config, missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.index import (\n Index, InvalidIndexError, MultiIndex, RangeIndex, ensure_index)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs = dict()\n_shared_doc_kwargs = dict(\n axes='keywords for axes', klass='NDFrame',\n axes_single_arg='int or labels for object',\n args_transpose='axes to 
permute (int or label for object)',\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\")\n\n# sentinel value to use as kwarg in place of None when None has special meaning\n# and needs to be distinguished from a user explicitly passing None.\nsentinel = object()\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError('cannot replace {0} with method {1} on a {2}'\n .format(to_replace, method, type(self).__name__))\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index,\n dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nclass NDFrame(PandasObject, SelectionMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : boolean, default False\n \"\"\"\n _internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy',\n '_subtyp', '_name', '_index', '_default_kind',\n '_default_fill_value', '_metadata', '__array_struct__',\n '__array_interface__']\n _internal_names_set = set(_internal_names)\n _accessors = frozenset()\n _deprecations = frozenset(['as_blocks', 'blocks',\n 'convert_objects', 'is_copy'])\n _metadata = []\n _is_copy = None\n\n # dummy attribute so that datetime.__eq__(Series/DataFrame) defers\n # by returning NotImplemented\n timetuple = None\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(self, data, axes=None, copy=False, dtype=None,\n fastpath=False):\n\n if not fastpath:\n if dtype is not None:\n data = data.astype(dtype)\n elif copy:\n data = data.copy()\n\n if axes is not None:\n for i, ax in enumerate(axes):\n data = data.reindex_axis(ax, axis=i)\n\n object.__setattr__(self, '_is_copy', None)\n object.__setattr__(self, '_data', data)\n object.__setattr__(self, '_item_cache', {})\n\n def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(axe,\n axis=self._get_block_manager_axis(a),\n copy=False)\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def is_copy(self):\n \"\"\"\n Return the copy.\n \"\"\"\n warnings.warn(\"Attribute 'is_copy' is deprecated and will be removed \"\n \"in a future version.\", FutureWarning, stacklevel=2)\n return self._is_copy\n\n @is_copy.setter\n def is_copy(self, msg):\n warnings.warn(\"Attribute 'is_copy' is deprecated and will be removed \"\n \"in a future version.\", FutureWarning, stacklevel=2)\n self._is_copy = msg\n\n def _validate_dtype(self, dtype):\n \"\"\" validate the passed dtype \"\"\"\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound 
dtype\n if dtype.kind == 'V':\n raise NotImplementedError(\"compound dtypes are not implemented\"\n \" in the {0} constructor\"\n .format(self.__class__.__name__))\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self):\n \"\"\"Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame() and DataFrame.to_panel()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n\n @classmethod\n def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,\n slicers=None, axes_are_reversed=False, build_axes=True,\n ns=None, docs=None):\n \"\"\"Provide axes setup for the major PandasObjects.\n\n Parameters\n ----------\n axes : the names of the axes in order (lowest to highest)\n info_axis_num : the axis of the selector dimension (int)\n stat_axis_num : the number of axis for the default stats (int)\n aliases : other names for a single axis (dict)\n slicers : how axes slice to others (dict)\n axes_are_reversed : boolean whether to treat passed axes as\n reversed (DataFrame)\n build_axes : setup the axis properties (default True)\n \"\"\"\n\n cls._AXIS_ORDERS = axes\n cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}\n cls._AXIS_LEN = len(axes)\n cls._AXIS_ALIASES = aliases or dict()\n cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}\n cls._AXIS_NAMES = dict(enumerate(axes))\n cls._AXIS_SLICEMAP = slicers or None\n cls._AXIS_REVERSED = axes_are_reversed\n\n # typ\n setattr(cls, '_typ', cls.__name__.lower())\n\n # indexing support\n cls._ix = None\n\n if info_axis is not None:\n cls._info_axis_number = info_axis\n cls._info_axis_name = axes[info_axis]\n\n if stat_axis is not None:\n cls._stat_axis_number = stat_axis\n cls._stat_axis_name = axes[stat_axis]\n\n # setup the actual axis\n if build_axes:\n\n def set_axis(a, i):\n setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))\n cls._internal_names_set.add(a)\n\n if axes_are_reversed:\n m = cls._AXIS_LEN - 1\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, m - i)\n else:\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, i)\n\n assert not isinstance(ns, dict)\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @staticmethod\n def _construct_axes_dict_from(self, axes, **kwargs):\n \"\"\"Return an axes dictionary for the passed axes.\"\"\"\n d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}\n d.update(kwargs)\n return d\n\n def _construct_axes_dict_for_slice(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {self._AXIS_SLICEMAP[a]: self._get_axis(a)\n for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n def _construct_axes_from_arguments(\n self, args, kwargs, require_all=False, sentinel=None):\n \"\"\"Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not 
supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n\n # construct the args\n args = list(args)\n for a in self._AXIS_ORDERS:\n\n # if we have an alias for this axis\n alias = self._AXIS_IALIASES.get(a)\n if alias is not None:\n if a in kwargs:\n if alias in kwargs:\n raise TypeError(\"arguments are mutually exclusive \"\n \"for [%s,%s]\" % (a, alias))\n continue\n if alias in kwargs:\n kwargs[a] = kwargs.pop(alias)\n continue\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError:\n if require_all:\n raise TypeError(\"not enough/duplicate arguments \"\n \"specified!\")\n\n axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _from_axes(cls, data, axes, **kwargs):\n # for construction from BlockManager\n if isinstance(data, BlockManager):\n return cls(data, **kwargs)\n else:\n if cls._AXIS_REVERSED:\n axes = axes[::-1]\n d = cls._construct_axes_dict_from(cls, axes, copy=False)\n d.update(kwargs)\n return cls(data, **d)\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError('No axis named {0} for object type {1}'\n .format(axis, type(cls)))\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if isinstance(axis, string_types):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError('No axis named {0} for object type {1}'\n .format(axis, type(cls)))\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis):\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = '{prefix}level_{i}'.format(prefix=prefix, i=i)\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self):\n d = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n return d\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self):\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have 
reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self):\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self):\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self):\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self):\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def _expand_axes(self, key):\n new_axes = []\n for k, ax in zip(key, self.axes):\n if k not in ax:\n if type(k) != ax.dtype.type:\n ax = ax.astype('O')\n new_axes.append(ax.insert(len(ax), k))\n else:\n new_axes.append(ax)\n\n return new_axes\n\n def set_axis(self, labels, axis=0, inplace=None):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for column or row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to update. The value 0 identifies the rows, and 1\n identifies the columns.\n\n inplace : bool, default None\n Whether to return a new %(klass)s instance.\n\n .. warning::\n\n ``inplace=None`` currently falls back to to True, but in a\n future version, will default to False. Use inplace=True\n explicitly rather than relying on the default.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of same type as caller if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.rename_axis : Alter the name of the index or columns.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)\n a 1\n b 2\n c 3\n dtype: int64\n\n The original object is not modified.\n\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns', inplace=False)\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n if is_scalar(labels):\n warnings.warn(\n 'set_axis now takes \"labels\" as first argument, and '\n '\"axis\" as named parameter. 
The old form, with \"axis\" as '\n 'first parameter and \\\"labels\\\" as second, is still supported '\n 'but will be deprecated in a future version of pandas.',\n FutureWarning, stacklevel=2)\n labels, axis = axis, labels\n\n if inplace is None:\n warnings.warn(\n 'set_axis currently defaults to operating inplace.\\nThis '\n 'will change in a future version of pandas, use '\n 'inplace=True to avoid this warning.',\n FutureWarning, stacklevel=2)\n inplace = True\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis, labels):\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Permute the dimensions of the %(klass)s\n\n Parameters\n ----------\n args : %(args_transpose)s\n copy : boolean, default False\n Make a copy of the underlying data. Mixed-dtype data will\n always result in a copy\n\n Returns\n -------\n y : same as input\n\n Examples\n --------\n >>> p.transpose(2, 0, 1)\n >>> p.transpose(2, 0, 1, copy=True)\n \"\"\"\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs,\n require_all=True)\n axes_names = tuple(self._get_axis_name(axes[a])\n for a in self._AXIS_ORDERS)\n axes_numbers = tuple(self._get_axis_number(axes[a])\n for a in self._AXIS_ORDERS)\n\n # we must have unique axes\n if len(axes) != len(set(axes)):\n raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)\n\n new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)\n for x in axes_names])\n new_values = self.values.transpose(axes_numbers)\n if kwargs.pop('copy', None) or (len(args) and args[-1]):\n new_values = new_values.copy()\n\n nv.validate_transpose_for_generic(self, kwargs)\n return self._constructor(new_values, **new_axes).__finalize__(self)\n\n def swapaxes(self, axis1, axis2, copy=True):\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k))\n for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self, level, axis=0):\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame.droplevel()\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self, item):\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Column label to be popped\n\n Returns\n -------\n popped : Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n .. 
versionadded:: 0.20.0\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes wil project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = (self._AXIS_NAMES if axis is None else\n (self._get_axis_number(axis),))\n try:\n return self.iloc[\n tuple(0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes))]\n except Exception:\n return self\n\n def swaplevel(self, i=-2, j=-1, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : same type as caller (new object)\n\n .. versionchanged:: 0.18.1\n\n The indexes ``i`` and ``j`` are now optional, and default to\n the two innermost levels of the index.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n result = self.copy()\n labels = result._data.axes[axis]\n result._data.set_axis(axis, labels.swaplevel(i, j))\n return result\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(self, *args, **kwargs):\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame or Panel.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : boolean, default True\n Also copy underlying data\n inplace : boolean, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n See Also\n --------\n pandas.NDFrame.rename_axis\n\n Examples\n --------\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n copy = kwargs.pop('copy', True)\n inplace = kwargs.pop('inplace', False)\n level = kwargs.pop('level', None)\n axis = kwargs.pop('axis', None)\n if axis is not None:\n # Validate the axis\n self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError('rename() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if com.count_not_none(*axes.values()) == 0:\n raise TypeError('must pass an index to rename')\n\n self._consolidate_inplace()\n result = self if inplace else self.copy(deep=copy)\n\n # start in the axis order to eliminate too many copies\n for axis in lrange(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is None:\n continue\n f = com._get_rename_function(v)\n\n baxis = self._get_block_manager_axis(axis)\n if level is not None:\n level = self.axes[axis]._get_level_number(level)\n result._data = result._data.rename_axis(f, axis=baxis, copy=copy,\n level=level)\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature('mapper', [('copy', True),\n ('inplace', False)])\n def rename_axis(self, mapper=sentinel, **kwargs):\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. 
versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n Prior to version 0.21.0, ``rename_axis`` could also be used to change\n the axis *labels* by passing a mapping or scalar. This behavior is\n deprecated and will be removed in a future version. Use ``rename``\n instead.\n\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=sentinel)\n copy = kwargs.pop('copy', True)\n inplace = kwargs.pop('inplace', False)\n axis = kwargs.pop('axis', 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError('rename_axis() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if (mapper is not sentinel):\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not\n is_dict_like(mapper))\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n # Deprecated (v0.21) behavior is if mapper is specified,\n # and not a list or scalar, then call rename\n msg = (\"Using 'rename_axis' to alter labels is deprecated. 
\"\n \"Use '.rename' instead\")\n warnings.warn(msg, FutureWarning, stacklevel=3)\n axis = self._get_axis_name(axis)\n d = {'copy': copy, 'inplace': inplace}\n d[axis] = mapper\n return self.rename(**d)\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in lrange(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is sentinel:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not\n is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com._get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis,\n inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... [[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n pd.MultiIndex.from_product([[\"mammal\"], ['dog', 'cat', 'monkey']])\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other):\n return all(self._get_axis(a).equals(other._get_axis(a))\n for a in self._AXIS_ORDERS)\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. 
The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n assert_series_equal : Return True if left and right Series are equal,\n False otherwise.\n assert_frame_equal : Return True if left and right DataFrames are\n equal, False otherwise.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)\n or is_object_dtype(values)):\n arr = operator.neg(values)\n else:\n raise TypeError(\"Unary negative expects numeric dtype, not {}\"\n .format(values.dtype))\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = com.values_from_object(self)\n if (is_bool_dtype(values) or is_period_arraylike(values)):\n arr = values\n elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)\n or is_object_dtype(values)):\n arr = operator.pos(values)\n else:\n raise TypeError(\"Unary plus expects numeric dtype, not {}\"\n .format(values.dtype))\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n try:\n arr = operator.inv(com.values_from_object(self))\n return self.__array_wrap__(arr)\n except Exception:\n\n # inv fails with 0 len\n if not np.prod(self.shape):\n return self\n\n raise\n\n def __nonzero__(self):\n raise ValueError(\"The 
truth value of a {0} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n .format(self.__class__.__name__))\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\"bool cannot act on a non-boolean single element \"\n \"{0}\".format(self.__class__.__name__))\n\n self.__nonzero__()\n\n def __abs__(self):\n return self.abs()\n\n def __round__(self, decimals=0):\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_is_level_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n return (key is not None and\n is_hashable(key) and\n key in self.axes[axis].names and\n not self._is_label_reference(key, axis=axis))\n\n def _is_label_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_is_label_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n return (key is not None and\n is_hashable(key) and\n any(key in self.axes[ax] for ax in other_axes))\n\n def _is_label_or_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n\n if self.ndim > 2:\n raise NotImplementedError(\n 
\"_is_label_or_level_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n return (self._is_level_reference(key, axis=axis) or\n self._is_label_reference(key, axis=axis))\n\n def _check_label_or_level_ambiguity(self, key, axis=0):\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_check_label_or_level_ambiguity is not implemented for {type}\"\n .format(type=type(self)))\n\n if (key is not None and\n is_hashable(key) and\n key in self.axes[axis].names and\n any(key in self.axes[ax] for ax in other_axes)):\n\n # Build an informative and grammatical warning\n level_article, level_type = (('an', 'index')\n if axis == 0 else\n ('a', 'column'))\n\n label_article, label_type = (('a', 'column')\n if axis == 0 else\n ('an', 'index'))\n\n msg = (\"'{key}' is both {level_article} {level_type} level and \"\n \"{label_article} {label_type} label, which is ambiguous.\"\n ).format(key=key,\n level_article=level_article,\n level_type=level_type,\n label_article=label_article,\n label_type=label_type)\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key, axis=0):\n \"\"\"\n Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. 
This will become an ambiguity error in a\n future version\n \"\"\"\n\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_get_label_or_level_values is not implemented for {type}\"\n .format(type=type(self)))\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(\n self._get_axis(other_axes[0]), MultiIndex):\n multi_message = ('\\n'\n 'For a multi-index, the label must be a '\n 'tuple with elements corresponding to '\n 'each level.')\n else:\n multi_message = ''\n\n label_axis_name = 'column' if axis == 0 else 'index'\n raise ValueError((\"The {label_axis_name} label '{key}' \"\n \"is not unique.{multi_message}\")\n .format(key=key,\n label_axis_name=label_axis_name,\n multi_message=multi_message))\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis=0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n\n axis = self._get_axis_number(axis)\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_drop_labels_or_levels is not implemented for {type}\"\n .format(type=type(self)))\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [k for k in keys if not\n self._is_label_or_level_reference(k, axis=axis)]\n\n if invalid_keys:\n raise ValueError((\"The following keys are not valid labels or \"\n \"levels for axis {axis}: {invalid_keys}\")\n .format(axis=axis,\n invalid_keys=invalid_keys))\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys\n if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys\n if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # 
----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError('{0!r} objects are mutable, thus they cannot be'\n ' hashed'.format(self.__class__.__name__))\n\n def __iter__(self):\n \"\"\"Iterate over infor axis\"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"Get the 'info axis' (see Indexing for more)\n\n This is index for Series, columns for DataFrame and major_axis for\n Panel.\n \"\"\"\n return self._info_axis\n\n def iteritems(self):\n \"\"\"Iterate over (label, values) on info axis\n\n This is index for Series, columns for DataFrame, major_axis for Panel,\n and so on.\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n def __len__(self):\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key):\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self):\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n pandas.Series.dropna\n pandas.DataFrame.dropna\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None):\n return com.values_from_object(self)\n\n def __array_wrap__(self, result, context=None):\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n def to_dense(self):\n \"\"\"\n Return dense representation of NDFrame (as opposed to sparse).\n \"\"\"\n # compat\n return self\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self):\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,\n **meta)\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get('_typ')\n if typ is not None:\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. 
say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n self._unpickle_series_compat(state)\n elif isinstance(state[0], dict):\n if len(state) == 5:\n self._unpickle_sparse_frame_compat(state)\n else:\n self._unpickle_frame_compat(state)\n elif len(state) == 4:\n self._unpickle_panel_compat(state)\n elif len(state) == 2:\n self._unpickle_series_compat(state)\n else: # pragma: no cover\n # old pickling format, for compatibility\n self._unpickle_matrix_compat(state)\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __unicode__(self):\n # unicode representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = '[%s]' % ','.join(map(pprint_thing, self))\n return '%s(%s)' % (self.__class__.__name__, prepr)\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option('display.latex.repr'):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option('display.max_rows'))\n payload = json.loads(data.to_json(orient='table'),\n object_pairs_hook=collections.OrderedDict)\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs['to_excel'] = \"\"\"\n Write %(klass)s to an Excel sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. 
You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n .. versionadded:: 0.20.0.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(self, excel_writer, sheet_name=\"Sheet1\", na_rep=\"\",\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep=\"inf\", verbose=True,\n freeze_panes=None):\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns,\n header=header,\n float_format=float_format, index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,\n startcol=startcol, freeze_panes=freeze_panes,\n engine=engine)\n\n def to_json(self, path_or_buf=None, orient=None, date_format=None,\n double_precision=10, force_ascii=True, date_unit='ms',\n default_handler=None, lines=False, compression='infer',\n index=True):\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : string or file handle, optional\n File path or object. 
If not specified, the result is returned as\n a string.\n orient : string\n Indication of expected JSON string format.\n\n * Series\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}\n\n * DataFrame\n\n - default is 'columns'\n - allowed values are:\n {'split','records','index','columns','values','table'}\n\n * The format of the JSON string\n\n - 'split' : dict like {'index' -> [index],\n 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n describing the data, and the data component is\n like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : string, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n .. versionadded:: 0.19.0\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n See Also\n --------\n read_json\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... 
columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n\n from pandas.io import json\n if date_format is None and orient == 'table':\n date_format = 'iso'\n elif date_format is None:\n date_format = 'epoch'\n return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n force_ascii=force_ascii, date_unit=date_unit,\n default_handler=default_handler,\n lines=lines, compression=compression,\n index=index)\n\n def to_hdf(self, path_or_buf, key, **kwargs):\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n format : {'fixed', 'table'}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n append : bool, default False\n For Table formats, append the input data to the existing.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n fletcher32 : bool, default False\n If applying compression use the fletcher32 checksum.\n dropna : bool, default False\n If true, ALL nan rows will not be written to store.\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n return pytables.to_hdf(path_or_buf, key, self, **kwargs)\n\n def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):\n \"\"\"\n Serialize object to input file path using msgpack format.\n\n THIS IS AN EXPERIMENTAL LIBRARY and the storage format\n may not be stable until a future release.\n\n Parameters\n ----------\n path : string File path, buffer-like, or None\n if None, return generated string\n append : bool whether to append to an existing msgpack\n (default is False)\n compress : type of compressor (zlib or blosc), default to None (no\n compression)\n \"\"\"\n\n from pandas.io import packers\n return packers.to_msgpack(path_or_buf, self, encoding=encoding,\n **kwargs)\n\n def to_sql(self, name, con, schema=None, if_exists='fail', index=True,\n index_label=None, chunksize=None, dtype=None, method=None):\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : string\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects.\n schema : string, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. 
Uses `index_label` as the column\n name in the table.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Rows will be written in batches of this size at a time. By default,\n all rows will be written at once.\n dtype : dict, optional\n Specifying the datatype for columns. The keys should be the column\n names and the values should be the SQLAlchemy types or strings for\n the sqlite3 legacy mode.\n method : {None, 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] http://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... 
dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,\n index=index, index_label=index_label, chunksize=chunksize,\n dtype=dtype, method=method)\n\n def to_pickle(self, path, compression='infer',\n protocol=pkl.HIGHEST_PROTOCOL):\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n\n .. versionadded:: 0.20.0\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values for this parameter depend on the version of Python. For\n Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a\n valid value. For Python >= 3.4, 4 is a valid value. A negative\n value for the protocol parameter is equivalent to setting its value\n to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html\n .. versionadded:: 0.21.0\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n return to_pickle(self, path, compression=compression,\n protocol=protocol)\n\n def to_clipboard(self, excel=True, sep=None, **kwargs):\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... 
# 4,5,6\n \"\"\"\n from pandas.io import clipboards\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <http://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}).set_index(['date',\n ... 'animal'])\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n\n try:\n import xarray\n except ImportError:\n # Give a nice error message\n raise ImportError(\"the xarray library is not installed\\n\"\n \"you can install via conda\\n\"\n \"conda install xarray\\n\"\n \"or via pip\\n\"\n \"pip install xarray\\n\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n elif self.ndim == 2:\n return xarray.Dataset.from_dataframe(self)\n\n # > 2 dims\n coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]\n return xarray.DataArray(self,\n coords=coords,\n )\n\n def to_latex(self, buf=None, columns=None, col_space=None, header=True,\n index=True, na_rep='NaN', formatters=None, float_format=None,\n sparsify=None, index_names=True, bold_rows=False,\n column_format=None, longtable=None, escape=None,\n encoding=None, decimal='.', multicolumn=None,\n multicolumn_format=None, multirow=None):\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice\n this into a LaTeX document. Requires \\usepackage{booktabs}.\n\n .. versionchanged:: 0.20.2\n Added to Series\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. 
Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n .. versionadded:: 0.18.0\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n .. versionadded:: 0.20.0\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n .. versionadded:: 0.20.0\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n .. versionadded:: 0.20.0\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a\n string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE\n '\\\\begin{tabular}{lll}\\n\\\\toprule\\n name & mask & weapon\n \\\\\\\\\\n\\\\midrule\\n Raphael & red & sai \\\\\\\\\\n Donatello &\n purple & bo staff \\\\\\\\\\n\\\\bottomrule\\n\\\\end{tabular}\\n'\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\n \"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n header=header, index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape, decimal=decimal)\n formatter.to_latex(column_format=column_format, longtable=longtable,\n encoding=encoding, multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n def to_csv(self, path_or_buf=None, sep=\",\", na_rep='', float_format=None,\n columns=None, header=True, index=True, index_label=None,\n mode='w', encoding=None, compression='infer', quoting=None,\n quotechar='\"', line_terminator=None, chunksize=None,\n tupleize_cols=None, date_format=None, doublequote=True,\n escapechar=None, decimal='.'):\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n compression : str, default 'infer'\n Compression mode among the following possible values: {'infer',\n 'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`\n is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no\n compression).\n\n .. 
versionchanged:: 0.24.0\n\n 'infer' option added and set to default.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : string, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n tupleize_cols : bool, default False\n Write MultiIndex columns as a list of tuples (if True) or in\n the new, expanded format, where each MultiIndex column is a row\n in the CSV (if False).\n\n .. deprecated:: 0.21.0\n This argument will be removed and will always write each row\n of the multi-index as a separate row in the CSV file.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Load an Excel file into a DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n \"\"\"\n\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n if tupleize_cols is not None:\n warnings.warn(\"The 'tupleize_cols' parameter is deprecated and \"\n \"will be removed in a future version\",\n FutureWarning, stacklevel=2)\n else:\n tupleize_cols = False\n\n from pandas.io.formats.csvs import CSVFormatter\n formatter = CSVFormatter(df, path_or_buf,\n line_terminator=line_terminator, sep=sep,\n encoding=encoding,\n compression=compression, quoting=quoting,\n na_rep=na_rep, float_format=float_format,\n cols=columns, header=header, index=index,\n index_label=index_label, mode=mode,\n chunksize=chunksize, quotechar=quotechar,\n tupleize_cols=tupleize_cols,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar, decimal=decimal)\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n # ----------------------------------------------------------------------\n # Fancy Indexing\n\n @classmethod\n def _create_indexer(cls, name, indexer):\n \"\"\"Create an indexer like _name in the class.\"\"\"\n if getattr(cls, name, None) is None:\n _indexer = functools.partial(indexer, name)\n setattr(cls, name, property(_indexer, doc=indexer.__doc__))\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). 
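For instance, a minimal sketch (assuming a DataFrame ``df`` that\n        has a ``'name'`` column but no ``'color'`` column):\n\n        >>> df.get('name')              # doctest: +SKIP\n        >>> df.get('color', default=0)  # doctest: +SKIP\n        0\n\n        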
Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n def __getitem__(self, item):\n return self._get_item_cache(item)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _set_as_cached(self, item, cacher):\n \"\"\"Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self):\n \"\"\"Reset the cacher.\"\"\"\n if hasattr(self, '_cacher'):\n del self._cacher\n\n def _iget_item_cache(self, item):\n \"\"\"Return the cached item, item represents a positional indexer.\"\"\"\n ax = self._info_axis\n if ax.is_unique:\n lower = self._get_item_cache(ax[item])\n else:\n lower = self._take(item, axis=self._info_axis_number)\n return lower\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _maybe_cache_changed(self, item, value):\n \"\"\"The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self):\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, '_cacher', None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, '_cacher', None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n @property\n def _is_view(self):\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def _maybe_update_cacher(self, clear=False, verify_is_copy=True):\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : boolean, default False\n clear the item cache\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n \"\"\"\n\n cacher = getattr(self, '_cacher', None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except Exception:\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t='referant')\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self, i=None):\n if i is not None:\n self._item_cache.pop(i, None)\n else:\n self._item_cache.clear()\n\n def _slice(self, slobj, axis=0, kind=None):\n \"\"\"\n Construct a slice of this container.\n\n kind parameter is maintained for compatibility with Series slicing.\n \"\"\"\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view slicable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value):\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref=None, copy=True):\n if not copy:\n self._is_copy = None\n else:\n if ref is not None:\n self._is_copy = weakref.ref(ref)\n 
else:\n self._is_copy = None\n\n def _check_is_chained_assignment_possible(self):\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t='referant',\n force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t='referant')\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : integer, default 4\n the level to show of the stack when the error is output\n t : string, the type of setting error\n force : boolean, default False\n if True, then force showing an error\n\n validate if we are doing a settitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n\n if force or self._is_copy:\n\n value = config.get_option('mode.chained_assignment')\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n try:\n gc.collect(2)\n if not gc.get_referents(self._is_copy()):\n self._is_copy = None\n return\n except Exception:\n pass\n\n # we might be a false positive\n try:\n if self._is_copy().shape == self.shape:\n self._is_copy = None\n return\n except Exception:\n pass\n\n # a custom message\n if isinstance(self._is_copy, string_types):\n t = self._is_copy\n\n elif t == 'referant':\n t = (\"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/\"\n \"indexing.html#indexing-view-versus-copy\"\n )\n\n else:\n t = (\"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/\"\n \"indexing.html#indexing-view-versus-copy\"\n )\n\n if value == 'raise':\n raise com.SettingWithCopyError(t)\n elif value == 'warn':\n warnings.warn(t, com.SettingWithCopyWarning,\n stacklevel=stacklevel)\n\n def __delitem__(self, key):\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key, )\n for col in 
self.columns:\n if isinstance(col, tuple) and col[:len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n def _take(self, indices, axis=0, is_copy=True):\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n This is the internal version of ``.take()`` and will contain a wider\n selection of parameters useful for internal use but not as suitable\n for public usage.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : int, default 0\n The axis on which to select elements. \"0\" means that we are\n selecting rows, \"1\" means that we are selecting columns, etc.\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n numpy.ndarray.take\n numpy.take\n \"\"\"\n self._consolidate_inplace()\n\n new_data = self._data.take(indices,\n axis=self._get_block_manager_axis(axis),\n verify=True)\n result = self._constructor(new_data).__finalize__(self)\n\n # Maybe set copy if we didn't actually change the index.\n if is_copy:\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n\n return result\n\n def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n convert : bool, default True\n Whether to convert negative indices into positive ones.\n For example, ``-1`` would map to the ``len(axis) - 1``.\n The conversions are similar to the behavior of indexing a\n regular Python list.\n\n .. deprecated:: 0.21.0\n In the future, negative indices will always be converted.\n\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... 
index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if convert is not None:\n msg = (\"The 'convert' parameter is deprecated \"\n \"and will be removed in a future version.\")\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n nv.validate_take(tuple(), kwargs)\n return self._take(indices, axis=axis, is_copy=is_copy)\n\n def xs(self, key, axis=0, level=None, drop_level=True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... 
level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level,\n drop_level=drop_level)\n\n # create the tuple of the indexer\n indexer = [slice(None)] * self.ndim\n indexer[axis] = loc\n indexer = tuple(indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key,\n drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n inds, = loc.nonzero()\n return self._take(inds, axis=axis)\n else:\n return self._take(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n new_values = self._data.fast_xs(loc)\n\n # may need to box a datelike-scalar\n #\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n if not is_list_like(new_values) or self.ndim == 1:\n return com.maybe_box_datetimelike(new_values)\n\n result = self._constructor_sliced(\n new_values, index=self.columns,\n name=self.index[loc], dtype=new_values.dtype)\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view slicable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs = xs\n\n def select(self, crit, axis=0):\n \"\"\"\n Return data corresponding to axis labels matching criteria.\n\n .. deprecated:: 0.21.0\n Use df.loc[df.index.map(crit)] to select via labels\n\n Parameters\n ----------\n crit : function\n To be called on each index (label). Should return True or False\n axis : int\n\n Returns\n -------\n selection : same type as caller\n \"\"\"\n warnings.warn(\"'select' is deprecated and will be removed in a \"\n \"future release. You can use \"\n \".loc[labels.map(crit)] as a replacement\",\n FutureWarning, stacklevel=2)\n\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis_values = self._get_axis(axis)\n\n if len(axis_values) > 0:\n new_axis = axis_values[\n np.asarray([bool(crit(label)) for label in axis_values])]\n else:\n new_axis = axis_values\n\n return self.reindex(**{axis_name: new_axis})\n\n def reindex_like(self, other, method=None, copy=True, limit=None,\n tolerance=None):\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
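As a rough sketch of the equivalence spelled out in the Notes\n        section below (``df1`` and ``df2`` are the frames built in the\n        Examples), the two calls below are expected to match:\n\n        >>> df2.reindex_like(df1)                              # doctest: +SKIP\n        >>> df2.reindex(index=df1.index, columns=df1.columns)  # doctest: +SKIP\n\n        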
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,\n copy=copy, limit=limit,\n tolerance=tolerance)\n\n return self.reindex(**d)\n\n def drop(self, labels=None, axis=0, index=None, columns=None, level=None,\n inplace=False, errors='raise'):\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and \"\n \"'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\"Need to specify at least one of 'labels', \"\n \"'index' or 'columns'\")\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(self, labels, axis, level=None, errors='raise'):\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError('axis must be a MultiIndex')\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError('axis must be a MultiIndex')\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == 'raise' and indexer.all():\n raise KeyError('{} not found in axis'.format(labels))\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == 'raise' and labels_missing:\n raise KeyError('{} not found in axis'.format(labels))\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy=True):\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, '_data', result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self, prefix):\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For 
DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial('{prefix}{}'.format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def add_suffix(self, suffix):\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial('{}{suffix}'.format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def sort_values(self, by=None, axis=0, ascending=True, inplace=False,\n kind='quicksort', na_position='last'):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise NotImplementedError(\"sort_values has not been implemented \"\n \"on Panel or Panel4D objects.\")\n\n def sort_index(self, axis=0, level=None, ascending=True, inplace=False,\n kind='quicksort', na_position='last', sort_remaining=True):\n \"\"\"\n Sort object by labels (along an axis).\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool, default True\n Sort ascending vs. descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted index if inplace=False, None otherwise.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n labels = self._get_axis(axis)\n\n if level is not None:\n raise NotImplementedError(\"level is not implemented\")\n if inplace:\n raise NotImplementedError(\"inplace is not implemented\")\n\n sort_index = labels.argsort()\n if not ascending:\n sort_index = sort_index[::-1]\n\n new_axis = labels.take(sort_index)\n return self.reindex(**{axis_name: new_axis})\n\n def reindex(self, *args, **kwargs):\n \"\"\"\n Conform %(klass)s to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. 
Preferably an Index object to avoid duplicating data\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({\n ... 'http_status': [200,200,404,404,301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. 
Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
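For example (an illustrative sketch, output omitted), the other\n        propagation schemes listed above leave that pre-existing ``NaN``\n        untouched as well:\n\n        >>> df2.reindex(date_index2, method='ffill')    # doctest: +SKIP\n        >>> df2.reindex(date_index2, method='nearest')  # doctest: +SKIP\n\n        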
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop('method', None))\n level = kwargs.pop('level', None)\n copy = kwargs.pop('copy', True)\n limit = kwargs.pop('limit', None)\n tolerance = kwargs.pop('tolerance', None)\n fill_value = kwargs.pop('fill_value', None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError('reindex() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(self._get_axis(axis).identical(ax)\n for axis, ax in axes.items() if ax is not None):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n try:\n return self._reindex_multi(axes, copy, fill_value)\n except Exception:\n pass\n\n # perform the reindex on the axes\n return self._reindex_axes(axes, level, limit, tolerance, method,\n fill_value, copy).__finalize__(self)\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,\n copy):\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(labels, level=level, limit=limit,\n tolerance=tolerance, method=method)\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers({axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy, allow_dups=False)\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and\n method is None and level is None and not self._is_mixed_type)\n\n def _reindex_multi(self, axes, copy, fill_value):\n return NotImplemented\n\n _shared_docs['reindex_axis'] = (\"\"\"\n Conform input object to new index.\n\n .. deprecated:: 0.21.0\n Use `reindex` instead.\n\n By default, places NaN in locations having no value in the\n previous index. A new object is produced unless the new index\n is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n labels : array-like\n New labels / index to conform to. 
Preferably an Index object to\n avoid duplicating data.\n axis : %(axes_single_arg)s\n Indicate whether to use rows or columns.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional\n Method to use for filling holes in reindexed DataFrame:\n\n * default: don't fill gaps.\n * pad / ffill: propagate last valid observation forward to next\n valid.\n * backfill / bfill: use next valid observation to fill gap.\n * nearest: use nearest valid observations to fill gap.\n\n level : int or str\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, optional\n Maximum number of consecutive elements to forward or backward fill.\n fill_value : float, default NaN\n Value used to fill in locations having no value in the previous\n index.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s\n Returns a new DataFrame object with new indices, unless the new\n index is equivalent to the current one and copy=False.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> df.reindex(['num_wings', 'num_legs', 'num_heads'],\n ... axis='columns')\n num_wings num_legs num_heads\n dog 0 4 NaN\n hawk 2 2 NaN\n \"\"\")\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=None):\n msg = (\"'.reindex_axis' is deprecated and will be removed in a future \"\n \"version. Use '.reindex' instead.\")\n self._consolidate_inplace()\n\n axis_name = self._get_axis_name(axis)\n axis_values = self._get_axis(axis_name)\n method = missing.clean_reindex_fill_method(method)\n warnings.warn(msg, FutureWarning, stacklevel=3)\n new_index, indexer = axis_values.reindex(labels, method, level,\n limit=limit)\n return self._reindex_with_indexers({axis: [new_index, indexer]},\n fill_value=fill_value, copy=copy)\n\n def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False,\n allow_dups=False):\n \"\"\"allow_dups indicates an internal call here \"\"\"\n\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(index, indexer, axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy)\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(self, items=None, like=None, regex=None, axis=None):\n \"\"\"\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. 
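As a contrast (a rough sketch reusing the ``df`` from the Examples\n        below), value-based selection is usually done with boolean indexing\n        instead:\n\n        >>> df[df['one'] > 1]  # doctest: +SKIP\n\n        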
The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n List of axis to restrict to (must not all be present).\n like : string\n Keep axis where \"arg in col == True\".\n regex : string (regular expression)\n Keep axis with re.search(regex, col) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n import re\n\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError('Keyword arguments `items`, `like`, or `regex` '\n 'are mutually exclusive')\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(\n **{name: [r for r in items if r in labels]})\n elif like:\n def f(x):\n return like in to_str(x)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n def f(x):\n return matcher.search(to_str(x)) is not None\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError('Must pass either `items`, `like`, or `regex`')\n\n def head(self, n=5):\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n\n return self.iloc[:n]\n\n def tail(self, n=5):\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(self, n=None, frac=None, replace=False, weights=None,\n random_state=None, axis=None):\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Sample with or without replacement.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : int or string, optional\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames, 1 for Panels).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n Using a DataFrame column as weights. 
Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, pd.Series):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, string_types):\n if isinstance(self, pd.DataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError:\n raise KeyError(\"String passed to weights not a \"\n \"valid column\")\n else:\n raise ValueError(\"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\")\n else:\n raise ValueError(\"Strings cannot be passed as weights \"\n \"when sampling from a Series or Panel.\")\n\n weights = pd.Series(weights, dtype='float64')\n\n if len(weights) != axis_length:\n raise ValueError(\"Weights and axis to be sampled must be of \"\n \"same length\")\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative \"\n \"values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError('Please enter a value for `frac` OR `n`, not '\n 'both')\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\"A negative number of rows requested. Please \"\n \"provide positive value.\")\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis, is_copy=False)\n\n _shared_docs['pipe'] = (r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply\n DataFrame.applymap\n Series.map\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... 
)\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\")\n\n @Appender(_shared_docs['pipe'] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n _shared_docs['aggregate'] = dedent(\"\"\"\n Aggregate using one or more operations over the specified axis.\n\n %(versionadded)s\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n DataFrame, Series or scalar\n if DataFrame.agg is called with a single function, returns a Series\n if DataFrame.agg is called with several functions, returns a DataFrame\n if Series.agg is called with single function, returns a scalar\n if Series.agg is called with several functions, returns a Series\n\n %(see_also)s\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n\n %(examples)s\n \"\"\")\n\n _shared_docs['transform'] = (\"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values\n and that has the same axis length as self.\n\n .. versionadded:: 0.20.0\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 
'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\")\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name):\n \"\"\"After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (name in self._internal_names_set or name in self._metadata or\n name in self._accessors):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name, value):\n \"\"\"After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2)\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\" add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {c for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, string_types) and isidentifier(c)}\n return super(NDFrame, self)._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Getting and setting elements\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self):\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace=False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : boolean, default False\n If False return new object, otherwise modify existing object\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self):\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self):\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_datelike_mixed_type(self):\n f = lambda: self._data.is_datelike_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value):\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n try:\n if np.isnan(value):\n return True\n except Exception:\n pass\n\n raise TypeError('Cannot do inplace boolean setting on '\n 'mixed-types with a non np.nan value')\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(\n 
self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Internal Interface Methods\n\n def as_matrix(self, columns=None):\n \"\"\"\n Convert the frame to its Numpy-array representation.\n\n .. deprecated:: 0.23.0\n Use :meth:`DataFrame.values` instead.\n\n Parameters\n ----------\n columns : list, optional, default:None\n If None, return all columns, otherwise, returns specified columns.\n\n Returns\n -------\n values : ndarray\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n See Also\n --------\n DataFrame.values\n\n Notes\n -----\n Return is NOT a Numpy-matrix, rather, a Numpy-array.\n\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcase to\n int32. By numpy.find_common_type convention, mixing int64 and uint64\n will result in a float64 dtype.\n\n This method is provided for backwards compatibility. Generally,\n it is recommended to use '.values'.\n \"\"\"\n warnings.warn(\"Method .as_matrix will be removed in a future version. \"\n \"Use .values instead.\", FutureWarning, stacklevel=2)\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED,\n items=columns)\n\n @property\n def values(self):\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n pandas.DataFrame.index : Retrieve the index labels.\n pandas.DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... 
columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self):\n \"\"\"internal implementation\"\"\"\n return self.values\n\n @property\n def _get_values(self):\n # compat\n return self.values\n\n def get_values(self):\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `pandas.SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n pandas.SparseArray : Container for sparse data.\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],\n ... 'c': [1.0, 2.0]})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n\n >>> df.get_values()\n array([[1, True, 1.0], [2, False, 2.0]], dtype=object)\n\n >>> df = pd.DataFrame({\"a\": pd.SparseArray([1, None, None]),\n ... \"c\": [1.0, 2.0, 3.0]})\n >>> df\n a c\n 0 1.0 1.0\n 1 NaN 2.0\n 2 NaN 3.0\n\n >>> df.get_values()\n array([[ 1., 1.],\n [nan, 2.],\n [nan, 3.]])\n \"\"\"\n return self.values\n\n def get_dtype_counts(self):\n \"\"\"\n Return counts of unique dtypes in this object.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_dtype_counts()\n float64 1\n int64 1\n object 1\n dtype: int64\n \"\"\"\n from pandas import Series\n return Series(self._data.get_dtype_counts())\n\n def get_ftype_counts(self):\n \"\"\"\n Return counts of unique ftypes in this object.\n\n .. deprecated:: 0.23.0\n\n This is useful for SparseDataFrame or for DataFrames containing\n sparse arrays.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each type and\n sparsity (dense/sparse)\n\n See Also\n --------\n ftypes : Return ftypes (indication of sparse/dense and dtype) in\n this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_ftype_counts() # doctest: +SKIP\n float64:dense 1\n int64:dense 1\n object:dense 1\n dtype: int64\n \"\"\"\n warnings.warn(\"get_ftype_counts is deprecated and will \"\n \"be removed in a future version\",\n FutureWarning, stacklevel=2)\n\n from pandas import Series\n return Series(self._data.get_ftype_counts())\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. 
See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n See Also\n --------\n pandas.DataFrame.ftypes : Dtype and sparsity information.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n return Series(self._data.get_dtypes(), index=self._info_axis,\n dtype=np.object_)\n\n @property\n def ftypes(self):\n \"\"\"\n Return the ftypes (indication of sparse/dense and dtype) in DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type and indication of sparse/dense of each column.\n\n See Also\n --------\n pandas.DataFrame.dtypes: Series with just dtype information.\n pandas.SparseDataFrame : Container for sparse tabular data.\n\n Notes\n -----\n Sparse data should have the same dtypes as its dense representation.\n\n Examples\n --------\n >>> arr = np.random.RandomState(0).randn(100, 4)\n >>> arr[arr < .8] = np.nan\n >>> pd.DataFrame(arr).ftypes\n 0 float64:dense\n 1 float64:dense\n 2 float64:dense\n 3 float64:dense\n dtype: object\n\n >>> pd.SparseDataFrame(arr).ftypes\n 0 float64:sparse\n 1 float64:sparse\n 2 float64:sparse\n 3 float64:sparse\n dtype: object\n \"\"\"\n from pandas import Series\n return Series(self._data.get_ftypes(), index=self._info_axis,\n dtype=np.object_)\n\n def as_blocks(self, copy=True):\n \"\"\"\n Convert the frame to a dict of dtype -> Constructor Types that each has\n a homogeneous dtype.\n\n .. deprecated:: 0.21.0\n\n NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in\n as_matrix)\n\n Parameters\n ----------\n copy : boolean, default True\n\n Returns\n -------\n values : a dict of dtype -> Constructor Types\n \"\"\"\n warnings.warn(\"as_blocks is deprecated and will \"\n \"be removed in a future version\",\n FutureWarning, stacklevel=2)\n return self._to_dict_of_blocks(copy=copy)\n\n @property\n def blocks(self):\n \"\"\"\n Internal property, property synonym for as_blocks().\n\n .. deprecated:: 0.21.0\n \"\"\"\n return self.as_blocks()\n\n def _to_dict_of_blocks(self, copy=True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()}\n\n def astype(self, dtype, copy=True, errors='raise', **kwargs):\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. 
Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n .. versionadded:: 0.20.0\n\n kwargs : keyword arguments to pass on to the constructor\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1,2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError('Only the Series name can be used for '\n 'the key in Series dtype mappings.')\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors, **kwargs)\n elif self.ndim > 2:\n raise NotImplementedError(\n 'astype() only accepts a dtype arg of type dict when '\n 'invoked on Series and DataFrames. A single dtype must be '\n 'specified when invoked on a Panel.'\n )\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError('Only a column name can be used for the '\n 'key in a dtype mappings argument.')\n results = []\n for col_name, col in self.iteritems():\n if col_name in dtype:\n results.append(col.astype(dtype[col_name], copy=copy))\n else:\n results.append(results.append(col.copy() if copy else col))\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = (self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns)))\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,\n **kwargs)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. 
Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series, DataFrame or Panel\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self, deep=True):\n return self.copy(deep=deep)\n\n def __deepcopy__(self, memo=None):\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n if memo is None:\n memo = {}\n return self.copy(deep=True)\n\n def _convert(self, datetime=False, numeric=False, timedelta=False,\n coerce=False, copy=True):\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : boolean, default False\n If True, convert to date where possible.\n numeric : boolean, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : boolean, default False\n If True, convert to timedelta where possible.\n coerce : boolean, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT)\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). 
Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n return self._constructor(\n self._data.convert(datetime=datetime, numeric=numeric,\n timedelta=timedelta, coerce=coerce,\n copy=copy)).__finalize__(self)\n\n def convert_objects(self, convert_dates=True, convert_numeric=False,\n convert_timedeltas=True, copy=True):\n \"\"\"\n Attempt to infer better dtype for object columns.\n\n .. deprecated:: 0.21.0\n\n Parameters\n ----------\n convert_dates : boolean, default True\n If True, convert to date where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n convert_numeric : boolean, default False\n If True, attempt to coerce to numbers (including strings), with\n unconvertible values becoming NaN.\n convert_timedeltas : boolean, default True\n If True, convert to timedelta where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n \"\"\"\n msg = (\"convert_objects is deprecated. To re-infer data dtypes for \"\n \"object columns, use {klass}.infer_objects()\\nFor all \"\n \"other conversions use the data-type specific converters \"\n \"pd.to_datetime, pd.to_timedelta and pd.to_numeric.\"\n ).format(klass=self.__class__.__name__)\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n return self._constructor(\n self._data.convert(convert_dates=convert_dates,\n convert_numeric=convert_numeric,\n convert_timedeltas=convert_timedeltas,\n copy=copy)).__finalize__(self)\n\n def infer_objects(self):\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(datetime=True, numeric=False,\n timedelta=True, coerce=False,\n copy=True)).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None):\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). 
(values not\n in the dict/Series/DataFrame will not be filled). This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n axis : %(axes_single_arg)s\n inplace : boolean, default False\n If True, fill in place. Note: this will modify any\n other views on this object, (e.g. a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n a dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible)\n\n Returns\n -------\n filled : %(klass)s\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex, asfreq\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. 
in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n from pandas import DataFrame\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n # > 3d\n if self.ndim > 3:\n raise NotImplementedError('Cannot fillna with a method for > '\n '3dims')\n\n # 3d\n elif self.ndim == 3:\n # fill in 2d chunks\n result = {col: s.fillna(method=method, value=value)\n for col, s in self.iteritems()}\n prelim_obj = self._constructor.from_dict(result)\n new_obj = prelim_obj.__finalize__(self)\n new_data = new_obj._data\n\n else:\n # 2d or less\n new_data = self._data.interpolate(method=method, axis=axis,\n limit=limit, inplace=inplace,\n coerce=True,\n downcast=downcast)\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n from pandas import Series\n value = Series(value)\n elif not is_list_like(value):\n pass\n else:\n raise TypeError('\"value\" parameter must be a scalar, dict '\n 'or Series, but you passed a '\n '\"{0}\"'.format(type(value).__name__))\n\n new_data = self._data.fillna(value=value, limit=limit,\n inplace=inplace,\n downcast=downcast)\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError('Currently only can fill '\n 'with dict/Series column '\n 'by column')\n\n result = self if inplace else self.copy()\n for k, v in compat.iteritems(value):\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(value=value, limit=limit,\n inplace=inplace,\n downcast=downcast)\n elif isinstance(value, DataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(\"invalid fill value with a %s\" % type(value))\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(self, axis=None, inplace=False, limit=None, downcast=None):\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n \"\"\"\n return self.fillna(method='ffill', axis=axis, inplace=inplace,\n limit=limit, downcast=downcast)\n\n def bfill(self, axis=None, inplace=False, limit=None, downcast=None):\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n \"\"\"\n return self.fillna(method='bfill', axis=axis, inplace=inplace,\n limit=limit, downcast=downcast)\n\n _shared_docs['replace'] = (\"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - 
Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. 
versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n TypeError\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\")\n\n @Appender(_shared_docs['replace'] % _shared_doc_kwargs)\n def replace(self, to_replace=None, value=None, inplace=False, limit=None,\n regex=False, method='pad'):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is \"\n \"not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, pd.DataFrame):\n return self.apply(_single_replace,\n args=(to_replace, method, inplace,\n limit))\n return _single_replace(self, to_replace, method, inplace,\n limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError('If \"to_replace\" and \"value\" are both None'\n ' and \"to_replace\" is not a list, then '\n 'regex must be a mapping')\n to_replace = regex\n regex = True\n\n items = list(compat.iteritems(to_replace))\n keys, values = lzip(*items) or ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\"If a nested mapping is passed, all values\"\n \" of the top level mapping must be \"\n \"mappings\")\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n 
keys, values = lzip(*v.items()) or ([], [])\n if set(keys) & set(values):\n raise ValueError(\"Replacement not allowed with \"\n \"overlapping keys and values\")\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(to_replace, value, inplace=inplace,\n limit=limit, regex=regex)\n else:\n\n # need a non-zero len on all axes\n for a in self._AXIS_ORDERS:\n if not len(self._get_axis(a)):\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in compat.iteritems(to_replace):\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursivelly\n res[c] = res[c].replace(to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex)\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in compat.iteritems(to_replace)\n if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert)\n else:\n raise TypeError('value argument must be scalar, dict, or '\n 'Series')\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError('Replacement lists must match '\n 'in length. Expecting %d got %d ' %\n (len(to_replace), len(value)))\n\n new_data = self._data.replace_list(src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex)\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(to_replace=to_replace,\n value=value, inplace=inplace,\n regex=regex)\n elif to_replace is None:\n if not (is_re_compilable(regex) or\n is_list_like(regex) or is_dict_like(regex)):\n raise TypeError(\"'regex' must be a string or a compiled \"\n \"regular expression or a list or dict of \"\n \"strings or regular expressions, you \"\n \"passed a\"\n \" {0!r}\".format(type(regex).__name__))\n return self.replace(regex, value, inplace=inplace, limit=limit,\n regex=True)\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in compat.iteritems(value):\n if k in self:\n new_data = new_data.replace(to_replace=to_replace,\n value=v, filter=[k],\n inplace=inplace,\n regex=regex)\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(to_replace=to_replace,\n value=value, inplace=inplace,\n regex=regex)\n else:\n msg = ('Invalid \"to_replace\" type: '\n '{0!r}').format(type(to_replace).__name__)\n raise TypeError(msg) # pragma: no cover\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs['interpolate'] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. 
This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. Both 'polynomial' and 'spline'\n require that you also specify an `order` (int),\n e.g. ``df.interpolate(method='polynomial', order=5)``.\n These use the numerical values of the index.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n\n .. versionadded:: 0.18.1\n\n Added support for the 'akima' method.\n Added interpolate method 'from_derivatives' which replaces\n 'piecewise_polynomial' in SciPy 0.18; backwards-compatible with\n SciPy < 0.18\n\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.21.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. 
These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry befofe it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... 
columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)\n def interpolate(self, method='linear', axis=0, limit=None, inplace=False,\n limit_direction='forward', limit_area=None,\n downcast=None, **kwargs):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if self.ndim > 2:\n raise NotImplementedError(\"Interpolate has not been implemented \"\n \"on Panel and Panel 4D objects.\")\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n else:\n _maybe_transposed_self = self\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if (isinstance(_maybe_transposed_self.index, MultiIndex) and\n method != 'linear'):\n raise ValueError(\"Only `method=linear` interpolation is supported \"\n \"on MultiIndexes.\")\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\n 'object') == len(_maybe_transposed_self.T):\n raise TypeError(\"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\")\n\n # create/use the index\n if method == 'linear':\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n\n if isna(index).any():\n raise NotImplementedError(\"Interpolation with NaNs in the index \"\n \"has not been implemented. Try filling \"\n \"those NaNs before interpolating.\")\n data = _maybe_transposed_self._data\n new_data = data.interpolate(method=method, axis=ax, index=index,\n values=_maybe_transposed_self, limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace, downcast=downcast,\n **kwargs)\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n .. 
versionadded:: 0.19.0 For DataFrame\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, compat.string_types):\n from pandas import to_datetime\n where = to_datetime(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n elif self.ndim > 2:\n raise NotImplementedError(\"asof is not implemented \"\n \"for {type}\".format(type=type(self)))\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq).ordinal\n start = start.ordinal\n\n if where < start:\n if not is_series:\n from pandas import Series\n return Series(index=self.columns, name=where)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. 
However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side='right')\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs, is_copy=False)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs['isna'] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs['isna'] % _shared_doc_kwargs)\n def isna(self):\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs['isna'] % _shared_doc_kwargs)\n def isnull(self):\n return isna(self).__finalize__(self)\n\n _shared_docs['notna'] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. 
Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs['notna'] % _shared_doc_kwargs)\n def notna(self):\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs['notna'] % _shared_doc_kwargs)\n def notnull(self):\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace=False):\n if ((lower is not None and np.any(isna(lower))) or\n (upper is not None and np.any(isna(upper)))):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all='ignore'):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == 'le':\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = pd.Series(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold,\n axis)\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(self, lower=None, upper=None, axis=None, inplace=False,\n *args, **kwargs):\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. 
All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or string axis name, optional\n Align object with lower and upper along the given axis.\n inplace : boolean, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n if isinstance(self, ABCPanel):\n raise NotImplementedError(\"clip is not supported yet for panels\")\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(pd.isnull(lower)):\n lower = None\n if not is_list_like(upper) and np.any(pd.isnull(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if ((lower is None or (is_scalar(lower) and is_number(lower))) and\n (upper is None or (is_scalar(upper) and is_number(upper)))):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(lower, method=self.ge,\n axis=axis, inplace=inplace)\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(upper, method=self.le,\n axis=axis, inplace=inplace)\n\n return result\n\n def clip_upper(self, threshold, axis=None, inplace=False):\n \"\"\"\n Trim values above a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(upper=threshold) instead.\n\n Elements above the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Maximum value allowed. All values above threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. 
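# A brief sketch of the scalar fast path and the inplace flag of clip() shown
# above: when both bounds are numeric scalars, _clip_with_scalar handles both
# ends in one pass, and inplace=True mutates the object (returning None).
# Example values are made up.
import pandas as pd

s = pd.Series([1, -2, 30])
assert s.clip(0, 10).tolist() == [1, 0, 10]   # scalar fast path
s.clip(lower=0, upper=10, inplace=True)       # modifies s in place, returns None
assert s.tolist() == [1, 0, 10]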
When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align object with `threshold` along the given axis.\n inplace : boolean, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.clip(upper=3)\n 0 1\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n >>> elemwise_thresholds = [5, 4, 3, 2, 1]\n >>> elemwise_thresholds\n [5, 4, 3, 2, 1]\n\n >>> s.clip(upper=elemwise_thresholds)\n 0 1\n 1 2\n 2 3\n 3 2\n 4 1\n dtype: int64\n \"\"\"\n warnings.warn('clip_upper(threshold) is deprecated, '\n 'use clip(upper=threshold) instead',\n FutureWarning, stacklevel=2)\n return self._clip_with_one_bound(threshold, method=self.le,\n axis=axis, inplace=inplace)\n\n def clip_lower(self, threshold, axis=None, inplace=False):\n \"\"\"\n Trim values below a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(lower=threshold) instead.\n\n Elements below the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Minimum value allowed. All values below threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align `self` with `threshold` along the given axis.\n\n inplace : boolean, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n\n Series single threshold clipping:\n\n >>> s = pd.Series([5, 6, 7, 8, 9])\n >>> s.clip(lower=8)\n 0 8\n 1 8\n 2 8\n 3 8\n 4 9\n dtype: int64\n\n Series clipping element-wise using an array of thresholds. `threshold`\n should be the same length as the Series.\n\n >>> elemwise_thresholds = [4, 8, 7, 2, 5]\n >>> s.clip(lower=elemwise_thresholds)\n 0 5\n 1 8\n 2 7\n 3 8\n 4 9\n dtype: int64\n\n DataFrames can be compared to a scalar.\n\n >>> df = pd.DataFrame({\"A\": [1, 3, 5], \"B\": [2, 4, 6]})\n >>> df\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=3)\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n Or to an array of values. By default, `threshold` should be the same\n shape as the DataFrame.\n\n >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))\n A B\n 0 3 4\n 1 3 4\n 2 6 6\n\n Control how `threshold` is broadcast with `axis`. 
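# A sketch of the migration suggested by the deprecation notices above:
# clip(upper=...) and clip(lower=...) are the replacements for clip_upper()
# and clip_lower(), and trim the same way. Sample data is made up.
import pandas as pd

s = pd.Series([1, 2, 3, 4, 5])
assert s.clip(upper=3).tolist() == [1, 2, 3, 3, 3]   # replaces clip_upper(3)
assert s.clip(lower=3).tolist() == [3, 3, 3, 4, 5]   # replaces clip_lower(3)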
In this case\n `threshold` should be the same length as the axis specified by\n `axis`.\n\n >>> df.clip(lower=[3, 3, 5], axis='index')\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=[4, 5], axis='columns')\n A B\n 0 4 5\n 1 4 5\n 2 5 6\n \"\"\"\n warnings.warn('clip_lower(threshold) is deprecated, '\n 'use clip(lower=threshold) instead',\n FutureWarning, stacklevel=2)\n return self._clip_with_one_bound(threshold, method=self.ge,\n axis=axis, inplace=inplace)\n\n def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,\n group_keys=True, squeeze=False, observed=False, **kwargs):\n \"\"\"\n Group DataFrame or Series using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n **kwargs\n Optional, only accepts keyword argument 'mutated' and is passed\n to groupby.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Animal' : ['Falcon', 'Falcon',\n ... 'Parrot', 'Parrot'],\n ... 
'Max Speed' : [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby(['Animal']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n ... ['Capitve', 'Wild', 'Capitve', 'Wild']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n >>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]},\n ... index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Capitve 390.0\n Wild 350.0\n Parrot Capitve 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level=1).mean()\n Max Speed\n Type\n Capitve 210.0\n Wild 185.0\n \"\"\"\n from pandas.core.groupby.groupby import groupby\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n return groupby(self, by=by, axis=axis, level=level, as_index=as_index,\n sort=sort, group_keys=group_keys, squeeze=squeeze,\n observed=observed, **kwargs)\n\n def asfreq(self, freq, method=None, how=None, normalize=False,\n fill_value=None):\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset object, or string\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill\n how : {'start', 'end'}, default end\n For PeriodIndex only, see PeriodIndex.asfreq\n normalize : bool, default False\n Whether to reset output index to midnight\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n .. 
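# A small sketch of the as_index and sort parameters documented for groupby()
# above, assuming a toy frame similar to the docstring example: as_index=False
# keeps the group labels as a regular column ("SQL-style" output), and
# sort=False preserves first-seen group order.
import pandas as pd

df = pd.DataFrame({'Animal': ['Parrot', 'Falcon', 'Parrot', 'Falcon'],
                   'Max Speed': [24., 380., 26., 370.]})
flat = df.groupby('Animal', as_index=False, sort=False).mean()
assert list(flat.columns) == ['Animal', 'Max Speed']      # key kept as a column
assert flat['Animal'].tolist() == ['Parrot', 'Falcon']    # input order kept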
versionadded:: 0.20.0\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample import asfreq\n return asfreq(self, freq, method=method, how=how, normalize=normalize,\n fill_value=fill_value)\n\n def at_time(self, time, asof=False, axis=None):\n \"\"\"\n Select values at particular time of day (e.g. 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or string\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n values_at_time : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError:\n raise TypeError('Index must be DatetimeIndex')\n\n return self._take(indexer, axis=axis)\n\n def between_time(self, start_time, end_time, include_start=True,\n include_end=True, axis=None):\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or string\n end_time : datetime.time or string\n include_start : boolean, default True\n include_end : boolean, default True\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. 
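# A sketch complementing the asfreq examples above: method='ffill' (alias
# 'pad') propagates the last valid observation forward into the new 30-second
# slots, while a NaN that was already present stays untouched.
import pandas as pd

index = pd.date_range('1/1/2000', periods=4, freq='T')
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
up = series.asfreq('30S', method='ffill')
assert up['2000-01-01 00:00:30'] == 0.0       # newly inserted slot filled forward
assert pd.isna(up['2000-01-01 00:01:00'])     # pre-existing NaN kept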
versionadded:: 0.24.0\n\n Returns\n -------\n values_between_time : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time, end_time, include_start=include_start,\n include_end=include_end)\n except AttributeError:\n raise TypeError('Index must be DatetimeIndex')\n\n return self._take(indexer, axis=axis)\n\n def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,\n label=None, convention='start', kind=None, loffset=None,\n limit=None, base=0, on=None, level=None):\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n how : str\n Method for down/re-sampling, default to 'mean' for downsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).mean()``, or\n ``.resample(...).apply(<func>)``\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n fill_method : str, default None\n Filling method for upsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).<func>()``,\n e.g. ``.resample(...).pad()``\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n limit : int, default None\n Maximum size gap when reindexing with `fill_method`.\n\n .. 
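# A sketch of the error path in at_time/between_time above: the missing
# indexer_at_time / indexer_between_time attribute on a non-datetime index is
# turned into a TypeError. A plain RangeIndex triggers it.
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3]})          # default RangeIndex, not datetime
try:
    df.between_time('0:15', '0:45')
except TypeError as exc:
    print(exc)                               # e.g. "Index must be DatetimeIndex"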
deprecated:: 0.18.0\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. 
For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... 
freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n \"\"\"\n\n from pandas.core.resample import (resample,\n _maybe_process_deprecations)\n axis = self._get_axis_number(axis)\n r = resample(self, freq=rule, label=label, closed=closed,\n axis=axis, kind=kind, loffset=loffset,\n convention=convention,\n base=base, key=on, level=level)\n return _maybe_process_deprecations(r,\n how=how,\n fill_method=fill_method,\n limit=limit)\n\n def first(self, offset):\n \"\"\"\n Convenience method for subsetting initial periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calender days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 
3 weeks\n if not offset.isAnchored() and hasattr(offset, '_inc'):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side='left')\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self, offset):\n \"\"\"\n Convenience method for subsetting final periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calender days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side='right')\n return self.iloc[start:]\n\n def rank(self, axis=0, method='average', numeric_only=None,\n na_option='keep', ascending=True, pct=False):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n index to direct ranking\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n numeric_only : boolean, default None\n Include only float, int, boolean data. 
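# A sketch of the endpoint handling shown above for this version: in the
# tick-offset branch of first(), searchsorted(side='left') excludes the label
# that falls exactly on index[0] + offset, so first('2D') on daily data keeps
# strictly the first two calendar days; last() mirrors this from the other end
# with side='right'. Sample data is made up.
import pandas as pd

i = pd.date_range('2018-04-09', periods=4, freq='D')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
assert ts.first('2D')['A'].tolist() == [1, 2]   # 2018-04-09 and 2018-04-10 only
assert ts.last('2D')['A'].tolist() == [3, 4]    # 2018-04-11 and 2018-04-12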
Valid only for DataFrame or\n Panel objects\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : same type as caller\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if self.ndim > 2:\n msg = \"rank does not make sense when ndim > 2\"\n raise NotImplementedError(msg)\n\n if na_option not in {'keep', 'top', 'bottom'}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(data.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option,\n pct=pct)\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs['align'] = (\"\"\"\n Align two objects on their axes with the\n specified join method for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None)\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n copy : boolean, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
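# A quick sketch comparing the rank() tie-breaking methods and na_option
# values documented above, on made-up data containing a tie and a missing
# value.
import numpy as np
import pandas as pd

s = pd.Series([7, 3, 3, np.nan])
assert s.rank(method='average').tolist()[:3] == [3.0, 1.5, 1.5]
assert s.rank(method='min').tolist()[:3] == [3.0, 1.0, 1.0]
assert s.rank(method='dense').tolist()[:3] == [2.0, 1.0, 1.0]
assert np.isnan(s.rank(na_option='keep').iloc[-1])    # NA left unranked
assert s.rank(na_option='bottom').iloc[-1] == 4.0     # NA pushed to the end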
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects\n \"\"\")\n\n @Appender(_shared_docs['align'] % _shared_doc_kwargs)\n def align(self, other, join='outer', axis=None, level=None, copy=True,\n fill_value=None, method=None, limit=None, fill_axis=0,\n broadcast_axis=None):\n from pandas import DataFrame, Series\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, Series):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons({c: self for c in other.columns},\n **other._construct_axes_dict())\n return df._align_frame(other, join=join, axis=axis,\n level=level, copy=copy,\n fill_value=fill_value, method=method,\n limit=limit, fill_axis=fill_axis)\n elif isinstance(other, Series):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons({c: other for c in self.columns},\n **self._construct_axes_dict())\n return self._align_frame(df, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, DataFrame):\n return self._align_frame(other, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n elif isinstance(other, Series):\n return self._align_series(other, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n def _align_frame(self, other, join='outer', axis=None, level=None,\n copy=True, fill_value=None, method=None, limit=None,\n fill_axis=0):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True)\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True)\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(reindexers, copy=copy,\n fill_value=fill_value,\n allow_dups=True)\n # other must be always DataFrame\n right = other._reindex_with_indexers({0: [join_index, iridx],\n 1: [join_columns, cridx]},\n copy=copy, fill_value=fill_value,\n allow_dups=True)\n\n if method is not None:\n left = left.fillna(axis=fill_axis, method=method, limit=limit)\n right = right.fillna(axis=fill_axis, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _align_series(self, 
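# A sketch of Series.align with the default outer join implemented above: both
# objects come back re-indexed onto the union of the two indexes, and
# fill_value controls what goes into the newly introduced slots. Toy data.
import pandas as pd

a = pd.Series([1, 2], index=['x', 'y'])
b = pd.Series([10, 30], index=['x', 'z'])
left, right = a.align(b, join='outer', fill_value=0)
assert list(left.index) == ['x', 'y', 'z']
assert left.tolist() == [1, 2, 0]
assert right.tolist() == [10, 0, 30]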
other, join='outer', axis=None, level=None,\n copy=True, fill_value=None, method=None, limit=None,\n fill_axis=0):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError('cannot align series to a series other than '\n 'axis 0')\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(other.index, how=join,\n level=level,\n return_indexers=True)\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level,\n return_indexers=True)\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level,\n return_indexers=True)\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError('Must specify axis=0 or 1')\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit,\n axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join='right', broadcast_axis=1)\n else:\n if not hasattr(cond, 'shape'):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError('Array conditional must be same shape as '\n 'self')\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = True if inplace else False\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, pd.DataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, 'align'):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(other, join='left', axis=axis,\n level=level, fill_value=np.nan)\n\n # if we are NOT aligned, raise as we cannot where index\n if (axis is None and\n not all(other._get_axis(i).equals(ax)\n for i, ax in enumerate(self.axes))):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\"cannot align with a higher \"\n \"dimensional NDFrame\")\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n\n try:\n new_other = com.values_from_object(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n except Exception:\n try_quick = False\n\n # let's create a new (if we failed at the above\n # or not try_quick\n if not try_quick:\n\n dtype, fill_value = maybe_promote(other.dtype)\n new_other = np.empty(len(icond), dtype=dtype)\n new_other.fill(fill_value)\n maybe_upcast_putmask(new_other, icond, other)\n other = new_other\n\n else:\n raise ValueError('Length of replacements must equal '\n 'series length')\n\n else:\n raise ValueError('other must be the same shape as self '\n 'when an ndarray')\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, 'ndim', 0):\n align = True\n else:\n align = (self._get_axis_number(axis) == 1)\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(mask=cond, new=other, align=align,\n inplace=True, axis=block_axis,\n transpose=self._AXIS_REVERSED)\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(other=other, cond=cond, align=align,\n errors=errors,\n try_cast=try_cast, axis=block_axis,\n transpose=self._AXIS_REVERSED)\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs['where'] = (\"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : boolean 
%(klass)s, array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. The callable must\n not change input %(klass)s (though pandas doesn't check it).\n\n .. versionadded:: 0.18.1\n A callable can be used as cond.\n\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n\n .. versionadded:: 0.18.1\n A callable can be used as other.\n\n inplace : boolean, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default `raise`\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - `raise` : allow exceptions to be raised.\n - `ignore` : suppress exceptions. On error return original object.\n\n try_cast : boolean, default False\n Try to cast the result back to the input type (if possible).\n raise_on_error : boolean, default True\n Whether to raise on invalid data types (e.g. trying to where on\n strings).\n\n .. deprecated:: 0.21.0\n\n Use `errors`.\n\n Returns\n -------\n wh : same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. 
Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\")\n\n @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond=\"True\",\n cond_rev=\"False\", name='where',\n name_other='mask'))\n def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False, raise_on_error=None):\n\n if raise_on_error is not None:\n warnings.warn(\n \"raise_on_error is deprecated in \"\n \"favor of errors='raise|ignore'\",\n FutureWarning, stacklevel=2)\n\n if raise_on_error:\n errors = 'raise'\n else:\n errors = 'ignore'\n\n other = com.apply_if_callable(other, self)\n return self._where(cond, other, inplace, axis, level,\n errors=errors, try_cast=try_cast)\n\n @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond=\"False\",\n cond_rev=\"True\", name='mask',\n name_other='where'))\n def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False, raise_on_error=None):\n\n if raise_on_error is not None:\n warnings.warn(\n \"raise_on_error is deprecated in \"\n \"favor of errors='raise|ignore'\",\n FutureWarning, stacklevel=2)\n\n if raise_on_error:\n errors = 'raise'\n else:\n errors = 'ignore'\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(~cond, other=other, inplace=inplace, axis=axis,\n level=level, try_cast=try_cast,\n errors=errors)\n\n _shared_docs['shift'] = (\"\"\"\n Shift index by desired number of periods with an optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. 
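# A sketch of the callable forms mentioned in the parameter descriptions
# above: both cond and other may be callables evaluated on the object itself,
# and mask() is where() with the condition inverted. Example data is made up.
import pandas as pd

s = pd.Series([-2, -1, 0, 1, 2])
kept = s.where(lambda x: x > 0, lambda x: -x)    # replace non-positive entries
assert kept.tolist() == [2, 1, 0, 1, 2]
assert s.mask(s > 0, 0).tolist() == [-2, -1, 0, 0, 0]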
versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\")\n\n @Appender(_shared_docs['shift'] % _shared_doc_kwargs)\n def shift(self, periods=1, freq=None, axis=0, fill_value=None):\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(periods=periods, axis=block_axis,\n fill_value=fill_value)\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self, periods=1, axis=0):\n \"\"\"\n Equivalent to `shift` without copying data. The shifted data will\n not include the dropped periods and the shifted axis will be smaller\n than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(self, periods=1, freq=None, axis=0):\n \"\"\"\n Shift the time index, using the index's frequency if available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, default None\n Increment to use from the tseries module or time rule (e.g. 'EOM')\n axis : int or basestring\n Corresponds to the axis that contains the Index\n\n Returns\n -------\n shifted : NDFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. 
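# A sketch of the two shift modes described above: without freq the data moves
# and missing values are introduced, while shift(freq=...) delegates to
# tshift() and moves the index itself, leaving the values untouched.
import pandas as pd

idx = pd.date_range('2019-01-01', periods=3, freq='D')
s = pd.Series([10, 20, 30], index=idx)
assert s.shift(1).tolist()[1:] == [10, 20]            # data shifted, NaN at the top
shifted = s.shift(1, freq='D')                        # index shifted instead
assert shifted.tolist() == [10, 20, 30]
assert shifted.index[0] == pd.Timestamp('2019-01-02')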
If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, 'freq', None)\n\n if freq is None:\n freq = getattr(index, 'inferred_freq', None)\n\n if freq is None:\n msg = 'Freq was not given and was not set in the index'\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, string_types):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n else:\n msg = ('Given freq %s does not match PeriodIndex freq %s' %\n (freq.rule_code, orig_freq.rule_code))\n raise ValueError(msg)\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(self, before=None, after=None, axis=None, copy=True):\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, string, int\n Truncate all rows before this index value.\n after : date, string, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : boolean, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. 
They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError('Truncate: %s must be after %s' %\n (after, before))\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis),\n ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(self, tz, axis=0, level=None, copy=True):\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to convert\n level : int, str, default None\n If axis ia a MultiIndex, convert a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n\n Returns\n -------\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, 'tz_convert'):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError('%s is not a valid DatetimeIndex or '\n 'PeriodIndex' % ax_name)\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(\"The level {0} is not valid\".format(level))\n ax = _tz_convert(ax, tz)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n def tz_localize(self, tz, axis=0, level=None, copy=True,\n ambiguous='raise', nonexistent='raise'):\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to localize\n level : int, str, default None\n If axis ia a MultiIndex, localize a specific level. 
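# A sketch of the GH 17935 check above: truncate() refuses to work on an
# unsorted index, and sorting first restores label-based truncation. Toy data.
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3]}, index=[3, 1, 2])
try:
    df.truncate(before=1, after=2)
except ValueError as exc:
    print(exc)                                   # truncate requires a sorted index
assert df.sort_index().truncate(before=1, after=2)['A'].tolist() == [2, 3]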
Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid valuse are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7), index=pd.DatetimeIndex([\n ... '2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3), index=pd.DatetimeIndex([\n ... '2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n >>> s = pd.Series(range(2), index=pd.DatetimeIndex([\n ... '2015-03-29 02:30:00',\n ... 
'2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = ('raise', 'NaT', 'shift_forward',\n 'shift_backward')\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta):\n raise ValueError(\"The nonexistent argument must be one of 'raise',\"\n \" 'NaT', 'shift_forward', 'shift_backward' or\"\n \" a timedelta object\")\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, 'tz_localize'):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError('%s is not a valid DatetimeIndex or '\n 'PeriodIndex' % ax_name)\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(\n tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(\n ax.levels[level], tz, ambiguous, nonexistent\n )\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(\"The level {0} is not valid\".format(level))\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self):\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... 
})\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(self, percentiles=None, include=None, exclude=None):\n \"\"\"\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the obersvations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. 
If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... })\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim >= 3:\n msg = \"describe is not implemented on Panel objects.\"\n raise NotImplementedError(msg)\n elif self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n self._check_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n 
percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (['count', 'mean', 'std', 'min'] +\n formatted_percentiles + ['max'])\n d = ([series.count(), series.mean(), series.std(), series.min()] +\n series.quantile(percentiles).tolist() + [series.max()])\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = ['count', 'unique']\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n\n if is_datetime64_any_dtype(data):\n tz = data.dt.tz\n asint = data.dropna().values.view('i8')\n top = Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n names += ['top', 'freq', 'first', 'last']\n result += [top, freq,\n Timestamp(asint.min(), tz=tz),\n Timestamp(asint.max(), tz=tz)]\n else:\n names += ['top', 'freq']\n result += [top, freq]\n\n return pd.Series(result, index=names, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == 'all':\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.iteritems()]\n # set a convenient order for rows\n names = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)\n d.columns = data.columns.copy()\n return d\n\n def _check_percentile(self, q):\n \"\"\"\n Validate percentiles (used by describe and quantile).\n \"\"\"\n\n msg = (\"percentiles should all be in the interval [0, 1]. \"\n \"Try {0} instead.\")\n q = np.asarray(q)\n if q.ndim == 0:\n if not 0 <= q <= 1:\n raise ValueError(msg.format(q / 100.0))\n else:\n if not all(0 <= qs <= 1 for qs in q):\n raise ValueError(msg.format(q / 100.0))\n return q\n\n _shared_docs['pct_change'] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. 
This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or offset alias string, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... 
index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n **kwargs):\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self.fillna(method=fill_method, limit=limit, axis=axis)\n\n rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,\n **kwargs)) - 1)\n rs = rs.reindex_like(data)\n if freq is None:\n mask = isna(com.values_from_object(data))\n np.putmask(rs.values, mask, np.nan)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n\n axis_descr, name, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany,\n _any_see_also, _any_examples, empty_value=False)\n cls.all = _make_logical_function(\n cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall,\n _all_see_also, _all_examples, empty_value=True)\n\n @Substitution(outname='mad',\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name, name2=name2, axis_descr=axis_descr,\n min_count='', see_also='', examples='')\n @Appender(_num_doc)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level('mad', axis=axis, level=level,\n skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls, 'sem', name, name2, axis_descr,\n \"Return unbiased standard error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. This can be changed \"\n \"using the ddof argument\",\n nanops.nansem)\n cls.var = _make_stat_function_ddof(\n cls, 'var', name, name2, axis_descr,\n \"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n nanops.nanvar)\n cls.std = _make_stat_function_ddof(\n cls, 'std', name, name2, axis_descr,\n \"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. 
This can be changed using the \"\n \"ddof argument\",\n nanops.nanstd)\n\n @Substitution(outname='compounded',\n desc=\"Return the compound percentage of the values for \"\n \"the requested axis.\", name1=name, name2=name2,\n axis_descr=axis_descr,\n min_count='', see_also='', examples='')\n @Appender(_num_doc)\n def compound(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1\n\n cls.compound = compound\n\n cls.cummin = _make_cum_function(\n cls, 'cummin', name, name2, axis_descr, \"minimum\",\n lambda y, axis: np.minimum.accumulate(y, axis), \"min\",\n np.inf, np.nan, _cummin_examples)\n cls.cumsum = _make_cum_function(\n cls, 'cumsum', name, name2, axis_descr, \"sum\",\n lambda y, axis: y.cumsum(axis), \"sum\", 0.,\n np.nan, _cumsum_examples)\n cls.cumprod = _make_cum_function(\n cls, 'cumprod', name, name2, axis_descr, \"product\",\n lambda y, axis: y.cumprod(axis), \"prod\", 1.,\n np.nan, _cumprod_examples)\n cls.cummax = _make_cum_function(\n cls, 'cummax', name, name2, axis_descr, \"maximum\",\n lambda y, axis: np.maximum.accumulate(y, axis), \"max\",\n -np.inf, np.nan, _cummax_examples)\n\n cls.sum = _make_min_count_stat_function(\n cls, 'sum', name, name2, axis_descr,\n \"\"\"Return the sum of the values for the requested axis.\\n\n This is equivalent to the method ``numpy.sum``.\"\"\",\n nanops.nansum, _stat_func_see_also, _sum_examples)\n cls.mean = _make_stat_function(\n cls, 'mean', name, name2, axis_descr,\n 'Return the mean of the values for the requested axis.',\n nanops.nanmean)\n cls.skew = _make_stat_function(\n cls, 'skew', name, name2, axis_descr,\n 'Return unbiased skew over requested axis\\nNormalized by N-1.',\n nanops.nanskew)\n cls.kurt = _make_stat_function(\n cls, 'kurt', name, name2, axis_descr,\n \"Return unbiased kurtosis over requested axis using Fisher's \"\n \"definition of\\nkurtosis (kurtosis of normal == 0.0). Normalized \"\n \"by N-1.\",\n nanops.nankurt)\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls, 'prod', name, name2, axis_descr,\n 'Return the product of the values for the requested axis.',\n nanops.nanprod, examples=_prod_examples)\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls, 'median', name, name2, axis_descr,\n 'Return the median of the values for the requested axis.',\n nanops.nanmedian)\n cls.max = _make_stat_function(\n cls, 'max', name, name2, axis_descr,\n \"\"\"Return the maximum of the values for the requested axis.\\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.\"\"\",\n nanops.nanmax, _stat_func_see_also, _max_examples)\n cls.min = _make_stat_function(\n cls, 'min', name, name2, axis_descr,\n \"\"\"Return the minimum of the values for the requested axis.\\n\n If you want the *index* of the minimum, use ``idxmin``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.\"\"\",\n nanops.nanmin, _stat_func_see_also, _min_examples)\n\n @classmethod\n def _add_series_only_operations(cls):\n \"\"\"\n Add the series only operations to the cls; evaluate the doc\n strings again.\n \"\"\"\n\n axis_descr, name, name2 = _doc_parms(cls)\n\n def nanptp(values, axis=0, skipna=True):\n nmax = nanops.nanmax(values, axis, skipna)\n nmin = nanops.nanmin(values, axis, skipna)\n warnings.warn(\"Method .ptp is deprecated and will be removed \"\n \"in a future version. 
Use numpy.ptp instead.\",\n FutureWarning, stacklevel=4)\n return nmax - nmin\n\n cls.ptp = _make_stat_function(\n cls, 'ptp', name, name2, axis_descr,\n \"\"\"Returns the difference between the maximum value and the\n minimum value in the object. This is the equivalent of the\n ``numpy.ndarray`` method ``ptp``.\\n\\n.. deprecated:: 0.24.0\n Use numpy.ptp instead\"\"\",\n nanptp)\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n\n from pandas.core import window as rwindow\n\n @Appender(rwindow.rolling.__doc__)\n def rolling(self, window, min_periods=None, center=False,\n win_type=None, on=None, axis=0, closed=None):\n axis = self._get_axis_number(axis)\n return rwindow.rolling(self, window=window,\n min_periods=min_periods,\n center=center, win_type=win_type,\n on=on, axis=axis, closed=closed)\n\n cls.rolling = rolling\n\n @Appender(rwindow.expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return rwindow.expanding(self, min_periods=min_periods,\n center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(rwindow.ewm.__doc__)\n def ewm(self, com=None, span=None, halflife=None, alpha=None,\n min_periods=0, adjust=True, ignore_na=False,\n axis=0):\n axis = self._get_axis_number(axis)\n return rwindow.ewm(self, com=com, span=span, halflife=halflife,\n alpha=alpha, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na, axis=axis)\n\n cls.ewm = ewm\n\n @Appender(_shared_docs['transform'] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce \"\n \"aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs['valid_index'] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n --------\n scalar : type of index\n\n Notes\n --------\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n assert how in ['first', 'last']\n\n if len(self) == 0: # early stop\n return None\n is_valid = ~self.isna()\n\n if self.ndim == 2:\n is_valid = is_valid.any(1) # reduce axis 1\n\n if how == 'first':\n idxpos = is_valid.values[::].argmax()\n\n if how == 'last':\n idxpos = len(self) - 1 - is_valid.values[::-1].argmax()\n\n chk_notna = is_valid.iat[idxpos]\n idx = self.index[idxpos]\n\n if not chk_notna:\n return None\n return idx\n\n @Appender(_shared_docs['valid_index'] % {'position': 'first',\n 'klass': 'NDFrame'})\n def first_valid_index(self):\n return self._find_valid_index('first')\n\n @Appender(_shared_docs['valid_index'] % {'position': 'last',\n 'klass': 'NDFrame'})\n def last_valid_index(self):\n return self._find_valid_index('last')\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = \"{%s}\" % ', '.join([\"{0} ({1})\".format(a, i)\n for i, a in enumerate(cls._AXIS_ORDERS)])\n name = (cls._constructor_sliced.__name__\n if cls._AXIS_LEN > 1 else 'scalar')\n 
name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(outname)s : %(name1)s or %(name2)s (if level specified)\n%(see_also)s\n%(examples)s\\\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : boolean, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(outname)s : %(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. 
zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(outname)s : %(name1)s or %(name2)s\\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\n\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. 
non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs['stat_func_example'] = \"\"\"\\\nExamples\n--------\n\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\"\"\"\n\n_sum_examples = _shared_docs['stat_func_example'].format(\n stat_func='sum',\n verb='Sum',\n default_output=14,\n level_output_0=6,\n level_output_1=8)\n\n_sum_examples += \"\"\"\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\n\"\"\"\n\n_max_examples = _shared_docs['stat_func_example'].format(\n stat_func='max',\n verb='Max',\n default_output=8,\n level_output_0=4,\n level_output_1=8)\n\n_min_examples = _shared_docs['stat_func_example'].format(\n stat_func='min',\n verb='Min',\n default_output=0,\n level_output_0=2,\n level_output_1=0)\n\n_stat_func_see_also = \"\"\"\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.min : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\n\"\"\"\n\n_prod_examples = \"\"\"\\\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\n\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded :: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc,\n f, see_also='', examples=''):\n @Substitution(outname=name, desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, min_count=_min_count_stub,\n see_also=see_also, examples=examples)\n @Appender(_num_doc)\n def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,\n min_count=0,\n **kwargs):\n if name == 'sum':\n nv.validate_sum(tuple(), kwargs)\n elif name == 'prod':\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna, min_count=min_count)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=numeric_only, min_count=min_count)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(cls, name, name1, name2, axis_descr, desc, f,\n see_also='', examples=''):\n @Substitution(outname=name, desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, min_count='', see_also=see_also,\n examples=examples)\n @Appender(_num_doc)\n def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,\n **kwargs):\n if name == 'median':\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=numeric_only)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):\n @Substitution(outname=name, desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(self, axis=None, skipna=None, level=None, ddof=1,\n numeric_only=None, **kwargs):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna, ddof=ddof)\n return self._reduce(f, name, axis=axis, numeric_only=numeric_only,\n skipna=skipna, ddof=ddof)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(cls, name, name1, name2, axis_descr, desc,\n accum_func, accum_func_name, mask_a, mask_b, examples):\n @Substitution(outname=name, desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, accum_func_name=accum_func_name,\n examples=examples)\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n y = com.values_from_object(self).copy()\n\n if (skipna and\n issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):\n result = accum_func(y, axis)\n mask = isna(self)\n np.putmask(result, mask, iNaT)\n elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):\n mask = isna(self)\n np.putmask(y, mask, mask_a)\n result = accum_func(y, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(y, axis)\n\n d = 
self._construct_axes_dict()\n d['copy'] = False\n return self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(cls, name, name1, name2, axis_descr, desc, f,\n see_also, examples, empty_value):\n @Substitution(outname=name, desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, see_also=see_also, examples=examples,\n empty_value=empty_value)\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None,\n **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\"Option bool_only is not \"\n \"implemented with option level.\")\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=bool_only, filter_type='bool')\n\n return set_function_name(logical_func, name, cls)\n\n\n# install the indexes\nfor _name, _indexer in indexing.get_indexers_list():\n NDFrame._create_indexer(_name, _indexer)\n" ]
[ [ "numpy.full", "numpy.zeros", "pandas.date_range", "numpy.random.randn", "numpy.random.randint", "numpy.arange", "pandas.util.testing.makeStringIndex", "pandas.Series" ], [ "pandas.io.pickle.to_pickle", "pandas.core.index.RangeIndex", "pandas.core.missing.clean_reindex_fill_method", "pandas.compat.lzip", "pandas.io.formats.excel.ExcelFormatter", "pandas.core.dtypes.common.is_number", "pandas.core.window.rolling", "pandas.compat.iteritems", "pandas.DataFrame", "pandas.compat.numpy.function.validate_clip_with_axis", "pandas.compat.numpy.function.validate_cum_func_with_skipna", "pandas.core.common.SettingWithCopyError", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.common._get_rename_function", "numpy.prod", "pandas.core.dtypes.common.is_bool", "pandas.util._decorators.rewrite_axis_style_signature", "pandas.core.common.count_not_none", "pandas.Index", "numpy.minimum.accumulate", "numpy.errstate", "numpy.any", "pandas.core.window.ewm", "pandas.core.resample.asfreq", "pandas.io.formats.csvs.CSVFormatter", "pandas.concat", "pandas.core.dtypes.missing.isna", "pandas.core.index.Index", "pandas.core.nanops.nanmin", "pandas.core.dtypes.common.is_re_compilable", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.core.common.random_state", "pandas.core.missing.clean_fill_method", "pandas.compat.to_str", "numpy.array", "pandas.io.sql.to_sql", "pandas.core.common.maybe_box_datetimelike", "pandas.core.ops._align_method_FRAME", "pandas.isnull", "pandas.core.common.maybe_make_list", "pandas.core.indexes.datetimes.DatetimeIndex", "numpy.maximum.accumulate", "pandas.core.dtypes.common.is_period_arraylike", "pandas.Series", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas._libs.Timestamp", "pandas.core.dtypes.common.is_bool_dtype", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas.core.indexing.get_indexers_list", "pandas.core.dtypes.cast.maybe_upcast_putmask", "pandas.core.common.apply_if_callable", "pandas.io.formats.format.format_percentiles", "pandas.core.nanops.nanmax", "pandas.core.dtypes.common.is_dict_like", "pandas.core.indexes.period.Period", "pandas.core.dtypes.common.is_object_dtype", "pandas.util._validators.validate_fillna_kwargs", "pandas.core.config.get_option", "pandas.util._decorators.Substitution", "pandas.errors.AbstractMethodError", "pandas.core.common.index_labels_to_array", "pandas.core.dtypes.cast.maybe_promote", "pandas.core.dtypes.common.is_scalar", "pandas.core.common.values_from_object", "pandas.core.index.ensure_index", "pandas.core.dtypes.common.is_integer", "pandas.core.algorithms.rank", "pandas.io.json.to_json", "pandas.io.formats.format.DataFrameFormatter", "pandas.core.dtypes.common.ensure_int64", "pandas.core.dtypes.common.is_list_like", "numpy.asarray", "pandas.core.missing.mask_missing", "pandas.core.missing.get_fill_func", "numpy.abs", "pandas.core.resample.resample", "pandas.core.groupby.groupby.groupby", "pandas.io.pytables.to_hdf", "pandas.core.window.expanding", "pandas.compat.map", "pandas.core.dtypes.inference.is_hashable", "pandas.core.dtypes.missing.notna", "numpy.putmask", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.common._pipe", "pandas.util._validators.validate_bool_kwarg", "pandas.core.tools.datetimes.to_datetime", "pandas.compat.isidentifier", "pandas.util._decorators.Appender", "pandas.compat.lrange", "pandas.core.resample._maybe_process_deprecations", "pandas.MultiIndex.from_product", "pandas.compat.numpy.function.validate_transpose_for_generic", "pandas.tseries.frequencies.to_offset", 
"numpy.isnan", "pandas.compat.zip", "pandas.io.clipboards.to_clipboard", "pandas.core.dtypes.common.is_numeric_dtype", "pandas.io.packers.to_msgpack", "pandas.compat.set_function_name", "numpy.asanyarray", "numpy.unique" ] ]
antsfamily/torchtool
[ "fd0d6e6fe6701206b15f95af145d6178a87233f9" ]
[ "torchlib/module/loss/contrast.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2019-07-24 18:29:48\n# @Author : Zhi Liu (zhiliu.mind@gmail.com)\n# @Link : http://iridescent.ink\n# @Version : $1.0$\n\nimport torch as th\nimport torchlib as tl\n\n\nclass ContrastReciprocalLoss(th.nn.Module):\n r\"\"\"ContrastReciprocalLoss\n\n way1 is defined as follows, for contrast, see [1]:\n\n .. math::\n C = \\frac{{\\rm E}(|I|^2)}{\\sqrt{{\\rm E}\\left(|I|^2 - {\\rm E}(|I|^2)\\right)^2}}\n\n\n way2 is defined as follows, for contrast, see [2]:\n\n .. math::\n C = \\frac{\\left({\\rm E}(|I|)\\right)^2}{{\\rm E}(|I|^2)}\n\n [1] Efficient Nonparametric ISAR Autofocus Algorithm Based on Contrast Maximization and Newton\n [2] section 13.4.1 in \"Ian G. Cumming's SAR book\"\n\n Parameters\n ----------\n X : numpy ndarray\n The image array.\n mode : str, optional\n ``'way1'`` or ``'way2'``\n axis : tuple, None, optional\n the dimensions for compute entropy. by default None (if input's dimension > 2, then all but the first, else all).\n caxis : int or None\n If :attr:`X` is complex-valued, :attr:`caxis` is ignored. If :attr:`X` is real-valued and :attr:`caxis` is integer\n then :attr:`X` will be treated as complex-valued, in this case, :attr:`caxis` specifies the complex axis;\n otherwise (None), :attr:`X` will be treated as real-valued\n reduction : str, optional\n The operation in batch dim, ``'None'``, ``'mean'`` or ``'sum'`` (the default is 'mean')\n\n Returns\n -------\n scalar\n The contrast value of input.\n\n Examples\n --------\n\n ::\n\n th.manual_seed(2020)\n X = th.randn(1, 3, 4, 2)\n ctst_func = ContrastReciprocalLoss(mode='way1', axis=(1, 2), caxis=-1, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n X = X[:, :, :, 0] + 1j * X[:, :, :, 1]\n ctst_func = ContrastReciprocalLoss(mode='way1', axis=(1, 2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastReciprocalLoss(mode='way1', axis=None, caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastReciprocalLoss(mode='way1', axis=(2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastReciprocalLoss(mode='way1', axis=(2), caxis=None, reduction=None)\n V = ctst_func(X)\n print(V)\n\n \"\"\"\n\n def __init__(self, mode='way1', axis=None, caxis=None, reduction='mean'):\n super(ContrastReciprocalLoss, self).__init__()\n self.mode = mode\n self.axis = axis\n self.caxis = caxis\n self.reduction = reduction\n\n def forward(self, X):\n\n if th.is_complex(X):\n X = (X * X.conj()).real\n else:\n if type(self.caxis) is int:\n if X.shape[self.caxis] != 2:\n raise ValueError('The complex input is represented in real-valued formation, but you specifies wrong axis!')\n X = th.pow(X, 2).sum(axis=self.caxis, keepdims=True)\n if self.caxis is None:\n X = th.pow(X, 2)\n\n if self.axis is None:\n D = X.dim()\n axis = tuple(range(1, D)) if D > 2 else tuple(range(0, D))\n else:\n axis = self.axis\n\n if X.dtype is not th.float32 or th.double:\n X = X.to(th.float32)\n\n if self.mode in ['way1', 'WAY1']:\n Xmean = X.mean(axis=axis, keepdims=True)\n C = Xmean / ((X - Xmean).pow(2).mean(axis=axis, keepdims=True).sqrt() + EPS)\n if self.mode in ['way2', 'WAY2']:\n C = (X.sqrt().mean(axis=axis, keepdims=True)).pow(2) / (X.mean(axis=axis, keepdims=True) + EPS)\n\n if self.reduction == 'mean':\n C = th.mean(C)\n if self.reduction == 'sum':\n C = th.sum(C)\n return C\n\n\nclass NegativeContrastLoss(th.nn.Module):\n r\"\"\"NegativeContrastLoss\n\n way1 is defined as follows, for contrast, see [1]:\n\n .. 
math::\n C = -\\frac{\\sqrt{{\\rm E}\\left(|I|^2 - {\\rm E}(|I|^2)\\right)^2}}{{\\rm E}(|I|^2)}\n\n\n way2 is defined as follows, for contrast, see [2]:\n\n .. math::\n C = -\\frac{{\\rm E}(|I|^2)}{\\left({\\rm E}(|I|)\\right)^2}\n\n [1] Efficient Nonparametric ISAR Autofocus Algorithm Based on Contrast Maximization and Newton\n [2] section 13.4.1 in \"Ian G. Cumming's SAR book\"\n\n Parameters\n ----------\n X : numpy ndarray\n The image array.\n mode : str, optional\n ``'way1'`` or ``'way2'``\n axis : tuple, None, optional\n the dimensions for compute entropy. by default None (if input's dimension > 2, then all but the first, else all).\n caxis : int or None\n If :attr:`X` is complex-valued, :attr:`caxis` is ignored. If :attr:`X` is real-valued and :attr:`caxis` is integer\n then :attr:`X` will be treated as complex-valued, in this case, :attr:`caxis` specifies the complex axis;\n otherwise (None), :attr:`X` will be treated as real-valued\n reduction : str, optional\n The operation in batch dim, ``'None'``, ``'mean'`` or ``'sum'`` (the default is 'mean')\n\n Returns\n -------\n scalar\n The contrast value of input.\n\n Examples\n --------\n\n ::\n\n th.manual_seed(2020)\n X = th.randn(1, 3, 4, 2)\n ctst_func = NegativeContrastLoss(mode='way1', axis=(1, 2), caxis=-1, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n X = X[:, :, :, 0] + 1j * X[:, :, :, 1]\n ctst_func = NegativeContrastLoss(mode='way1', axis=(1, 2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=None, caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=(2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=(2), caxis=None, reduction=None)\n V = ctst_func(X)\n print(V)\n \n # output\n tensor(-1.2694)\n tensor(-1.2694)\n tensor(-1.2694)\n tensor(-0.7724)\n tensor([[[-0.9093],\n [-1.0752],\n [-0.3326]]])\n\n\n \"\"\"\n\n def __init__(self, mode='way1', axis=None, caxis=None, reduction='mean'):\n super(NegativeContrastLoss, self).__init__()\n self.mode = mode\n self.axis = axis\n self.caxis = caxis\n self.reduction = reduction\n\n def forward(self, X):\n return -tl.contrast(X, mode=self.mode, axis=self.axis, caxis=self.caxis, reduction=self.reduction)\n\n\nclass ContrastLoss(th.nn.Module):\n r\"\"\"Contrast Loss\n\n way1 is defined as follows, see [1]:\n\n .. math::\n C = \\frac{\\sqrt{{\\rm E}\\left(|I|^2 - {\\rm E}(|I|^2)\\right)^2}}{{\\rm E}(|I|^2)}\n\n\n way2 is defined as follows, see [2]:\n\n .. math::\n C = \\frac{{\\rm E}(|I|^2)}{\\left({\\rm E}(|I|)\\right)^2}\n\n [1] Efficient Nonparametric ISAR Autofocus Algorithm Based on Contrast Maximization and Newton\n [2] section 13.4.1 in \"Ian G. Cumming's SAR book\"\n\n Parameters\n ----------\n X : numpy ndarray\n The image array.\n mode : str, optional\n ``'way1'`` or ``'way2'``\n axis : tuple, None, optional\n the dimensions for compute entropy. by default None (if input's dimension > 2, then all but the first, else all).\n caxis : int or None\n If :attr:`X` is complex-valued, :attr:`caxis` is ignored. 
If :attr:`X` is real-valued and :attr:`caxis` is integer\n then :attr:`X` will be treated as complex-valued, in this case, :attr:`caxis` specifies the complex axis;\n otherwise (None), :attr:`X` will be treated as real-valued\n reduction : str, optional\n The operation in batch dim, ``'None'``, ``'mean'`` or ``'sum'`` (the default is 'mean')\n\n Returns\n -------\n scalar\n The contrast value of input.\n\n Examples\n --------\n\n ::\n\n th.manual_seed(2020)\n X = th.randn(1, 3, 4, 2)\n ctst_func = ContrastLoss(mode='way1', axis=(1, 2), caxis=-1, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n X = X[:, :, :, 0] + 1j * X[:, :, :, 1]\n ctst_func = ContrastLoss(mode='way1', axis=(1, 2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastLoss(mode='way1', axis=None, caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastLoss(mode='way1', axis=(2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = ContrastLoss(mode='way1', axis=(2), caxis=None, reduction=None)\n V = ctst_func(X)\n print(V)\n \n # output\n tensor(1.2694)\n tensor(1.2694)\n tensor(1.2694)\n tensor(0.7724)\n tensor([[[0.9093],\n [1.0752],\n [0.3326]]])\n \"\"\"\n\n def __init__(self, mode='way1', axis=None, caxis=None, reduction='mean'):\n super(ContrastLoss, self).__init__()\n self.mode = mode\n self.axis = axis\n self.caxis = caxis\n self.reduction = reduction\n\n def forward(self, X):\n\n return tl.contrast(X, mode=self.mode, axis=self.axis, caxis=self.caxis, reduction=self.reduction)\n\n\nif __name__ == '__main__':\n\n th.manual_seed(2020)\n X = th.randn(1, 3, 4, 2)\n ctst_func = NegativeContrastLoss(mode='way1', axis=(1, 2), caxis=-1, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n X = X[:, :, :, 0] + 1j * X[:, :, :, 1]\n ctst_func = NegativeContrastLoss(mode='way1', axis=(1, 2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=None, caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=(2), caxis=None, reduction='mean')\n V = ctst_func(X)\n print(V)\n\n ctst_func = NegativeContrastLoss(mode='way1', axis=(2), caxis=None, reduction=None)\n V = ctst_func(X)\n print(V)" ]
[ [ "torch.pow", "torch.manual_seed", "torch.is_complex", "torch.randn", "torch.mean", "torch.sum" ] ]
hhy37/tensor2tensor
[ "b4094d065fa0ae8842cd667fb0e5a2c652407c9c", "b4094d065fa0ae8842cd667fb0e5a2c652407c9c" ]
[ "tensor2tensor/layers/latent_layers_test.py", "tensor2tensor/models/video/basic_recurrent.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for layers in latent variable models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport six\n\nfrom tensor2tensor.layers import common_image_attention as cia\nfrom tensor2tensor.layers import discretization\nfrom tensor2tensor.layers import latent_layers\nfrom tensor2tensor.models import transformer\n\nimport tensorflow as tf\n\n\ndef imagetransformer_latent_tiny():\n \"\"\"Tiny set of hparams for a latent image model.\"\"\"\n hparams = transformer.transformer_small()\n hparams.batch_size = 2\n hparams.num_hidden_layers = 3\n hparams.hidden_size = 16\n hparams.filter_size = 32\n hparams.compress_filter_size = 64\n hparams.ffn_layer = \"conv_hidden_relu\"\n hparams.layer_prepostprocess_dropout = 0.2\n hparams.layer_preprocess_sequence = \"none\"\n hparams.layer_postprocess_sequence = \"dan\"\n hparams.dropout = 0.3\n hparams.pos = \"timing\"\n hparams.num_encoder_layers = 1\n hparams.num_decoder_layers = 2\n hparams.use_pad_remover = False\n hparams.add_hparam(\"logit_normalization\", True)\n hparams.add_hparam(\"bottleneck_kind\", \"dvq\")\n hparams.add_hparam(\"bottleneck_bits\", 4)\n hparams.add_hparam(\"num_residuals\", 1)\n hparams.add_hparam(\"use_gold_targets\", False)\n hparams.add_hparam(\"do_compress_attend\", False)\n hparams.add_hparam(\"do_decompress_attend\", False)\n hparams.add_hparam(\"drop_inputs\", False)\n hparams.add_hparam(\"num_compress_steps\", 2)\n hparams.add_hparam(\"startup_steps\", 10000)\n hparams.add_hparam(\"mask_startup_steps\", 50000)\n hparams.add_hparam(\"latent_dropout\", 0.0)\n hparams.add_hparam(\"decode_autoregressive\", False)\n hparams.add_hparam(\"vq_beta\", 0.25)\n hparams.add_hparam(\"vq_epsilon\", 1e-5)\n hparams.add_hparam(\"vq_decay\", 0.999)\n hparams.add_hparam(\"ema\", False)\n hparams.add_hparam(\"soft_em\", True)\n hparams.add_hparam(\"num_samples\", 1)\n hparams.add_hparam(\"num_latent_layers\", 2)\n hparams.add_hparam(\"num_res_layers\", 2)\n hparams.add_hparam(\"res_kernel_size\", 3)\n hparams.add_hparam(\"num_blocks\", 1)\n hparams.add_hparam(\"reshape_method\", \"slice\")\n hparams.add_hparam(\"shared_rel\", False)\n hparams.add_hparam(\"block_size\", 1)\n hparams.add_hparam(\"kernel_size\", 3)\n hparams.add_hparam(\"img_len\", 8)\n hparams.add_hparam(\"num_channels\", 1)\n hparams.add_hparam(\"local_and_global_att\", False)\n hparams.add_hparam(\"block_length\", 32)\n hparams.add_hparam(\"block_width\", 128)\n hparams.add_hparam(\"dec_attention_type\", cia.AttentionType.LOCAL_1D)\n hparams.add_hparam(\"latent_attention_type\", cia.AttentionType.GLOBAL)\n hparams.add_hparam(\"block_raster_scan\", False)\n hparams.add_hparam(\"num_latents\", 1)\n hparams.add_hparam(\"q_filter_width\", 1)\n hparams.add_hparam(\"kv_filter_width\", 1)\n return hparams\n\n\nclass LatentLayersTest(tf.test.TestCase):\n\n 
@tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testTransformerAutoencoder(self):\n hparams = imagetransformer_latent_tiny()\n hparams.mode = tf.estimator.ModeKeys.TRAIN\n block_dim = int(hparams.hidden_size // hparams.num_blocks)\n block_v_size = 2**(hparams.bottleneck_bits /\n (hparams.num_residuals * hparams.num_blocks))\n block_v_size = int(block_v_size)\n means = tf.get_variable(\n name=\"means\",\n shape=[hparams.num_residuals,\n hparams.num_blocks,\n block_v_size,\n block_dim],\n initializer=tf.uniform_unit_scaling_initializer())\n hparams.bottleneck = functools.partial(\n discretization.discrete_bottleneck,\n hidden_size=hparams.hidden_size,\n z_size=hparams.bottleneck_bits,\n filter_size=hparams.filter_size,\n startup_steps=hparams.startup_steps,\n bottleneck_kind=hparams.bottleneck_kind,\n num_blocks=hparams.num_blocks,\n num_residuals=hparams.num_residuals,\n reshape_method=hparams.reshape_method,\n beta=hparams.vq_beta,\n decay=hparams.vq_decay,\n soft_em=hparams.soft_em,\n num_samples=hparams.num_samples,\n epsilon=hparams.vq_epsilon,\n ema=hparams.ema,\n means=means)\n\n inputs = None\n batch_size = hparams.batch_size\n targets = tf.random_uniform([batch_size,\n hparams.img_len,\n hparams.img_len,\n hparams.hidden_size],\n minval=-1., maxval=1.)\n target_space_id = None\n\n tf.train.create_global_step()\n decoder_output, losses, cache = latent_layers.transformer_autoencoder(\n inputs, targets, target_space_id, hparams)\n\n self.assertEqual(set(six.iterkeys(losses)),\n {\"extra\", \"extra_loss\", \"latent_pred\"})\n\n self.evaluate(tf.global_variables_initializer())\n decoder_output_, extra_loss_, latent_pred_ = self.evaluate(\n [decoder_output, losses[\"extra_loss\"], losses[\"latent_pred\"]])\n self.assertEqual(decoder_output_.shape, (batch_size,\n hparams.img_len,\n hparams.img_len,\n hparams.hidden_size))\n self.assertEqual(extra_loss_.shape, (batch_size,))\n self.assertEqual(latent_pred_.shape, (batch_size,))\n self.assertAllGreaterEqual(extra_loss_, 0.)\n self.assertAllGreaterEqual(latent_pred_, 0.)\n self.assertEqual(cache, None)\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic recurrent models for testing simple tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\nfrom tensor2tensor.models.video import basic_stochastic\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\ntfl = tf.layers\ntfcl = tf.contrib.layers\n\n\n@registry.register_model\nclass NextFrameBasicRecurrent(\n basic_stochastic.NextFrameBasicStochasticDiscrete):\n \"\"\"Basic next-frame recurrent model.\"\"\"\n\n def predict_next_frame(self, frame, action, lstm_states):\n hparams = self.hparams\n filters = hparams.hidden_size\n 
kernel1, kernel2 = (3, 3), (4, 4)\n lstm_func = common_video.conv_lstm_2d\n\n # Embed the inputs.\n inputs_shape = common_layers.shape_list(frame)\n # Using non-zero bias initializer below for edge cases of uniform inputs.\n x = tf.layers.dense(\n frame, filters, name=\"inputs_embed\",\n bias_initializer=tf.random_normal_initializer(stddev=0.01))\n x = common_attention.add_timing_signal_nd(x)\n\n # Down-stride.\n layer_inputs = [x]\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"downstride%d\" % i):\n layer_inputs.append(x)\n x = common_layers.make_even_size(x)\n if i < hparams.filter_double_steps:\n filters *= 2\n x = common_attention.add_timing_signal_nd(x)\n x = tf.layers.conv2d(x, filters, kernel2, activation=common_layers.belu,\n strides=(2, 2), padding=\"SAME\")\n x = common_layers.layer_norm(x)\n\n # Add embedded action if present.\n if self.has_action:\n x = self.inject_additional_input(\n x, action, \"action_enc\", hparams.action_injection)\n\n x, extra_loss = self.inject_latent(x, self.features, filters)\n\n # LSTM layers\n for j in range(hparams.num_lstm_layers):\n x, lstm_states[j] = lstm_func(x, lstm_states[j], hparams.num_lstm_filters)\n\n # Run a stack of convolutions.\n for i in range(hparams.num_hidden_layers):\n with tf.variable_scope(\"layer%d\" % i):\n y = tf.nn.dropout(x, 1.0 - hparams.dropout)\n y = tf.layers.conv2d(y, filters, kernel1, activation=common_layers.belu,\n strides=(1, 1), padding=\"SAME\")\n if i == 0:\n x = y\n else:\n x = common_layers.layer_norm(x + y)\n\n # Up-convolve.\n layer_inputs = list(reversed(layer_inputs))\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"upstride%d\" % i):\n if self.has_action:\n x = self.inject_additional_input(\n x, action, \"action_enc\", hparams.action_injection)\n if i >= hparams.num_compress_steps - hparams.filter_double_steps:\n filters //= 2\n x = tf.layers.conv2d_transpose(\n x, filters, kernel2, activation=common_layers.belu,\n strides=(2, 2), padding=\"SAME\")\n y = layer_inputs[i]\n shape = common_layers.shape_list(y)\n x = x[:, :shape[1], :shape[2], :]\n x = common_layers.layer_norm(x + y)\n x = common_attention.add_timing_signal_nd(x)\n\n # Cut down to original size.\n x = x[:, :inputs_shape[1], :inputs_shape[2], :]\n if self.is_per_pixel_softmax:\n x = tf.layers.dense(x, hparams.problem.num_channels * 256, name=\"logits\")\n else:\n x = tf.layers.dense(x, hparams.problem.num_channels, name=\"logits\")\n\n # Reward prediction if needed.\n reward_pred = 0.0\n if self.has_reward:\n reward_pred = tf.expand_dims( # Add a fake channels dim.\n tf.reduce_mean(x, axis=[1, 2], keepdims=True), axis=3)\n return x, reward_pred, extra_loss, lstm_states\n\n def body(self, features):\n hparams = self.hparams\n self.has_action = \"input_action\" in features\n self.has_reward = \"target_reward\" in features\n # dirty hack to enable the latent tower\n self.features = features\n\n # Split inputs and targets into lists.\n input_frames = tf.unstack(features[\"inputs\"], axis=1)\n target_frames = tf.unstack(features[\"targets\"], axis=1)\n all_frames = input_frames + target_frames\n if self.has_action:\n input_actions = tf.unstack(features[\"input_action\"], axis=1)\n target_actions = tf.unstack(features[\"target_action\"], axis=1)\n all_actions = input_actions + target_actions\n\n res_frames, sampled_frames, sampled_frames_raw, res_rewards = [], [], [], []\n lstm_states = [None] * hparams.num_lstm_layers\n extra_loss = 0.0\n\n num_frames = len(all_frames)\n for i in range(num_frames - 
1):\n frame = all_frames[i]\n action = all_actions[i] if self.has_action else None\n\n # more hack to enable latent_tower\n # TODO(mbz): clean this up.\n self.features[\"inputs\"] = all_frames[i]\n self.features[\"cur_target_frame\"] = all_frames[i+1]\n\n # Run model.\n with tf.variable_scope(\"recurrent_model\", reuse=tf.AUTO_REUSE):\n func_out = self.predict_next_frame(frame, action, lstm_states)\n res_frame, res_reward, res_extra_loss, lstm_states = func_out\n res_frames.append(res_frame)\n res_rewards.append(res_reward)\n extra_loss += res_extra_loss\n\n sampled_frame_raw = self.get_sampled_frame(res_frame)\n sampled_frames_raw.append(sampled_frame_raw)\n # TODO(lukaszkaiser): this should be consistent with modality.bottom()\n sampled_frame = common_layers.standardize_images(sampled_frame_raw)\n sampled_frames.append(sampled_frame)\n\n # Only for Softmax loss: sample next frame so we can keep iterating.\n if self.is_predicting and i >= hparams.video_num_input_frames:\n all_frames[i+1] = sampled_frame\n\n # Concatenate results and return them.\n output_frames = res_frames[hparams.video_num_input_frames-1:]\n frames = tf.stack(output_frames, axis=1)\n\n has_input_predictions = hparams.video_num_input_frames > 1\n if self.is_training and hparams.internal_loss and has_input_predictions:\n # add the loss for input frames as well.\n extra_gts = input_frames[1:]\n extra_pds = res_frames[:hparams.video_num_input_frames-1]\n extra_raw_gts = features[\"inputs_raw\"][:, 1:]\n recon_loss = self.get_extra_internal_loss(\n extra_raw_gts, extra_gts, extra_pds)\n extra_loss += recon_loss\n\n if not self.has_reward:\n return frames, extra_loss\n rewards = tf.concat(res_rewards[hparams.video_num_input_frames-1:], axis=1)\n return {\"targets\": frames, \"target_reward\": rewards}, extra_loss\n\n\n@registry.register_hparams\ndef next_frame_basic_recurrent():\n \"\"\"Basic 2-frame recurrent model with stochastic tower.\"\"\"\n hparams = basic_stochastic.next_frame_basic_stochastic_discrete()\n hparams.video_num_input_frames = 4\n hparams.video_num_target_frames = 4\n hparams.add_hparam(\"num_lstm_layers\", 2)\n hparams.add_hparam(\"num_lstm_filters\", 256)\n return hparams\n" ]
[ [ "tensorflow.contrib.eager.run_test_in_graph_and_eager_modes", "tensorflow.uniform_unit_scaling_initializer", "tensorflow.train.create_global_step", "tensorflow.random_uniform", "tensorflow.test.main", "tensorflow.global_variables_initializer" ], [ "tensorflow.concat", "tensorflow.variable_scope", "tensorflow.layers.conv2d", "tensorflow.nn.dropout", "tensorflow.stack", "tensorflow.layers.dense", "tensorflow.layers.conv2d_transpose", "tensorflow.reduce_mean", "tensorflow.unstack", "tensorflow.random_normal_initializer" ] ]
Monytccc/Script313
[ "ecd1a28c964f721dc2c21824dde887b013893491" ]
[ "bruteforce.py" ]
[ "import string\r\nfrom itertools import product\r\nfrom time import time\r\nfrom numpy import loadtxt\r\n\r\n\r\ndef product_loop(password, generator):\r\n for p in generator:\r\n if ''.join(p) == password:\r\n print('\\nPassword:', ''.join(p))\r\n return ''.join(p)\r\n return False\r\n\r\n\r\ndef bruteforce(password, max_nchar=8):\r\n \"\"\"Password brute-force algorithm.\r\n\r\n Parameters\r\n ----------\r\n password : string\r\n To-be-found password.\r\n max_nchar : int\r\n Maximum number of characters of password.\r\n\r\n Return\r\n ------\r\n bruteforce_password : string\r\n Brute-forced password\r\n \"\"\"\r\n print('1) Comparing with most common passwords / first names')\r\n common_pass = loadtxt('probable-v2-top12000.txt', dtype=str)\r\n common_names = loadtxt('middle-names.txt', dtype=str)\r\n cp = [c for c in common_pass if c == password]\r\n cn = [c for c in common_names if c == password]\r\n cnl = [c.lower() for c in common_names if c.lower() == password]\r\n\r\n if len(cp) == 1:\r\n print('\\nPassword:', cp)\r\n return cp\r\n if len(cn) == 1:\r\n print('\\nPassword:', cn)\r\n return cn\r\n if len(cnl) == 1:\r\n print('\\nPassword:', cnl)\r\n return cnl\r\n\r\n print('2) Digits cartesian product')\r\n for l in range(1, 9):\r\n generator = product(string.digits, repeat=int(l))\r\n print(\"\\t..%d digit\" % l)\r\n p = product_loop(password, generator)\r\n if p is not False:\r\n return p\r\n\r\n print('3) Digits + ASCII lowercase')\r\n for l in range(1, max_nchar + 1):\r\n print(\"\\t..%d char\" % l)\r\n generator = product(string.digits + string.ascii_lowercase,\r\n repeat=int(l))\r\n p = product_loop(password, generator)\r\n if p is not False:\r\n return p\r\n\r\n print('4) Digits + ASCII lower / upper + punctuation')\r\n # If it fails, we start brute-forcing the 'hard' way\r\n # Same as possible_char = string.printable[:-5]\r\n all_char = string.digits + string.ascii_letters + string.punctuation\r\n\r\n for l in range(1, max_nchar + 1):\r\n print(\"\\t..%d char\" % l)\r\n generator = product(all_char, repeat=int(l))\r\n p = product_loop(password, generator)\r\n if p is not False:\r\n return p\r\n\r\n\r\n# EXAMPLE\r\nstart = time()\r\nbruteforce('sunshine') # Try with '123456' or '751345' or 'test2018'\r\nend = time()\r\nprint('Total time: %.2f seconds' % (end - start))" ]
[ [ "numpy.loadtxt" ] ]
gwli/supervised-reptile
[ "5b181577e139a7854729650bac9410b9bb5d29d9", "5b181577e139a7854729650bac9410b9bb5d29d9" ]
[ "supervised_reptile/train.py", "supervised_reptile/reptile.py" ]
[ "\"\"\"\nTraining helpers for supervised meta-learning.\n\"\"\"\n\nimport os\nimport time\n\nimport tensorflow as tf\n\nfrom .reptile import Reptile\nfrom .variables import weight_decay\n\n# pylint: disable=R0913,R0914\ndef train(sess,\n model,\n train_set,\n test_set,\n save_dir,\n num_classes=5,\n num_shots=5,\n inner_batch_size=5,\n inner_iters=20,\n meta_step_size=0.1,\n meta_step_size_final=0.1,\n meta_batch_size=1,\n meta_iters=400000,\n eval_inner_batch_size=5,\n eval_inner_iters=50,\n eval_interval=10,\n weight_decay_rate=1,\n time_deadline=None,\n train_shots=None,\n transductive=False,\n reptile_fn=Reptile,\n log_fn=print):\n \"\"\"\n Train a model on a dataset.\n \"\"\"\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n saver = tf.train.Saver()\n reptile = reptile_fn(sess,\n transductive=transductive,\n pre_step_op=weight_decay(weight_decay_rate))\n accuracy_ph = tf.placeholder(tf.float32, shape=())\n tf.summary.scalar('accuracy', accuracy_ph)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(os.path.join(save_dir, 'train'), sess.graph)\n test_writer = tf.summary.FileWriter(os.path.join(save_dir, 'test'), sess.graph)\n tf.global_variables_initializer().run()\n sess.run(tf.global_variables_initializer())\n for i in range(meta_iters):\n frac_done = i / meta_iters\n cur_meta_step_size = frac_done * meta_step_size_final + (1 - frac_done) * meta_step_size\n reptile.train_step(train_set, model.input_ph, model.label_ph, model.minimize_op,\n num_classes=num_classes, num_shots=(train_shots or num_shots),\n inner_batch_size=inner_batch_size, inner_iters=inner_iters,\n meta_step_size=cur_meta_step_size, meta_batch_size=meta_batch_size)\n if i % eval_interval == 0:\n accuracies = []\n for dataset, writer in [(train_set, train_writer), (test_set, test_writer)]:\n correct = reptile.evaluate(dataset, model.input_ph, model.label_ph,\n model.minimize_op, model.predictions,\n num_classes=num_classes, num_shots=num_shots,\n inner_batch_size=eval_inner_batch_size,\n inner_iters=eval_inner_iters)\n summary = sess.run(merged, feed_dict={accuracy_ph: correct/num_classes})\n writer.add_summary(summary, i)\n writer.flush()\n accuracies.append(correct / num_classes)\n log_fn('batch %d: train=%f test=%f' % (i, accuracies[0], accuracies[1]))\n if i % 100 == 0 or i == meta_iters-1:\n saver.save(sess, os.path.join(save_dir, 'model.ckpt'), global_step=i)\n if time_deadline is not None and time.time() > time_deadline:\n break\n", "\"\"\"\nSupervised Reptile learning and evaluation on arbitrary\ndatasets.\n\"\"\"\n\nimport random\n\nimport tensorflow as tf\n\nfrom .variables import (interpolate_vars, average_vars, subtract_vars, add_vars, scale_vars,\n VariableState)\n\nclass Reptile:\n \"\"\"\n A meta-learning session.\n\n Reptile can operate in two evaluation modes: normal\n and transductive. 
In transductive mode, information is\n allowed to leak between test samples via BatchNorm.\n Typically, MAML is used in a transductive manner.\n \"\"\"\n def __init__(self, session, variables=None, transductive=False, pre_step_op=None):\n self.session = session\n self._model_state = VariableState(self.session, variables or tf.trainable_variables())\n self._full_state = VariableState(self.session,\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))\n self._transductive = transductive\n self._pre_step_op = pre_step_op\n\n # pylint: disable=R0913,R0914\n def train_step(self,\n dataset,\n input_ph,\n label_ph,\n minimize_op,\n num_classes,\n num_shots,\n inner_batch_size,\n inner_iters,\n meta_step_size,\n meta_batch_size):\n \"\"\"\n Perform a Reptile training step.\n\n Args:\n dataset: a sequence of data classes, where each data\n class has a sample(n) method.\n input_ph: placeholder for a batch of samples.\n label_ph: placeholder for a batch of labels.\n minimize_op: TensorFlow Op to minimize a loss on the\n batch specified by input_ph and label_ph.\n num_classes: number of data classes to sample.\n num_shots: number of examples per data class.\n inner_batch_size: batch size for every inner-loop\n training iteration.\n inner_iters: number of inner-loop iterations.\n meta_step_size: interpolation coefficient.\n meta_batch_size: how many inner-loops to run.\n \"\"\"\n old_vars = self._model_state.export_variables()\n new_vars = []\n for _ in range(meta_batch_size):\n mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots)\n for batch in _mini_batches(mini_dataset, inner_batch_size, inner_iters):\n inputs, labels = zip(*batch)\n if self._pre_step_op:\n self.session.run(self._pre_step_op)\n self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})\n new_vars.append(self._model_state.export_variables())\n self._model_state.import_variables(old_vars)\n new_vars = average_vars(new_vars)\n self._model_state.import_variables(interpolate_vars(old_vars, new_vars, meta_step_size))\n\n def evaluate(self,\n dataset,\n input_ph,\n label_ph,\n minimize_op,\n predictions,\n num_classes,\n num_shots,\n inner_batch_size,\n inner_iters):\n \"\"\"\n Run a single evaluation of the model.\n\n Samples a few-shot learning task and measures\n performance.\n\n Args:\n dataset: a sequence of data classes, where each data\n class has a sample(n) method.\n input_ph: placeholder for a batch of samples.\n label_ph: placeholder for a batch of labels.\n minimize_op: TensorFlow Op to minimize a loss on the\n batch specified by input_ph and label_ph.\n predictions: a Tensor of integer label predictions.\n num_classes: number of data classes to sample.\n num_shots: number of examples per data class.\n inner_batch_size: batch size for every inner-loop\n training iteration.\n inner_iters: number of inner-loop iterations.\n\n Returns:\n The number of correctly predicted samples.\n This always ranges from 0 to num_classes.\n \"\"\"\n train_set = list(_sample_mini_dataset(dataset, num_classes, num_shots+1))\n test_set = []\n for label in range(num_classes):\n for i, item in enumerate(train_set):\n if item[1] == label:\n del train_set[i]\n test_set.append(item)\n break\n old_vars = self._full_state.export_variables()\n for batch in _mini_batches(train_set, inner_batch_size, inner_iters):\n inputs, labels = zip(*batch)\n if self._pre_step_op:\n self.session.run(self._pre_step_op)\n self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})\n test_preds = 
self._test_predictions(train_set, test_set, input_ph, predictions)\n num_correct = sum([pred == sample[1] for pred, sample in zip(test_preds, test_set)])\n self._full_state.import_variables(old_vars)\n return num_correct\n\n def _test_predictions(self, train_set, test_set, input_ph, predictions):\n if self._transductive:\n inputs, _ = zip(*test_set)\n return self.session.run(predictions, feed_dict={input_ph: inputs})\n res = []\n for test_sample in test_set:\n inputs, _ = zip(*train_set)\n inputs += (test_sample[0],)\n res.append(self.session.run(predictions, feed_dict={input_ph: inputs})[-1])\n return res\n\nclass FOML(Reptile):\n \"\"\"\n A basic implementation of \"first-order MAML\" (FOML).\n\n FOML is similar to Reptile, except that you use the\n gradient from the last mini-batch as the update\n direction.\n \"\"\"\n # pylint: disable=R0913,R0914\n def train_step(self,\n dataset,\n input_ph,\n label_ph,\n minimize_op,\n num_classes,\n num_shots,\n inner_batch_size,\n inner_iters,\n meta_step_size,\n meta_batch_size):\n old_vars = self._model_state.export_variables()\n updates = []\n for _ in range(meta_batch_size):\n mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots)\n for batch in _mini_batches(mini_dataset, inner_batch_size, inner_iters):\n inputs, labels = zip(*batch)\n last_backup = self._model_state.export_variables()\n if self._pre_step_op:\n self.session.run(self._pre_step_op)\n self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})\n updates.append(subtract_vars(self._model_state.export_variables(), last_backup))\n self._model_state.import_variables(old_vars)\n update = average_vars(updates)\n self._model_state.import_variables(add_vars(old_vars, scale_vars(update, meta_step_size)))\n\ndef _sample_mini_dataset(dataset, num_classes, num_shots):\n \"\"\"\n Sample a few shot task from a dataset.\n\n Returns:\n An iterable of (input, label) pairs.\n \"\"\"\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)\n\ndef _mini_batches(samples, batch_size, num_batches):\n \"\"\"\n Generate mini-batches from some data.\n\n Returns:\n An iterable of sequences of (input, label) pairs,\n where each sequence is a mini-batch.\n \"\"\"\n cur_batch = []\n samples = list(samples)\n batch_count = 0\n while True:\n random.shuffle(samples)\n for sample in samples:\n cur_batch.append(sample)\n if len(cur_batch) < batch_size:\n continue\n yield cur_batch\n cur_batch = []\n batch_count += 1\n if batch_count == num_batches:\n return\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.train.Saver", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.global_variables_initializer" ], [ "tensorflow.trainable_variables", "tensorflow.get_collection" ] ]
yogeshmj/clinica
[ "1588d708eb401731c62ca23c2db26c29e2ad92e6" ]
[ "clinica/iotools/converters/adni_to_bids/adni_modalities/adni_dwi.py" ]
[ "# coding: utf-8\n\n\"\"\"\n Module for converting DWI of ADNI\n\"\"\"\n\n__author__ = \"Jorge Samper-Gonzalez and Sabrina Fontanella\"\n__copyright__ = \"Copyright 2016-2019 The Aramis Lab Team\"\n__license__ = \"See LICENSE.txt file\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Jorge Samper-Gonzalez\"\n__email__ = \"jorge.samper-gonzalez@inria.fr\"\n__status__ = \"Development\"\n\n\ndef convert_adni_dwi(source_dir, csv_dir, dest_dir, subjs_list=None):\n \"\"\"\n Convert DW images of ADNI into BIDS format\n\n Args:\n source_dir: path to the ADNI directory\n csv_dir: path to the clinical data directory\n dest_dir: path to the destination BIDS directory\n subjs_list: subjects list\n\n \"\"\"\n\n import pandas as pd\n from os import path\n from clinica.utils.stream import cprint\n from colorama import Fore\n from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids\n\n if subjs_list is None:\n adni_merge_path = path.join(csv_dir, 'ADNIMERGE.csv')\n adni_merge = pd.read_csv(adni_merge_path, sep=',', low_memory=False)\n subjs_list = list(adni_merge.PTID.unique())\n\n cprint('Calculating paths of DWI images. Output will be stored in ' + path.join(dest_dir, 'conversion_info') + '.')\n images = compute_dwi_paths(source_dir, csv_dir, dest_dir, subjs_list)\n cprint('Paths of DWI images found. Exporting images into BIDS ...')\n # dwi_paths_to_bids(images, dest_dir)\n paths_to_bids(images, dest_dir, 'dwi')\n cprint(Fore.GREEN + 'DWI conversion done.' + Fore.RESET)\n\n\ndef compute_dwi_paths(source_dir, csv_dir, dest_dir, subjs_list):\n \"\"\"\n Compute paths to DW images to convert to BIDS\n\n Args:\n source_dir: path to the ADNI directory\n csv_dir: path to the clinical data directory\n dest_dir: path to the destination BIDS directory\n subjs_list: subjects list\n\n Returns:\n images: pandas dataframe that contains the path to all the DW images to convert\n\n \"\"\"\n\n from os import path, mkdir\n import pandas as pd\n\n from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, visits_to_timepoints\n\n dwi_col_df = ['Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date',\n 'Study_ID', 'Series_ID', 'Image_ID', 'Field_Strength']\n dwi_df = pd.DataFrame(columns=dwi_col_df)\n dwi_dfs_list = []\n\n # Loading needed .csv files\n adni_merge = pd.read_csv(path.join(csv_dir, 'ADNIMERGE.csv'), sep=',', low_memory=False)\n\n mayo_mri_qc = pd.read_csv(path.join(csv_dir, 'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv'), sep=',', low_memory=False)\n mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == 'DTI']\n\n mri_list = pd.read_csv(path.join(csv_dir, 'MRILIST.csv'), sep=',', low_memory=False)\n\n # Selecting only DTI images that are not Multiband, processed or enhanced images\n mri_list = mri_list[mri_list.SEQUENCE.str.contains('dti', case=False, na=False)]\n unwanted_sequences = ['MB', 'ADC', 'FA', 'TRACEW', 'Enhanced', 'Reg']\n mri_list = mri_list[mri_list.SEQUENCE.map(lambda x: not any(subs in x for subs in unwanted_sequences))]\n\n for subj in subjs_list:\n\n # Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date\n adnimerge_subj = adni_merge[adni_merge.PTID == subj]\n adnimerge_subj = adnimerge_subj.sort_values('EXAMDATE')\n\n mri_list_subj = mri_list[mri_list.SUBJECT == subj]\n mri_list_subj = mri_list_subj.sort_values('SCANDATE')\n\n mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]\n\n # Obtain corresponding timepoints for the subject visits\n visits = visits_to_timepoints(subj, mri_list_subj, 
adnimerge_subj, \"DWI\")\n\n for visit_info in visits.keys():\n timepoint = visit_info[0]\n visit_str = visits[visit_info]\n\n visit_mri_list = mri_list_subj[mri_list_subj.VISIT == visit_str]\n axial = dwi_image(subj, timepoint, visits[visit_info], visit_mri_list, mayo_mri_qc_subj)\n\n if axial is not None:\n row_to_append = pd.DataFrame(axial, index=['i', ])\n dwi_dfs_list.append(row_to_append)\n\n if dwi_dfs_list:\n dwi_df = pd.concat(dwi_dfs_list, ignore_index=True)\n\n # Exceptions\n # ==========\n conversion_errors = [('029_S_2395', 'm60'),\n ('029_S_0824', 'm108'),\n ('029_S_0914', 'm108'),\n ('027_S_2219', 'm36'),\n ('129_S_2332', 'm12'),\n ('029_S_4384', 'm48'),\n ('029_S_4385', 'm48'),\n ('029_S_4585', 'm48'),\n ('016_S_4591', 'm24'),\n ('094_S_4630', 'm06'),\n ('094_S_4649', 'm06'),\n ('029_S_5219', 'm24'),\n ('094_S_2238', 'm48'),\n ('129_S_4287', 'bl'),\n ('007_S_4611', 'm03'),\n ('016_S_4638', 'bl'),\n ('027_S_5118', 'bl'),\n ('098_S_4018', 'bl'),\n ('098_S_4003', 'm12'),\n ('016_S_4584', 'm24'),\n ('016_S_5007', 'm12'),\n ('129_S_2347', 'm06'),\n ('129_S_4220', 'bl'),\n ('007_S_2058', 'm12'),\n ('016_S_2007', 'm06'),\n ('020_S_6358', 'bl'),\n ('114_S_6039', 'm12'),\n ('114_S_6057', 'bl'),\n ('153_S_6274', 'bl'),\n ('006_S_4485', 'm84'),\n ('153_S_6237', 'bl'),\n ('153_S_6336', 'bl'),\n ('153_S_6450', 'bl'),\n ('003_S_4441', 'm12'),\n # Several output images\n ('029_S_2395', 'm72')]\n\n # Removing known exceptions from images to convert\n if not dwi_df.empty:\n error_ind = dwi_df.index[dwi_df.apply(lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1)]\n dwi_df.drop(error_ind, inplace=True)\n\n # Checking for images paths in filesystem\n images = find_image_path(dwi_df, source_dir, 'DWI', 'S', 'Series_ID')\n\n dwi_tsv_path = path.join(dest_dir, 'conversion_info')\n if not path.exists(dwi_tsv_path):\n mkdir(dwi_tsv_path)\n images.to_csv(path.join(dwi_tsv_path, 'dwi_paths.tsv'), sep='\\t', index=False)\n\n return images\n\n\ndef dwi_image(subject_id, timepoint, visit_str, visit_mri_list, mri_qc_subj):\n \"\"\"\n One image among those in the input list is chosen according to QC\n and then correspoding metadata is extracted to a dictionary\n\n Args:\n subject_id: Subject identifier\n timepoint: Visit code\n visit_str: Visit name\n visit_mri_list: List of images metadata\n mri_qc_subj: Dataframe containing list of QC of scans for the subject\n\n Returns: dictionary - contains image metadata\n\n \"\"\"\n from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars, select_image_qc\n\n sel_image = select_image_qc(list(visit_mri_list.IMAGEUID), mri_qc_subj)\n if sel_image is None:\n return None\n\n sel_scan = visit_mri_list[visit_mri_list.IMAGEUID == sel_image].iloc[0]\n\n image_dict = {'Subject_ID': subject_id,\n 'VISCODE': timepoint,\n 'Visit': visit_str,\n 'Sequence': replace_sequence_chars(sel_scan.SEQUENCE),\n 'Scan_Date': sel_scan['SCANDATE'],\n 'Study_ID': str(int(sel_scan.STUDYID)),\n 'Series_ID': str(int(sel_scan.SERIESID)),\n 'Image_ID': str(int(sel_scan.IMAGEUID)),\n 'Field_Strength': sel_scan.MAGSTRENGTH}\n\n return image_dict\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
Komnomnomnom/pandas
[ "1b43f0fcfc882bcd499f6da114df104ee3813748" ]
[ "pandas/io/tests/test_pytables.py" ]
[ "import nose\nimport sys\nimport os\nimport warnings\nimport tempfile\nfrom contextlib import contextmanager\n\nimport datetime\nimport numpy as np\n\nimport pandas\nfrom pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,\n date_range, Index, DatetimeIndex, isnull)\nfrom pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,\n IncompatibilityWarning, PerformanceWarning,\n AttributeConflictWarning, DuplicateWarning,\n PossibleDataLossError, ClosedFileError)\nfrom pandas.io import pytables as pytables\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (assert_panel4d_equal,\n assert_panel_equal,\n assert_frame_equal,\n assert_series_equal)\nfrom pandas import concat, Timestamp\nfrom pandas import compat, _np_version_under1p7\nfrom pandas.compat import range, lrange, u\nfrom pandas.util.testing import assert_produces_warning\n\ntry:\n import tables\nexcept ImportError:\n raise nose.SkipTest('no pytables')\n\nfrom distutils.version import LooseVersion\n\n_default_compressor = LooseVersion(tables.__version__) >= '2.2' \\\n and 'blosc' or 'zlib'\n\n_multiprocess_can_split_ = False\n\n# contextmanager to ensure the file cleanup\ndef safe_remove(path):\n if path is not None:\n try:\n os.remove(path)\n except:\n pass\n\n\ndef safe_close(store):\n try:\n if store is not None:\n store.close()\n except:\n pass\n\n\ndef create_tempfile(path):\n \"\"\" create an unopened named temporary file \"\"\"\n return os.path.join(tempfile.gettempdir(),path)\n\n@contextmanager\ndef ensure_clean_store(path, mode='a', complevel=None, complib=None,\n fletcher32=False):\n\n try:\n\n # put in the temporary path if we don't have one already\n if not len(os.path.dirname(path)):\n path = create_tempfile(path)\n\n store = HDFStore(path, mode=mode, complevel=complevel,\n complib=complib, fletcher32=False)\n yield store\n finally:\n safe_close(store)\n if mode == 'w' or mode == 'a':\n safe_remove(path)\n\n@contextmanager\ndef ensure_clean_path(path):\n \"\"\"\n return essentially a named temporary file that is not opened\n and deleted on existing; if path is a list, then create and\n return list of filenames\n \"\"\"\n try:\n if isinstance(path, list):\n filenames = [ create_tempfile(p) for p in path ]\n yield filenames\n else:\n filenames = [ create_tempfile(path) ]\n yield filenames[0]\n finally:\n for f in filenames:\n safe_remove(f)\n\n# set these parameters so we don't have file sharing\ntables.parameters.MAX_NUMEXPR_THREADS = 1\ntables.parameters.MAX_BLOSC_THREADS = 1\ntables.parameters.MAX_THREADS = 1\n\ndef _maybe_remove(store, key):\n \"\"\"For tests using tables, try removing the table to be sure there is\n no content from previous tests using the same table name.\"\"\"\n try:\n store.remove(key)\n except:\n pass\n\n\ndef compat_assert_produces_warning(w,f):\n \"\"\" don't produce a warning under PY3 \"\"\"\n if compat.PY3:\n f()\n else:\n with tm.assert_produces_warning(expected_warning=w):\n f()\n\n\nclass TestHDFStore(tm.TestCase):\n\n def setUp(self):\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.path = 'tmp.__%s__.h5' % tm.rands(10)\n\n def tearDown(self):\n pass\n\n def test_factory_fun(self):\n try:\n with get_store(self.path) as tbl:\n raise ValueError('blah')\n except ValueError:\n pass\n finally:\n safe_remove(self.path)\n\n try:\n with get_store(self.path) as tbl:\n tbl['a'] = tm.makeDataFrame()\n\n with get_store(self.path) as tbl:\n self.assertEquals(len(tbl), 1)\n self.assertEquals(type(tbl['a']), DataFrame)\n finally:\n 
safe_remove(self.path)\n\n def test_conv_read_write(self):\n\n try:\n\n def roundtrip(key, obj,**kwargs):\n obj.to_hdf(self.path, key,**kwargs)\n return read_hdf(self.path, key)\n\n o = tm.makeTimeSeries()\n assert_series_equal(o, roundtrip('series',o))\n\n o = tm.makeStringSeries()\n assert_series_equal(o, roundtrip('string_series',o))\n\n o = tm.makeDataFrame()\n assert_frame_equal(o, roundtrip('frame',o))\n\n o = tm.makePanel()\n assert_panel_equal(o, roundtrip('panel',o))\n\n # table\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n df.to_hdf(self.path,'table',append=True)\n result = read_hdf(self.path, 'table', where = ['index>2'])\n assert_frame_equal(df[df.index>2],result)\n\n finally:\n safe_remove(self.path)\n\n def test_api(self):\n\n # GH4584\n # API issue when to_hdf doesn't acdept append AND format args\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path,'df',append=True,format='table')\n df.iloc[10:].to_hdf(path,'df',append=True,format='table')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n # append to False\n df.iloc[:10].to_hdf(path,'df',append=False,format='table')\n df.iloc[10:].to_hdf(path,'df',append=True,format='table')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path,'df',append=True)\n df.iloc[10:].to_hdf(path,'df',append=True,format='table')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n # append to False\n df.iloc[:10].to_hdf(path,'df',append=False,format='table')\n df.iloc[10:].to_hdf(path,'df',append=True)\n assert_frame_equal(read_hdf(path,'df'),df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path,'df',append=False,format='fixed')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n df.to_hdf(path,'df',append=False,format='f')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n df.to_hdf(path,'df',append=False)\n assert_frame_equal(read_hdf(path,'df'),df)\n\n df.to_hdf(path,'df')\n assert_frame_equal(read_hdf(path,'df'),df)\n\n with ensure_clean_store(self.path) as store:\n\n path = store._path\n df = tm.makeDataFrame()\n\n _maybe_remove(store,'df')\n store.append('df',df.iloc[:10],append=True,format='table')\n store.append('df',df.iloc[10:],append=True,format='table')\n assert_frame_equal(store.select('df'),df)\n\n # append to False\n _maybe_remove(store,'df')\n store.append('df',df.iloc[:10],append=False,format='table')\n store.append('df',df.iloc[10:],append=True,format='table')\n assert_frame_equal(store.select('df'),df)\n\n # formats\n _maybe_remove(store,'df')\n store.append('df',df.iloc[:10],append=False,format='table')\n store.append('df',df.iloc[10:],append=True,format='table')\n assert_frame_equal(store.select('df'),df)\n\n _maybe_remove(store,'df')\n store.append('df',df.iloc[:10],append=False,format='table')\n store.append('df',df.iloc[10:],append=True,format=None)\n assert_frame_equal(store.select('df'),df)\n\n with ensure_clean_path(self.path) as path:\n\n # invalid\n df = tm.makeDataFrame()\n self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')\n self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')\n\n self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')\n self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')\n\n\n def test_api_default_format(self):\n\n # default_format option\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n\n 
pandas.set_option('io.hdf.default_format','fixed')\n _maybe_remove(store,'df')\n store.put('df',df)\n self.assert_(not store.get_storer('df').is_table)\n self.assertRaises(ValueError, store.append, 'df2',df)\n\n pandas.set_option('io.hdf.default_format','table')\n _maybe_remove(store,'df')\n store.put('df',df)\n self.assert_(store.get_storer('df').is_table)\n _maybe_remove(store,'df2')\n store.append('df2',df)\n self.assert_(store.get_storer('df').is_table)\n\n pandas.set_option('io.hdf.default_format',None)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n pandas.set_option('io.hdf.default_format','fixed')\n df.to_hdf(path,'df')\n with get_store(path) as store:\n self.assert_(not store.get_storer('df').is_table)\n self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)\n\n pandas.set_option('io.hdf.default_format','table')\n df.to_hdf(path,'df3')\n with get_store(path) as store:\n self.assert_(store.get_storer('df3').is_table)\n df.to_hdf(path,'df4',append=True)\n with get_store(path) as store:\n self.assert_(store.get_storer('df4').is_table)\n\n pandas.set_option('io.hdf.default_format',None)\n\n def test_keys(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n store['d'] = tm.makePanel()\n store['foo/bar'] = tm.makePanel()\n self.assertEquals(len(store), 5)\n self.assert_(set(\n store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))\n\n def test_repr(self):\n\n with ensure_clean_store(self.path) as store:\n repr(store)\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n store['d'] = tm.makePanel()\n store['foo/bar'] = tm.makePanel()\n store.append('e', tm.makePanel())\n\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001,1,2,0,0)\n df['datetime2'] = datetime.datetime(2001,1,3,0,0)\n df.ix[3:6,['obj1']] = np.nan\n df = df.consolidate().convert_objects()\n\n warnings.filterwarnings('ignore', category=PerformanceWarning)\n store['df'] = df\n warnings.filterwarnings('always', category=PerformanceWarning)\n\n # make a random group in hdf space\n store._handle.createGroup(store._handle.root,'bah')\n\n repr(store)\n str(store)\n\n # storers\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeDataFrame()\n store.append('df',df)\n\n s = store.get_storer('df')\n repr(s)\n str(s)\n\n def test_contains(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n store['foo/bar'] = tm.makeDataFrame()\n self.assert_('a' in store)\n self.assert_('b' in store)\n self.assert_('c' not in store)\n self.assert_('foo/bar' in store)\n self.assert_('/foo/bar' in store)\n self.assert_('/foo/b' not in store)\n self.assert_('bar' not in store)\n\n # GH 2694\n warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)\n store['node())'] = tm.makeDataFrame()\n self.assert_('node())' in store)\n\n def test_versioning(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n 
self.assert_(store.root.a._v_attrs.pandas_version == '0.10.1')\n self.assert_(store.root.b._v_attrs.pandas_version == '0.10.1')\n self.assert_(store.root.df1._v_attrs.pandas_version == '0.10.1')\n\n # write a file and wipe its versioning\n _maybe_remove(store, 'df2')\n store.append('df2', df)\n\n # this is an error because its table_type is appendable, but no version\n # info\n store.get_node('df2')._v_attrs.pandas_version = None\n self.assertRaises(Exception, store.select, 'df2')\n\n def test_mode(self):\n\n df = tm.makeTimeDataFrame()\n\n def check(mode):\n\n with ensure_clean_path(self.path) as path:\n\n # constructor\n if mode in ['r','r+']:\n self.assertRaises(IOError, HDFStore, path, mode=mode)\n\n else:\n store = HDFStore(path,mode=mode)\n self.assert_(store._handle.mode == mode)\n store.close()\n\n with ensure_clean_path(self.path) as path:\n\n # context\n if mode in ['r','r+']:\n def f():\n with get_store(path,mode=mode) as store:\n pass\n self.assertRaises(IOError, f)\n else:\n with get_store(path,mode=mode) as store:\n self.assert_(store._handle.mode == mode)\n\n with ensure_clean_path(self.path) as path:\n\n # conv write\n if mode in ['r','r+']:\n self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)\n df.to_hdf(path,'df',mode='w')\n else:\n df.to_hdf(path,'df',mode=mode)\n\n # conv read\n if mode in ['w']:\n self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)\n else:\n result = read_hdf(path,'df',mode=mode)\n assert_frame_equal(result,df)\n\n check('r')\n check('r+')\n check('a')\n check('w')\n\n def test_reopen_handle(self):\n\n with ensure_clean_path(self.path) as path:\n\n store = HDFStore(path,mode='a')\n store['a'] = tm.makeTimeSeries()\n\n # invalid mode change\n self.assertRaises(PossibleDataLossError, store.open, 'w')\n store.close()\n self.assert_(not store.is_open)\n\n # truncation ok here\n store.open('w')\n self.assert_(store.is_open)\n self.assertEquals(len(store), 0)\n store.close()\n self.assert_(not store.is_open)\n\n store = HDFStore(path,mode='a')\n store['a'] = tm.makeTimeSeries()\n\n # reopen as read\n store.open('r')\n self.assert_(store.is_open)\n self.assertEquals(len(store), 1)\n self.assert_(store._mode == 'r')\n store.close()\n self.assert_(not store.is_open)\n\n # reopen as append\n store.open('a')\n self.assert_(store.is_open)\n self.assertEquals(len(store), 1)\n self.assert_(store._mode == 'a')\n store.close()\n self.assert_(not store.is_open)\n\n # reopen as append (again)\n store.open('a')\n self.assert_(store.is_open)\n self.assertEquals(len(store), 1)\n self.assert_(store._mode == 'a')\n store.close()\n self.assert_(not store.is_open)\n\n def test_open_args(self):\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n # create an in memory store\n store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)\n store['df'] = df\n store.append('df2',df)\n\n tm.assert_frame_equal(store['df'],df)\n tm.assert_frame_equal(store['df2'],df)\n\n store.close()\n\n # only supported on pytable >= 3.0.0\n if LooseVersion(tables.__version__) >= '3.0.0':\n\n # the file should not have actually been written\n self.assert_(os.path.exists(path) is False)\n\n def test_flush(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store.flush()\n store.flush(fsync=True)\n\n def test_get(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n left = store.get('a')\n right = store['a']\n tm.assert_series_equal(left, right)\n\n left = 
store.get('/a')\n right = store['/a']\n tm.assert_series_equal(left, right)\n\n self.assertRaises(KeyError, store.get, 'b')\n\n def test_getattr(self):\n\n with ensure_clean_store(self.path) as store:\n\n s = tm.makeTimeSeries()\n store['a'] = s\n\n # test attribute access\n result = store.a\n tm.assert_series_equal(result, s)\n result = getattr(store,'a')\n tm.assert_series_equal(result, s)\n\n df = tm.makeTimeDataFrame()\n store['df'] = df\n result = store.df\n tm.assert_frame_equal(result, df)\n\n # errors\n self.assertRaises(AttributeError, getattr, store, 'd')\n\n for x in ['mode','path','handle','complib']:\n self.assertRaises(AttributeError, getattr, store, x)\n\n # not stores\n for x in ['mode','path','handle','complib']:\n getattr(store,\"_%s\" % x)\n\n def test_put(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeTimeDataFrame()\n store['a'] = ts\n store['b'] = df[:10]\n store['foo/bar/bah'] = df[:10]\n store['foo'] = df[:10]\n store['/foo'] = df[:10]\n store.put('c', df[:10], format='table')\n\n # not OK, not a table\n self.assertRaises(\n ValueError, store.put, 'b', df[10:], append=True)\n\n # node does not currently exist, test _is_table_type returns False in\n # this case\n # _maybe_remove(store, 'f')\n # self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)\n\n # can't put to a table (use append instead)\n self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)\n\n # overwrite table\n store.put('c', df[:10], format='table', append=False)\n tm.assert_frame_equal(df[:10], store['c'])\n\n def test_put_string_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n index = Index(\n [\"I am a very long string index: %s\" % i for i in range(20)])\n s = Series(np.arange(20), index=index)\n df = DataFrame({'A': s, 'B': s})\n\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n # mixed length\n index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [\"I am a very long string index: %s\" % i for i in range(20)])\n s = Series(np.arange(21), index=index)\n df = DataFrame({'A': s, 'B': s})\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n def test_put_compression(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n\n store.put('c', df, format='table', complib='zlib')\n tm.assert_frame_equal(store['c'], df)\n\n # can't compress if format='fixed'\n self.assertRaises(ValueError, store.put, 'b', df,\n format='fixed', complib='zlib')\n\n def test_put_compression_blosc(self):\n tm.skip_if_no_package('tables', '2.2', app='blosc support')\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n # can't compress if format='fixed'\n self.assertRaises(ValueError, store.put, 'b', df,\n format='fixed', complib='blosc')\n\n store.put('c', df, format='table', complib='blosc')\n tm.assert_frame_equal(store['c'], df)\n\n def test_put_integer(self):\n # non-date, non-string index\n df = DataFrame(np.random.randn(50, 100))\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n def test_put_mixed_type(self):\n df = tm.makeTimeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 
0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.ix[3:6, ['obj1']] = np.nan\n df = df.consolidate().convert_objects()\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n\n # cannot use assert_produces_warning here for some reason\n # a PendingDeprecationWarning is also raised?\n warnings.filterwarnings('ignore', category=PerformanceWarning)\n store.put('df',df)\n warnings.filterwarnings('always', category=PerformanceWarning)\n\n expected = store.get('df')\n tm.assert_frame_equal(expected,df)\n\n def test_append(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n _maybe_remove(store, 'df2')\n store.put('df2', df[:10], format='table')\n store.append('df2', df[10:])\n tm.assert_frame_equal(store['df2'], df)\n\n _maybe_remove(store, 'df3')\n store.append('/df3', df[:10])\n store.append('/df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n # this is allowed by almost always don't want to do it\n with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):\n _maybe_remove(store, '/df3 foo')\n store.append('/df3 foo', df[:10])\n store.append('/df3 foo', df[10:])\n tm.assert_frame_equal(store['df3 foo'], df)\n\n # panel\n wp = tm.makePanel()\n _maybe_remove(store, 'wp1')\n store.append('wp1', wp.ix[:, :10, :])\n store.append('wp1', wp.ix[:, 10:, :])\n assert_panel_equal(store['wp1'], wp)\n\n # ndim\n p4d = tm.makePanel4D()\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :])\n store.append('p4d', p4d.ix[:, :, 10:, :])\n assert_panel4d_equal(store['p4d'], p4d)\n\n # test using axis labels\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=[\n 'items', 'major_axis', 'minor_axis'])\n store.append('p4d', p4d.ix[:, :, 10:, :], axes=[\n 'items', 'major_axis', 'minor_axis'])\n assert_panel4d_equal(store['p4d'], p4d)\n\n # test using differnt number of items on each axis\n p4d2 = p4d.copy()\n p4d2['l4'] = p4d['l1']\n p4d2['l5'] = p4d['l1']\n _maybe_remove(store, 'p4d2')\n store.append(\n 'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])\n assert_panel4d_equal(store['p4d2'], p4d2)\n\n # test using differt order of items on the non-index axes\n _maybe_remove(store, 'wp1')\n wp_append1 = wp.ix[:, :10, :]\n store.append('wp1', wp_append1)\n wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])\n store.append('wp1', wp_append2)\n assert_panel_equal(store['wp1'], wp)\n\n # dtype issues - mizxed type in a single object column\n df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])\n df['mixed_column'] = 'testing'\n df.ix[2, 'mixed_column'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df)\n tm.assert_frame_equal(store['df'], df)\n\n # uints - test storage of uints\n uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),\n 'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),\n 'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),\n 'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},\n index=np.arange(5))\n _maybe_remove(store, 'uints')\n store.append('uints', uint_data)\n tm.assert_frame_equal(store['uints'], uint_data)\n\n # uints - test storage of uints in indexable columns\n _maybe_remove(store, 'uints')\n store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 
64-bit indices not yet supported\n tm.assert_frame_equal(store['uints'], uint_data)\n\n def test_append_series(self):\n\n with ensure_clean_store(self.path) as store:\n\n # basic\n ss = tm.makeStringSeries()\n ts = tm.makeTimeSeries()\n ns = Series(np.arange(100))\n\n store.append('ss', ss)\n result = store['ss']\n tm.assert_series_equal(result, ss)\n self.assert_(result.name is None)\n\n store.append('ts', ts)\n result = store['ts']\n tm.assert_series_equal(result, ts)\n self.assert_(result.name is None)\n\n ns.name = 'foo'\n store.append('ns', ns)\n result = store['ns']\n tm.assert_series_equal(result, ns)\n self.assert_(result.name == ns.name)\n\n # select on the values\n expected = ns[ns>60]\n result = store.select('ns',Term('foo>60'))\n tm.assert_series_equal(result,expected)\n\n # select on the index and values\n expected = ns[(ns>70) & (ns.index<90)]\n result = store.select('ns',[Term('foo>70'), Term('index<90')])\n tm.assert_series_equal(result,expected)\n\n # multi-index\n mi = DataFrame(np.random.randn(5,1),columns=['A'])\n mi['B'] = np.arange(len(mi))\n mi['C'] = 'foo'\n mi.loc[3:5,'C'] = 'bar'\n mi.set_index(['C','B'],inplace=True)\n s = mi.stack()\n s.index = s.index.droplevel(2)\n store.append('mi', s)\n tm.assert_series_equal(store['mi'], s)\n\n def test_store_index_types(self):\n # GH5386\n # test storing various index types\n\n with ensure_clean_store(self.path) as store:\n\n def check(format,index):\n df = DataFrame(np.random.randn(10,2),columns=list('AB'))\n df.index = index(len(df))\n\n _maybe_remove(store, 'df')\n store.put('df',df,format=format)\n assert_frame_equal(df,store['df'])\n\n for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,\n tm.makeDateIndex, tm.makePeriodIndex ]:\n\n check('table',index)\n check('fixed',index)\n\n # unicode\n index = tm.makeUnicodeIndex\n if compat.PY3:\n check('table',index)\n check('fixed',index)\n else:\n\n # only support for fixed types (and they have a perf warning)\n self.assertRaises(TypeError, check, 'table', index)\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n check('fixed',index)\n\n def test_encoding(self):\n\n if LooseVersion(tables.__version__) < '3.0.0':\n raise nose.SkipTest('tables version does not support proper encoding')\n if sys.byteorder != 'little':\n raise nose.SkipTest('system byteorder is not little')\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame(dict(A='foo',B='bar'),index=range(5))\n df.loc[2,'A'] = np.nan\n df.loc[3,'B'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df, encoding='ascii')\n tm.assert_frame_equal(store['df'], df)\n\n expected = df.reindex(columns=['A'])\n result = store.select('df',Term('columns=A',encoding='ascii'))\n tm.assert_frame_equal(result,expected)\n\n def test_append_some_nans(self):\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),\n 'A1' : np.random.randn(20),\n 'A2' : np.random.randn(20),\n 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp(\"20010101\"), 'E' : datetime.datetime(2001,1,2,0,0) },\n index=np.arange(20))\n # some nans\n _maybe_remove(store, 'df1')\n df.ix[0:15,['A1','B','D','E']] = np.nan\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n # first column\n df1 = df.copy()\n df1.ix[:,'A1'] = np.nan\n _maybe_remove(store, 'df1')\n store.append('df1', df1[:10])\n store.append('df1', df1[10:])\n tm.assert_frame_equal(store['df1'], df1)\n\n # 2nd column\n df2 = df.copy()\n 
df2.ix[:,'A2'] = np.nan\n _maybe_remove(store, 'df2')\n store.append('df2', df2[:10])\n store.append('df2', df2[10:])\n tm.assert_frame_equal(store['df2'], df2)\n\n # datetimes\n df3 = df.copy()\n df3.ix[:,'E'] = np.nan\n _maybe_remove(store, 'df3')\n store.append('df3', df3[:10])\n store.append('df3', df3[10:])\n tm.assert_frame_equal(store['df3'], df3)\n\n def test_append_all_nans(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame({'A1' : np.random.randn(20),\n 'A2' : np.random.randn(20)},\n index=np.arange(20))\n df.ix[0:15,:] = np.nan\n\n\n # nan some entire rows (dropna=True)\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df[-4:])\n\n # nan some entire rows (dropna=False)\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # tests the option io.hdf.dropna_table\n pandas.set_option('io.hdf.dropna_table',False)\n _maybe_remove(store, 'df3')\n store.append('df3', df[:10])\n store.append('df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n pandas.set_option('io.hdf.dropna_table',True)\n _maybe_remove(store, 'df4')\n store.append('df4', df[:10])\n store.append('df4', df[10:])\n tm.assert_frame_equal(store['df4'], df[-4:])\n\n # nan some entire rows (string are still written!)\n df = DataFrame({'A1' : np.random.randn(20),\n 'A2' : np.random.randn(20),\n 'B' : 'foo', 'C' : 'bar'},\n index=np.arange(20))\n\n df.ix[0:15,:] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # nan some entire rows (but since we have dates they are still written!)\n df = DataFrame({'A1' : np.random.randn(20),\n 'A2' : np.random.randn(20),\n 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp(\"20010101\"), 'E' : datetime.datetime(2001,1,2,0,0) },\n index=np.arange(20))\n\n df.ix[0:15,:] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n def test_append_frame_column_oriented(self):\n\n with ensure_clean_store(self.path) as store:\n\n # column oriented\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df.ix[:, :2], axes=['columns'])\n store.append('df1', df.ix[:, 2:])\n tm.assert_frame_equal(store['df1'], df)\n\n result = store.select('df1', 'columns=A')\n expected = df.reindex(columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # selection on the non-indexable\n result = store.select(\n 'df1', ('columns=A', Term('index=df.index[0:4]')))\n expected = df.reindex(columns=['A'], index=df.index[0:4])\n tm.assert_frame_equal(expected, result)\n\n # this isn't supported\n self.assertRaises(TypeError, store.select, 'df1', (\n 'columns=A', Term('index>df.index[4]')))\n\n def test_append_with_different_block_ordering(self):\n\n #GH 4096; using same frames, but different block orderings\n with ensure_clean_store(self.path) as store:\n\n for i in range(10):\n\n df = 
DataFrame(np.random.randn(10,2),columns=list('AB'))\n                df['index'] = range(10)\n                df['index'] += i*10\n                df['int64'] = Series([1]*len(df),dtype='int64')\n                df['int16'] = Series([1]*len(df),dtype='int16')\n\n                if i % 2 == 0:\n                    del df['int64']\n                    df['int64'] = Series([1]*len(df),dtype='int64')\n                if i % 3 == 0:\n                    a = df.pop('A')\n                    df['A'] = a\n\n                df.set_index('index',inplace=True)\n\n                store.append('df',df)\n\n            # test a different ordering but with more fields (like an invalid combination)\n        with ensure_clean_store(self.path) as store:\n\n            df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')\n            df['int64'] = Series([1]*len(df),dtype='int64')\n            df['int16'] = Series([1]*len(df),dtype='int16')\n            store.append('df',df)\n\n            # store additional fields in different blocks\n            df['int16_2'] = Series([1]*len(df),dtype='int16')\n            self.assertRaises(ValueError, store.append, 'df', df)\n\n            # store multiple additional fields in different blocks\n            df['float_3'] = Series([1.]*len(df),dtype='float64')\n            self.assertRaises(ValueError, store.append, 'df', df)\n\n    def test_ndim_indexables(self):\n        \"\"\" test using ndim tables in new ways\"\"\"\n\n        with ensure_clean_store(self.path) as store:\n\n            p4d = tm.makePanel4D()\n\n            def check_indexers(key, indexers):\n                for i, idx in enumerate(indexers):\n                    self.assert_(getattr(getattr(\n                        store.root, key).table.description, idx)._v_pos == i)\n\n            # append then change (will take existing schema)\n            indexers = ['items', 'major_axis', 'minor_axis']\n\n            _maybe_remove(store, 'p4d')\n            store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n            store.append('p4d', p4d.ix[:, :, 10:, :])\n            assert_panel4d_equal(store.select('p4d'), p4d)\n            check_indexers('p4d', indexers)\n\n            # same as above, but try to append with different axes\n            _maybe_remove(store, 'p4d')\n            store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n            store.append('p4d', p4d.ix[:, :, 10:, :], axes=[\n                'labels', 'items', 'major_axis'])\n            assert_panel4d_equal(store.select('p4d'), p4d)\n            check_indexers('p4d', indexers)\n\n            # pass incorrect number of axes\n            _maybe_remove(store, 'p4d')\n            self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[\n                :, :, :10, :], axes=['major_axis', 'minor_axis'])\n\n            # different than default indexables #1\n            indexers = ['labels', 'major_axis', 'minor_axis']\n            _maybe_remove(store, 'p4d')\n            store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n            store.append('p4d', p4d.ix[:, :, 10:, :])\n            assert_panel4d_equal(store['p4d'], p4d)\n            check_indexers('p4d', indexers)\n\n            # different than default indexables #2\n            indexers = ['major_axis', 'labels', 'minor_axis']\n            _maybe_remove(store, 'p4d')\n            store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n            store.append('p4d', p4d.ix[:, :, 10:, :])\n            assert_panel4d_equal(store['p4d'], p4d)\n            check_indexers('p4d', indexers)\n\n            # partial selection\n            result = store.select('p4d', ['labels=l1'])\n            expected = p4d.reindex(labels=['l1'])\n            assert_panel4d_equal(result, expected)\n\n            # partial selection2\n            result = store.select('p4d', [Term(\n                'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])\n            expected = p4d.reindex(\n                labels=['l1'], items=['ItemA'], minor_axis=['B'])\n            assert_panel4d_equal(result, expected)\n\n            # non-existent partial selection\n            result = store.select('p4d', [Term(\n                'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])\n            expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])\n            assert_panel4d_equal(result, expected)\n\n    def test_append_with_strings(self):\n\n        with ensure_clean_store(self.path) as store:\n            wp = tm.makePanel()\n            
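# wp2 carries longer minor_axis labels (an '_extra' suffix) to exercise the string itemsize handling below\n            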
wp2 = wp.rename_axis(\n dict([(x, \"%s_extra\" % x) for x in wp.minor_axis]), axis=2)\n\n def check_col(key,name,size):\n self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)\n\n store.append('s1', wp, min_itemsize=20)\n store.append('s1', wp2)\n expected = concat([wp, wp2], axis=2)\n expected = expected.reindex(minor_axis=sorted(expected.minor_axis))\n assert_panel_equal(store['s1'], expected)\n check_col('s1', 'minor_axis', 20)\n\n # test dict format\n store.append('s2', wp, min_itemsize={'minor_axis': 20})\n store.append('s2', wp2)\n expected = concat([wp, wp2], axis=2)\n expected = expected.reindex(minor_axis=sorted(expected.minor_axis))\n assert_panel_equal(store['s2'], expected)\n check_col('s2', 'minor_axis', 20)\n\n # apply the wrong field (similar to #1)\n store.append('s3', wp, min_itemsize={'major_axis': 20})\n self.assertRaises(ValueError, store.append, 's3', wp2)\n\n # test truncation of bigger strings\n store.append('s4', wp)\n self.assertRaises(ValueError, store.append, 's4', wp2)\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big', df)\n tm.assert_frame_equal(store.select('df_big'), df)\n check_col('df_big', 'values_block_1', 15)\n\n # appending smaller string ok\n df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])\n store.append('df_big', df2)\n expected = concat([df, df2])\n tm.assert_frame_equal(store.select('df_big'), expected)\n check_col('df_big', 'values_block_1', 15)\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big2', df, min_itemsize={'values': 50})\n tm.assert_frame_equal(store.select('df_big2'), df)\n check_col('df_big2', 'values_block_1', 50)\n\n # bigger string on next append\n store.append('df_new', df)\n df_new = DataFrame(\n [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])\n self.assertRaises(ValueError, store.append, 'df_new', df_new)\n\n # with nans\n _maybe_remove(store, 'df')\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.ix[1:4, 'string'] = np.nan\n df['string2'] = 'bar'\n df.ix[4:8, 'string2'] = np.nan\n df['string3'] = 'bah'\n df.ix[1:, 'string3'] = np.nan\n store.append('df', df)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n with ensure_clean_store(self.path) as store:\n\n def check_col(key,name,size):\n self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)\n\n df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))\n\n # a min_itemsize that creates a data_column\n _maybe_remove(store, 'df')\n store.append('df', df, min_itemsize={'A' : 200 })\n check_col('df', 'A', 200)\n self.assert_(store.get_storer('df').data_columns == ['A'])\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })\n check_col('df', 'A', 200)\n self.assert_(store.get_storer('df').data_columns == ['B','A'])\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })\n check_col('df', 'B', 200)\n check_col('df', 'values_block_0', 200)\n self.assert_(store.get_storer('df').data_columns == ['B'])\n\n # infer the .typ on subsequent appends\n _maybe_remove(store, 'df')\n store.append('df', df[:5], min_itemsize=200)\n store.append('df', df[5:], min_itemsize=200)\n tm.assert_frame_equal(store['df'], df)\n\n # invalid min_itemsize keys\n df = 
DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])\n _maybe_remove(store, 'df')\n self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})\n\n def test_append_with_data_columns(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n df.loc[:,'B'].iloc[0] = 1.\n _maybe_remove(store, 'df')\n store.append('df', df[:2], data_columns=['B'])\n store.append('df', df[2:])\n tm.assert_frame_equal(store['df'], df)\n\n # check that we have indicies created\n assert(store._handle.root.df.table.cols.index.is_indexed is True)\n assert(store._handle.root.df.table.cols.B.is_indexed is True)\n\n # data column searching\n result = store.select('df', [Term('B>0')])\n expected = df[df.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column searching (with an indexable and a data_columns)\n result = store.select(\n 'df', [Term('B>0'), Term('index>df.index[3]')])\n df_new = df.reindex(index=df.index[4:])\n expected = df_new[df_new.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column selection with a string data_column\n df_new = df.copy()\n df_new['string'] = 'foo'\n df_new['string'][1:4] = np.nan\n df_new['string'][5:6] = 'bar'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'])\n result = store.select('df', [Term('string=foo')])\n expected = df_new[df_new.string == 'foo']\n tm.assert_frame_equal(result, expected)\n\n # using min_itemsize and a data column\n def check_col(key,name,size):\n self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'string': 30})\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['string'], min_itemsize=30)\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'values': 30})\n check_col('df', 'string', 30)\n\n with ensure_clean_store(self.path) as store:\n df_new['string2'] = 'foobarbah'\n df_new['string_block1'] = 'foobarbah1'\n df_new['string_block2'] = 'foobarbah2'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})\n check_col('df', 'string', 30)\n check_col('df', 'string2', 40)\n check_col('df', 'values_block_1', 50)\n\n with ensure_clean_store(self.path) as store:\n # multiple data columns\n df_new = df.copy()\n df_new.loc[:,'A'].iloc[0] = 1.\n df_new.loc[:,'B'].iloc[0] = -1.\n df_new['string'] = 'foo'\n df_new['string'][1:4] = np.nan\n df_new['string'][5:6] = 'bar'\n df_new['string2'] = 'foo'\n df_new['string2'][2:5] = np.nan\n df_new['string2'][7:8] = 'bar'\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['A', 'B', 'string', 'string2'])\n result = store.select('df', [Term('string=foo'), Term(\n 'string2=foo'), Term('A>0'), Term('B<0')])\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n # yield an empty frame\n result = store.select('df', [Term('string=foo'), Term(\n 'string2=cool')])\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'cool')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # 
doc example\n df_dc = df.copy()\n df_dc['string'] = 'foo'\n df_dc.ix[4:6, 'string'] = np.nan\n df_dc.ix[7:9, 'string'] = 'bar'\n df_dc['string2'] = 'cool'\n df_dc['datetime'] = Timestamp('20010102')\n df_dc = df_dc.convert_objects()\n df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan\n\n _maybe_remove(store, 'df_dc')\n store.append('df_dc', df_dc, data_columns=['B', 'C',\n 'string', 'string2', 'datetime'])\n result = store.select('df_dc', [Term('B>0')])\n\n expected = df_dc[df_dc.B > 0]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n result = store.select(\n 'df_dc', ['B > 0', 'C > 0', 'string == foo'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (\n df_dc.string == 'foo')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # doc example part 2\n np.random.seed(1234)\n index = date_range('1/1/2000', periods=8)\n df_dc = DataFrame(np.random.randn(8, 3), index=index,\n columns=['A', 'B', 'C'])\n df_dc['string'] = 'foo'\n df_dc.ix[4:6,'string'] = np.nan\n df_dc.ix[7:9,'string'] = 'bar'\n df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()\n df_dc['string2'] = 'cool'\n\n # on-disk operations\n store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])\n\n result = store.select('df_dc', [ Term('B>0') ])\n expected = df_dc[df_dc.B>0]\n tm.assert_frame_equal(result,expected)\n\n result = store.select('df_dc', ['B > 0', 'C > 0', 'string == \"foo\"'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]\n tm.assert_frame_equal(result,expected)\n\n with ensure_clean_store(self.path) as store:\n # panel\n # GH5717 not handling data_columns\n np.random.seed(1234)\n p = tm.makePanel()\n\n store.append('p1',p)\n tm.assert_panel_equal(store.select('p1'),p)\n\n store.append('p2',p,data_columns=True)\n tm.assert_panel_equal(store.select('p2'),p)\n\n result = store.select('p2',where='ItemA>0')\n expected = p.to_frame()\n expected = expected[expected['ItemA']>0]\n tm.assert_frame_equal(result.to_frame(),expected)\n\n result = store.select('p2',where='ItemA>0 & minor_axis=[\"A\",\"B\"]')\n expected = p.to_frame()\n expected = expected[expected['ItemA']>0]\n expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]\n tm.assert_frame_equal(result.to_frame(),expected)\n\n def test_create_table_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n def col(t,column):\n return getattr(store.get_storer(t).table.cols,column)\n\n # index=False\n wp = tm.makePanel()\n store.append('p5', wp, index=False)\n store.create_table_index('p5', columns=['major_axis'])\n assert(col('p5', 'major_axis').is_indexed is True)\n assert(col('p5', 'minor_axis').is_indexed is False)\n\n # index=True\n store.append('p5i', wp, index=True)\n assert(col('p5i', 'major_axis').is_indexed is True)\n assert(col('p5i', 'minor_axis').is_indexed is True)\n\n # default optlevels\n store.get_storer('p5').create_index()\n assert(col('p5', 'major_axis').index.optlevel == 6)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n\n # let's change the indexing scheme\n store.create_table_index('p5')\n assert(col('p5', 'major_axis').index.optlevel == 6)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n store.create_table_index('p5', optlevel=9)\n assert(col('p5', 'major_axis').index.optlevel == 9)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n store.create_table_index('p5', kind='full')\n assert(col('p5', 'major_axis').index.optlevel == 9)\n assert(col('p5', 
'minor_axis').index.kind == 'full')\n store.create_table_index('p5', optlevel=1, kind='light')\n assert(col('p5', 'major_axis').index.optlevel == 1)\n assert(col('p5', 'minor_axis').index.kind == 'light')\n\n # data columns\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df['string2'] = 'bar'\n store.append('f', df, data_columns=['string', 'string2'])\n assert(col('f', 'index').is_indexed is True)\n assert(col('f', 'string').is_indexed is True)\n assert(col('f', 'string2').is_indexed is True)\n\n # specify index=columns\n store.append(\n 'f2', df, index=['string'], data_columns=['string', 'string2'])\n assert(col('f2', 'index').is_indexed is False)\n assert(col('f2', 'string').is_indexed is True)\n assert(col('f2', 'string2').is_indexed is False)\n\n # try to index a non-table\n _maybe_remove(store, 'f2')\n store.put('f2', df)\n self.assertRaises(TypeError, store.create_table_index, 'f2')\n\n # try to change the version supports flag\n from pandas.io import pytables\n pytables._table_supports_index = False\n self.assertRaises(Exception, store.create_table_index, 'f')\n\n # test out some versions\n original = tables.__version__\n\n for v in ['2.2', '2.2b']:\n pytables._table_mod = None\n pytables._table_supports_index = False\n tables.__version__ = v\n self.assertRaises(Exception, store.create_table_index, 'f')\n\n for v in ['2.3.1', '2.3.1b', '2.4dev', '2.4', original]:\n pytables._table_mod = None\n pytables._table_supports_index = False\n tables.__version__ = v\n store.create_table_index('f')\n pytables._table_mod = None\n pytables._table_supports_index = False\n tables.__version__ = original\n\n def test_big_table_frame(self):\n raise nose.SkipTest('no big table frame')\n\n # create and write a big table\n df = DataFrame(np.random.randn(2000 * 100, 100), index=range(\n 2000 * 100), columns=['E%03d' % i for i in range(100)])\n for x in range(20):\n df['String%03d' % x] = 'string%03d' % x\n\n import time\n x = time.time()\n with ensure_clean_store(self.path,mode='w') as store:\n store.append('df', df)\n rows = store.root.df.table.nrows\n recons = store.select('df')\n assert isinstance(recons, DataFrame)\n\n print(\"\\nbig_table frame [%s] -> %5.2f\" % (rows, time.time() - x))\n\n def test_big_table2_frame(self):\n # this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime\n # columns\n raise nose.SkipTest('no big table2 frame')\n\n # create and write a big table\n print(\"\\nbig_table2 start\")\n import time\n start_time = time.time()\n df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(\n 1000 * 1000)), columns=['E%03d' % i for i in range(60)])\n for x in range(20):\n df['String%03d' % x] = 'string%03d' % x\n for x in range(20):\n df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)\n\n print(\"\\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f\"\n % (len(df.index), time.time() - start_time))\n\n def f(chunksize):\n with ensure_clean_store(self.path,mode='w') as store:\n store.append('df', df, chunksize=chunksize)\n r = store.root.df.table.nrows\n return r\n\n for c in [10000, 50000, 250000]:\n start_time = time.time()\n print(\"big_table2 frame [chunk->%s]\" % c)\n rows = f(c)\n print(\"big_table2 frame [rows->%s,chunk->%s] -> %5.2f\"\n % (rows, c, time.time() - start_time))\n\n def test_big_put_frame(self):\n raise nose.SkipTest('no big put frame')\n\n print(\"\\nbig_put start\")\n import time\n start_time = time.time()\n df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(\n 1000 * 1000)), columns=['E%03d' % i for i 
in range(60)])\n for x in range(20):\n df['String%03d' % x] = 'string%03d' % x\n for x in range(20):\n df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)\n\n print(\"\\nbig_put frame (creation of df) [rows->%s] -> %5.2f\"\n % (len(df.index), time.time() - start_time))\n\n with ensure_clean_store(self.path, mode='w') as store:\n start_time = time.time()\n store = HDFStore(self.path, mode='w')\n store.put('df', df)\n\n print(df.get_dtype_counts())\n print(\"big_put frame [shape->%s] -> %5.2f\"\n % (df.shape, time.time() - start_time))\n\n def test_big_table_panel(self):\n raise nose.SkipTest('no big table panel')\n\n # create and write a big table\n wp = Panel(\n np.random.randn(20, 1000, 1000), items=['Item%03d' % i for i in range(20)],\n major_axis=date_range('1/1/2000', periods=1000), minor_axis=['E%03d' % i for i in range(1000)])\n\n wp.ix[:, 100:200, 300:400] = np.nan\n\n for x in range(100):\n wp['String%03d'] = 'string%03d' % x\n\n import time\n x = time.time()\n\n\n with ensure_clean_store(self.path, mode='w') as store:\n store.append('wp', wp)\n rows = store.root.wp.table.nrows\n recons = store.select('wp')\n assert isinstance(recons, Panel)\n\n print(\"\\nbig_table panel [%s] -> %5.2f\" % (rows, time.time() - x))\n\n def test_append_diff_item_order(self):\n\n wp = tm.makePanel()\n wp1 = wp.ix[:, :10, :]\n wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]\n\n with ensure_clean_store(self.path) as store:\n store.put('panel', wp1, format='table')\n self.assertRaises(ValueError, store.put, 'panel', wp2,\n append=True)\n\n def test_append_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n with ensure_clean_store(self.path) as store:\n store.append('mi', df)\n result = store.select('mi')\n tm.assert_frame_equal(result, df)\n\n # GH 3748\n result = store.select('mi',columns=['A','B'])\n expected = df.reindex(columns=['A','B'])\n tm.assert_frame_equal(result,expected)\n\n with ensure_clean_path('test.hdf') as path:\n df.to_hdf(path,'df',format='table')\n result = read_hdf(path,'df',columns=['A','B'])\n expected = df.reindex(columns=['A','B'])\n tm.assert_frame_equal(result,expected)\n\n def test_column_multiindex(self):\n # GH 4710\n # recreate multi-indexes properly\n\n index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])\n df = DataFrame(np.arange(12).reshape(3,4), columns=index)\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df',df)\n tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)\n\n store.put('df1',df,format='table')\n tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)\n\n self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])\n self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)\n\n # non_index_axes name\n df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df1',df,format='table')\n tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)\n\n def test_store_multiindex(self):\n\n # validate multi-index names\n # GH 5527\n with ensure_clean_store(self.path) as store:\n\n def make_index(names=None):\n return 
MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],\n names=names)\n\n\n # no names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())\n store.append('df',df)\n tm.assert_frame_equal(store.select('df'),df)\n\n # partial names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))\n store.append('df',df)\n tm.assert_frame_equal(store.select('df'),df)\n\n # series\n _maybe_remove(store, 's')\n s = Series(np.zeros(12), index=make_index(['date',None,None]))\n store.append('s',s)\n tm.assert_series_equal(store.select('s'),s)\n\n # dup with column\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))\n self.assertRaises(ValueError, store.append, 'df',df)\n\n # dup within level\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))\n self.assertRaises(ValueError, store.append, 'df',df)\n\n # fully names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))\n store.append('df',df)\n tm.assert_frame_equal(store.select('df'),df)\n\n def test_pass_spec_to_storer(self):\n\n df = tm.makeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df',df)\n self.assertRaises(TypeError, store.select, 'df', columns=['A'])\n self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])\n\n def test_append_misc(self):\n\n with ensure_clean_store(self.path) as store:\n\n # unsuported data types for non-tables\n p4d = tm.makePanel4D()\n self.assertRaises(TypeError, store.put,'p4d',p4d)\n\n # unsuported data types\n self.assertRaises(TypeError, store.put,'abc',None)\n self.assertRaises(TypeError, store.put,'abc','123')\n self.assertRaises(TypeError, store.put,'abc',123)\n self.assertRaises(TypeError, store.put,'abc',np.arange(5))\n\n df = tm.makeDataFrame()\n store.append('df', df, chunksize=1)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n store.append('df1', df, expectedrows=10)\n result = store.select('df1')\n tm.assert_frame_equal(result, df)\n\n # more chunksize in append tests\n def check(obj, comparator):\n for c in [10, 200, 1000]:\n with ensure_clean_store(self.path,mode='w') as store:\n store.append('obj', obj, chunksize=c)\n result = store.select('obj')\n comparator(result,obj)\n\n df = tm.makeDataFrame()\n df['string'] = 'foo'\n df['float322'] = 1.\n df['float322'] = df['float322'].astype('float32')\n df['bool'] = df['float322'] > 0\n df['time1'] = Timestamp('20130101')\n df['time2'] = Timestamp('20130102')\n check(df, tm.assert_frame_equal)\n\n p = tm.makePanel()\n check(p, assert_panel_equal)\n\n p4d = tm.makePanel4D()\n check(p4d, assert_panel4d_equal)\n\n # empty frame, GH4273\n with ensure_clean_store(self.path) as store:\n\n # 0 len\n df_empty = DataFrame(columns=list('ABC'))\n store.append('df',df_empty)\n self.assertRaises(KeyError,store.select, 'df')\n\n # repeated append of 0/non-zero frames\n df = DataFrame(np.random.rand(10,3),columns=list('ABC'))\n store.append('df',df)\n assert_frame_equal(store.select('df'),df)\n store.append('df',df_empty)\n assert_frame_equal(store.select('df'),df)\n\n # store\n df = DataFrame(columns=list('ABC'))\n store.put('df2',df)\n assert_frame_equal(store.select('df2'),df)\n\n # 0 len\n p_empty = Panel(items=list('ABC'))\n 
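# appending a zero-length panel writes nothing, so the select below raises KeyError\n            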
store.append('p',p_empty)\n self.assertRaises(KeyError,store.select, 'p')\n\n # repeated append of 0/non-zero frames\n p = Panel(np.random.randn(3,4,5),items=list('ABC'))\n store.append('p',p)\n assert_panel_equal(store.select('p'),p)\n store.append('p',p_empty)\n assert_panel_equal(store.select('p'),p)\n\n # store\n store.put('p2',p_empty)\n assert_panel_equal(store.select('p2'),p_empty)\n\n def test_append_raise(self):\n\n with ensure_clean_store(self.path) as store:\n\n # test append with invalid input to get good error messages\n\n # list in column\n df = tm.makeDataFrame()\n df['invalid'] = [['a']] * len(df)\n self.assert_(df.dtypes['invalid'] == np.object_)\n self.assertRaises(TypeError, store.append,'df',df)\n\n # multiple invalid columns\n df['invalid2'] = [['a']] * len(df)\n df['invalid3'] = [['a']] * len(df)\n self.assertRaises(TypeError, store.append,'df',df)\n\n # datetime with embedded nans as object\n df = tm.makeDataFrame()\n s = Series(datetime.datetime(2001,1,2),index=df.index)\n s = s.astype(object)\n s[0:5] = np.nan\n df['invalid'] = s\n self.assert_(df.dtypes['invalid'] == np.object_)\n self.assertRaises(TypeError, store.append,'df', df)\n\n # directy ndarray\n self.assertRaises(TypeError, store.append,'df',np.arange(10))\n\n # series directly\n self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))\n\n # appending an incompatbile table\n df = tm.makeDataFrame()\n store.append('df',df)\n\n df['foo'] = 'foo'\n self.assertRaises(ValueError, store.append,'df',df)\n\n def test_table_index_incompatible_dtypes(self):\n df1 = DataFrame({'a': [1, 2, 3]})\n df2 = DataFrame({'a': [4, 5, 6]},\n index=date_range('1/1/2000', periods=3))\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df1, format='table')\n self.assertRaises(TypeError, store.put, 'frame', df2,\n format='table', append=True)\n\n def test_table_values_dtypes_roundtrip(self):\n\n with ensure_clean_store(self.path) as store:\n df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')\n store.append('df_f8', df1)\n assert_series_equal(df1.dtypes,store['df_f8'].dtypes)\n\n df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')\n store.append('df_i8', df2)\n assert_series_equal(df2.dtypes,store['df_i8'].dtypes)\n\n # incompatible dtype\n self.assertRaises(ValueError, store.append, 'df_i8', df1)\n\n # check creation/storage/retrieval of float32 (a bit hacky to actually create them thought)\n df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])\n store.append('df_f4', df1)\n assert_series_equal(df1.dtypes,store['df_f4'].dtypes)\n assert df1.dtypes[0] == 'float32'\n\n # check with mixed dtypes\n df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in\n ['float32','float64','int32','int64','int16','int8'] ]))\n df1['string'] = 'foo'\n df1['float322'] = 1.\n df1['float322'] = df1['float322'].astype('float32')\n df1['bool'] = df1['float32'] > 0\n df1['time1'] = Timestamp('20130101')\n df1['time2'] = Timestamp('20130102')\n\n store.append('df_mixed_dtypes1', df1)\n result = store.select('df_mixed_dtypes1').get_dtype_counts()\n expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,\n 'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,\n 'datetime64[ns]' : 2})\n result.sort()\n expected.sort()\n tm.assert_series_equal(result,expected)\n\n def test_table_mixed_dtypes(self):\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n 
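# add datetime-like columns and a few NaNs so the frame spans object, bool, int and datetime blocks\n        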
df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.ix[3:6, ['obj1']] = np.nan\n df = df.consolidate().convert_objects()\n\n with ensure_clean_store(self.path) as store:\n store.append('df1_mixed', df)\n tm.assert_frame_equal(store.select('df1_mixed'), df)\n\n # panel\n wp = tm.makePanel()\n wp['obj1'] = 'foo'\n wp['obj2'] = 'bar'\n wp['bool1'] = wp['ItemA'] > 0\n wp['bool2'] = wp['ItemB'] > 0\n wp['int1'] = 1\n wp['int2'] = 2\n wp = wp.consolidate()\n\n with ensure_clean_store(self.path) as store:\n store.append('p1_mixed', wp)\n assert_panel_equal(store.select('p1_mixed'), wp)\n\n # ndim\n wp = tm.makePanel4D()\n wp['obj1'] = 'foo'\n wp['obj2'] = 'bar'\n wp['bool1'] = wp['l1'] > 0\n wp['bool2'] = wp['l2'] > 0\n wp['int1'] = 1\n wp['int2'] = 2\n wp = wp.consolidate()\n\n with ensure_clean_store(self.path) as store:\n store.append('p4d_mixed', wp)\n assert_panel4d_equal(store.select('p4d_mixed'), wp)\n\n def test_unimplemented_dtypes_table_columns(self):\n\n with ensure_clean_store(self.path) as store:\n\n l = [('date', datetime.date(2001, 1, 2))]\n\n # py3 ok for unicode\n if not compat.PY3:\n l.append(('unicode', u('\\\\u03c3')))\n\n ### currently not supported dtypes ####\n for n, f in l:\n df = tm.makeDataFrame()\n df[n] = f\n self.assertRaises(\n TypeError, store.append, 'df1_%s' % n, df)\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['datetime1'] = datetime.date(2001, 1, 2)\n df = df.consolidate().convert_objects()\n\n with ensure_clean_store(self.path) as store:\n # this fails because we have a date in the object block......\n self.assertRaises(TypeError, store.append, 'df_unimplemented', df)\n\n def test_append_with_timezones(self):\n\n from datetime import timedelta\n\n def compare(a,b):\n tm.assert_frame_equal(a,b)\n\n # compare the zones on each element\n for c in a.columns:\n for i in a.index:\n a_e = a[c][i]\n b_e = b[c][i]\n if not (a_e == b_e and a_e.tz == b_e.tz):\n raise AssertionError(\"invalid tz comparsion [%s] [%s]\" % (a_e,b_e))\n\n # as columns\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))\n store.append('df_tz',df,data_columns=['A'])\n result = store['df_tz']\n compare(result,df)\n assert_frame_equal(result,df)\n\n # select with tz aware\n compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5))\n store.append('df_tz',df)\n result = store['df_tz']\n compare(result,df)\n assert_frame_equal(result,df)\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))\n self.assertRaises(TypeError, store.append, 'df_tz', df)\n\n # this is ok\n _maybe_remove(store, 'df_tz')\n store.append('df_tz',df,data_columns=['A','B'])\n result = store['df_tz']\n compare(result,df)\n assert_frame_equal(result,df)\n\n # can't append with diff timezone\n df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))\n self.assertRaises(ValueError, store.append, 'df_tz', df)\n\n # as index\n with ensure_clean_store(self.path) as store:\n\n # GH 
4098 example\n df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))\n\n _maybe_remove(store, 'df')\n store.put('df',df)\n result = store.select('df')\n assert_frame_equal(result,df)\n\n _maybe_remove(store, 'df')\n store.append('df',df)\n result = store.select('df')\n assert_frame_equal(result,df)\n\n def test_store_timezone(self):\n # GH2852\n # issue storing datetime.date with a timezone as it resets when read back in a new timezone\n\n import platform\n if platform.system() == \"Windows\":\n raise nose.SkipTest(\"timezone setting not supported on windows\")\n\n import datetime\n import time\n import os\n\n # original method\n with ensure_clean_store(self.path) as store:\n\n today = datetime.date(2013,9,10)\n df = DataFrame([1,2,3], index = [today, today, today])\n store['obj1'] = df\n result = store['obj1']\n assert_frame_equal(result, df)\n\n # with tz setting\n orig_tz = os.environ.get('TZ')\n\n def setTZ(tz):\n if tz is None:\n try:\n del os.environ['TZ']\n except:\n pass\n else:\n os.environ['TZ']=tz\n time.tzset()\n\n try:\n\n with ensure_clean_store(self.path) as store:\n\n setTZ('EST5EDT')\n today = datetime.date(2013,9,10)\n df = DataFrame([1,2,3], index = [today, today, today])\n store['obj1'] = df\n\n setTZ('CST6CDT')\n result = store['obj1']\n\n assert_frame_equal(result, df)\n\n finally:\n setTZ(orig_tz)\n\n def test_append_with_timedelta(self):\n if _np_version_under1p7:\n raise nose.SkipTest(\"requires numpy >= 1.7\")\n\n # GH 3577\n # append timedelta\n\n from datetime import timedelta\n df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))\n df['C'] = df['A']-df['B']\n df.ix[3:5,'C'] = np.nan\n\n with ensure_clean_store(self.path) as store:\n\n # table\n _maybe_remove(store, 'df')\n store.append('df',df,data_columns=True)\n result = store.select('df')\n assert_frame_equal(result,df)\n\n result = store.select('df',Term(\"C<100000\"))\n assert_frame_equal(result,df)\n\n result = store.select('df',Term(\"C\",\"<\",-3*86400))\n assert_frame_equal(result,df.iloc[3:])\n\n result = store.select('df',\"C<'-3D'\")\n assert_frame_equal(result,df.iloc[3:])\n\n # a bit hacky here as we don't really deal with the NaT properly\n\n result = store.select('df',\"C<'-500000s'\")\n result = result.dropna(subset=['C'])\n assert_frame_equal(result,df.iloc[6:])\n\n result = store.select('df',\"C<'-3.5D'\")\n result = result.iloc[1:]\n assert_frame_equal(result,df.iloc[4:])\n\n # fixed\n _maybe_remove(store, 'df2')\n store.put('df2',df)\n result = store.select('df2')\n assert_frame_equal(result,df)\n\n def test_remove(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeDataFrame()\n store['a'] = ts\n store['b'] = df\n _maybe_remove(store, 'a')\n self.assertEquals(len(store), 1)\n tm.assert_frame_equal(df, store['b'])\n\n _maybe_remove(store, 'b')\n self.assertEquals(len(store), 0)\n\n # nonexistence\n self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')\n\n # pathing\n store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'foo')\n _maybe_remove(store, 'b/foo')\n self.assertEquals(len(store), 1)\n\n store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'b')\n self.assertEquals(len(store), 1)\n\n # __delitem__\n store['a'] = ts\n store['b'] = df\n del store['a']\n del store['b']\n self.assertEquals(len(store), 0)\n\n def test_remove_where(self):\n\n with ensure_clean_store(self.path) as 
store:\n\n # non-existance\n crit1 = Term('index>foo')\n self.assertRaises(KeyError, store.remove, 'a', [crit1])\n\n # try to remove non-table (with crit)\n # non-table ok (where = None)\n wp = tm.makePanel()\n store.put('wp', wp, format='table')\n store.remove('wp', [\"minor_axis=['A', 'D']\"])\n rs = store.select('wp')\n expected = wp.reindex(minor_axis=['B', 'C'])\n assert_panel_equal(rs, expected)\n\n # empty where\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n\n # deleted number (entire table)\n n = store.remove('wp', [])\n assert(n == 120)\n\n # non - empty where\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n self.assertRaises(ValueError, store.remove,\n 'wp', ['foo'])\n\n # selectin non-table with a where\n # store.put('wp2', wp, format='f')\n # self.assertRaises(ValueError, store.remove,\n # 'wp2', [('column', ['A', 'D'])])\n\n def test_remove_crit(self):\n\n with ensure_clean_store(self.path) as store:\n\n wp = tm.makePanel()\n\n # group row removal\n date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])\n crit4 = Term('major_axis=date4')\n store.put('wp3', wp, format='t')\n n = store.remove('wp3', where=[crit4])\n assert(n == 36)\n result = store.select('wp3')\n expected = wp.reindex(major_axis=wp.major_axis - date4)\n assert_panel_equal(result, expected)\n\n # upper half\n store.put('wp', wp, format='table')\n date = wp.major_axis[len(wp.major_axis) // 2]\n\n crit1 = Term('major_axis>date')\n crit2 = Term(\"minor_axis=['A', 'D']\")\n n = store.remove('wp', where=[crit1])\n\n assert(n == 56)\n\n n = store.remove('wp', where=[crit2])\n assert(n == 32)\n\n result = store['wp']\n expected = wp.truncate(after=date).reindex(minor=['B', 'C'])\n assert_panel_equal(result, expected)\n\n # individual row elements\n store.put('wp2', wp, format='table')\n\n date1 = wp.major_axis[1:3]\n crit1 = Term('major_axis=date1')\n store.remove('wp2', where=[crit1])\n result = store.select('wp2')\n expected = wp.reindex(major_axis=wp.major_axis - date1)\n assert_panel_equal(result, expected)\n\n date2 = wp.major_axis[5]\n crit2 = Term('major_axis=date2')\n store.remove('wp2', where=[crit2])\n result = store['wp2']\n expected = wp.reindex(\n major_axis=wp.major_axis - date1 - Index([date2]))\n assert_panel_equal(result, expected)\n\n date3 = [wp.major_axis[7], wp.major_axis[9]]\n crit3 = Term('major_axis=date3')\n store.remove('wp2', where=[crit3])\n result = store['wp2']\n expected = wp.reindex(\n major_axis=wp.major_axis - date1 - Index([date2]) - Index(date3))\n assert_panel_equal(result, expected)\n\n # corners\n store.put('wp4', wp, format='table')\n n = store.remove(\n 'wp4', where=[Term('major_axis>wp.major_axis[-1]')])\n result = store.select('wp4')\n assert_panel_equal(result, wp)\n\n def test_invalid_terms(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.ix[0:4,'string'] = 'bar'\n wp = tm.makePanel()\n p4d = tm.makePanel4D()\n store.put('df', df, format='table')\n store.put('wp', wp, format='table')\n store.put('p4d', p4d, format='table')\n\n # some invalid terms\n self.assertRaises(ValueError, store.select, 'wp', \"minor=['A', 'B']\")\n self.assertRaises(ValueError, store.select, 'wp', [\"index=['20121114']\"])\n self.assertRaises(ValueError, store.select, 'wp', [\"index=['20121114', '20121114']\"])\n self.assertRaises(TypeError, Term)\n\n # more invalid\n self.assertRaises(ValueError, store.select, 'df','df.index[3]')\n self.assertRaises(SyntaxError, store.select, 
'df','index>')\n self.assertRaises(ValueError, store.select, 'wp', \"major_axis<'20000108' & minor_axis['A', 'B']\")\n\n # from the docs\n with ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))\n dfq.to_hdf(path,'dfq',format='table',data_columns=True)\n\n # check ok\n read_hdf(path,'dfq',where=\"index>Timestamp('20130104') & columns=['A', 'B']\")\n read_hdf(path,'dfq',where=\"A>0 or C>0\")\n\n # catch the invalid reference\n with ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))\n dfq.to_hdf(path,'dfq',format='table')\n\n self.assertRaises(ValueError, read_hdf, path,'dfq',where=\"A>0 or C>0\")\n\n def test_terms(self):\n\n with ensure_clean_store(self.path) as store:\n\n wp = tm.makePanel()\n p4d = tm.makePanel4D()\n store.put('wp', wp, table=True)\n store.put('p4d', p4d, table=True)\n\n # panel\n result = store.select('wp', [Term(\n 'major_axis<\"20000108\"'), Term(\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])\n assert_panel_equal(result, expected)\n\n # with deprecation\n result = store.select('wp', [Term(\n 'major_axis','<',\"20000108\"), Term(\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])\n tm.assert_panel_equal(result, expected)\n\n # p4d\n result = store.select('p4d', [Term('major_axis<\"20000108\"'),\n Term(\"minor_axis=['A', 'B']\"),\n Term(\"items=['ItemA', 'ItemB']\")])\n expected = p4d.truncate(after='20000108').reindex(\n minor=['A', 'B'], items=['ItemA', 'ItemB'])\n assert_panel4d_equal(result, expected)\n\n # back compat invalid terms\n terms = [\n dict(field='major_axis', op='>', value='20121114'),\n [ dict(field='major_axis', op='>', value='20121114') ],\n [ \"minor_axis=['A','B']\", dict(field='major_axis', op='>', value='20121114') ]\n ]\n for t in terms:\n with tm.assert_produces_warning(expected_warning=DeprecationWarning):\n Term(t)\n\n # valid terms\n terms = [\n ('major_axis=20121114'),\n ('major_axis>20121114'),\n ((\"major_axis=['20121114', '20121114']\"),),\n ('major_axis=datetime.datetime(2012, 11, 14)'),\n 'major_axis> 20121114',\n 'major_axis >20121114',\n 'major_axis > 20121114',\n ((\"minor_axis=['A', 'B']\"),),\n ((\"minor_axis=['A', 'B']\"),),\n (((\"minor_axis==['A', 'B']\"),),),\n ((\"items=['ItemA', 'ItemB']\"),),\n ('items=ItemA'),\n ]\n\n for t in terms:\n store.select('wp', t)\n store.select('p4d', t)\n\n # valid for p4d only\n terms = [\n ((\"labels=['l1', 'l2']\"),),\n Term(\"labels=['l1', 'l2']\"),\n ]\n\n for t in terms:\n store.select('p4d', t)\n\n def test_term_compat(self):\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 'D'])\n store.append('wp',wp)\n\n result = store.select('wp', [Term('major_axis>20000102'),\n Term('minor_axis', '=', ['A','B']) ])\n expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]\n assert_panel_equal(result, expected)\n\n store.remove('wp', Term('major_axis>20000103'))\n result = store.select('wp')\n expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]\n assert_panel_equal(result, expected)\n\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 
'D'])\n store.append('wp',wp)\n\n # stringified datetimes\n result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])\n expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]\n assert_panel_equal(result, expected)\n\n result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])\n expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]\n assert_panel_equal(result, expected)\n\n result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])\n expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]\n assert_panel_equal(result, expected)\n\n result = store.select('wp', [Term('minor_axis','=',['A','B'])])\n expected = wp.loc[:,:,['A','B']]\n assert_panel_equal(result, expected)\n\n def test_same_name_scoping(self):\n\n with ensure_clean_store(self.path) as store:\n\n import pandas as pd\n df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))\n store.put('df', df, table=True)\n expected = df[df.index>pd.Timestamp('20130105')]\n\n import datetime\n result = store.select('df','index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result,expected)\n\n from datetime import datetime\n\n # technically an error, but allow it\n result = store.select('df','index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result,expected)\n\n result = store.select('df','index>datetime(2013,1,5)')\n assert_frame_equal(result,expected)\n\n def test_series(self):\n\n s = tm.makeStringSeries()\n self._check_roundtrip(s, tm.assert_series_equal)\n\n ts = tm.makeTimeSeries()\n self._check_roundtrip(ts, tm.assert_series_equal)\n\n ts2 = Series(ts.index, Index(ts.index, dtype=object))\n self._check_roundtrip(ts2, tm.assert_series_equal)\n\n ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),\n dtype=object))\n self._check_roundtrip(ts3, tm.assert_series_equal)\n\n def test_sparse_series(self):\n\n s = tm.makeStringSeries()\n s[3:5] = np.nan\n ss = s.to_sparse()\n self._check_roundtrip(ss, tm.assert_series_equal,\n check_series_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_roundtrip(ss2, tm.assert_series_equal,\n check_series_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_roundtrip(ss3, tm.assert_series_equal,\n check_series_type=True)\n\n def test_sparse_frame(self):\n\n s = tm.makeDataFrame()\n s.ix[3:5, 1:3] = np.nan\n s.ix[8:10, -2] = np.nan\n ss = s.to_sparse()\n\n self._check_double_roundtrip(ss, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_double_roundtrip(ss2, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_double_roundtrip(ss3, tm.assert_frame_equal,\n check_frame_type=True)\n\n def test_sparse_panel(self):\n\n items = ['x', 'y', 'z']\n p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))\n sp = p.to_sparse()\n\n self._check_double_roundtrip(sp, assert_panel_equal,\n check_panel_type=True)\n\n sp2 = p.to_sparse(kind='integer')\n self._check_double_roundtrip(sp2, assert_panel_equal,\n check_panel_type=True)\n\n sp3 = p.to_sparse(fill_value=0)\n self._check_double_roundtrip(sp3, assert_panel_equal,\n check_panel_type=True)\n\n def test_float_index(self):\n\n # GH #454\n index = np.random.randn(10)\n s = Series(np.random.randn(10), index=index)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_tuple_index(self):\n\n # GH #492\n col = np.arange(10)\n idx = [(0., 1.), (2., 3.), (4., 5.)]\n data = 
np.random.randn(30).reshape((3, 10))\n DF = DataFrame(data, index=idx, columns=col)\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n self._check_roundtrip(DF, tm.assert_frame_equal)\n\n def test_index_types(self):\n\n values = np.random.randn(2)\n\n func = lambda l, r: tm.assert_series_equal(l, r,\n check_dtype=True,\n check_index_type=True,\n check_series_type=True)\n\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n ser = Series(values, [0, 'y'])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n ser = Series(values, [datetime.datetime.today(), 0])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n ser = Series(values, ['y', 0])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n ser = Series(values, [datetime.date.today(), 'a'])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=PerformanceWarning):\n ser = Series(values, [1.23, 'b'])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 1.53])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 5])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [datetime.datetime(\n 2012, 1, 1), datetime.datetime(2012, 1, 2)])\n self._check_roundtrip(ser, func)\n\n def test_timeseries_preepoch(self):\n\n if sys.version_info[0] == 2 and sys.version_info[1] < 7:\n raise nose.SkipTest(\"won't work on Python < 2.7\")\n\n dr = bdate_range('1/1/1940', '1/1/1960')\n ts = Series(np.random.randn(len(dr)), index=dr)\n try:\n self._check_roundtrip(ts, tm.assert_series_equal)\n except OverflowError:\n raise nose.SkipTest('known failer on some windows platforms')\n\n def test_frame(self):\n\n df = tm.makeDataFrame()\n\n # put in some random NAs\n df.values[0, 0] = np.nan\n df.values[5, 3] = np.nan\n\n self._check_roundtrip_table(df, tm.assert_frame_equal)\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n self._check_roundtrip_table(df, tm.assert_frame_equal,\n compression=True)\n self._check_roundtrip(df, tm.assert_frame_equal,\n compression=True)\n\n tdf = tm.makeTimeDataFrame()\n self._check_roundtrip(tdf, tm.assert_frame_equal)\n self._check_roundtrip(tdf, tm.assert_frame_equal,\n compression=True)\n\n with ensure_clean_store(self.path) as store:\n # not consolidated\n df['foo'] = np.random.randn(len(df))\n store['df'] = df\n recons = store['df']\n self.assert_(recons._data.is_consolidated())\n\n # empty\n self._check_roundtrip(df[:0], tm.assert_frame_equal)\n\n def test_empty_series_frame(self):\n s0 = Series()\n s1 = Series(name='myseries')\n df0 = DataFrame()\n df1 = DataFrame(index=['a', 'b', 'c'])\n df2 = DataFrame(columns=['d', 'e', 'f'])\n\n self._check_roundtrip(s0, tm.assert_series_equal)\n self._check_roundtrip(s1, tm.assert_series_equal)\n self._check_roundtrip(df0, tm.assert_frame_equal)\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n def test_empty_series(self):\n for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:\n s = Series(dtype=dtype)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_can_serialize_dates(self):\n\n rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n\n def test_timezones(self):\n rng = date_range('1/1/2000', 
'1/30/2000', tz='US/Eastern')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n self.assert_(recons.index.equals(rng))\n self.assertEquals(rng.tz, recons.index.tz)\n\n def test_fixed_offset_tz(self):\n rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n self.assert_(recons.index.equals(rng))\n self.assertEquals(rng.tz, recons.index.tz)\n\n def test_store_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n self._check_roundtrip(frame.T, tm.assert_frame_equal)\n self._check_roundtrip(frame['A'], tm.assert_series_equal)\n\n # check that the names are stored\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n assert(recons.index.names == ('foo', 'bar'))\n\n def test_store_index_name(self):\n df = tm.makeDataFrame()\n df.index.name = 'foo'\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = df\n recons = store['frame']\n assert(recons.index.name == 'foo')\n\n def test_store_series_name(self):\n df = tm.makeDataFrame()\n series = df['A']\n\n with ensure_clean_store(self.path) as store:\n store['series'] = series\n recons = store['series']\n assert(recons.name == 'A')\n\n def test_store_mixed(self):\n\n def _make_one():\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['int1'] = 1\n df['int2'] = 2\n return df.consolidate()\n\n df1 = _make_one()\n df2 = _make_one()\n\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n with ensure_clean_store(self.path) as store:\n store['obj'] = df1\n tm.assert_frame_equal(store['obj'], df1)\n store['obj'] = df2\n tm.assert_frame_equal(store['obj'], df2)\n\n # check that can store Series of all of these types\n self._check_roundtrip(df1['obj1'], tm.assert_series_equal)\n self._check_roundtrip(df1['bool1'], tm.assert_series_equal)\n self._check_roundtrip(df1['int1'], tm.assert_series_equal)\n\n # try with compression\n self._check_roundtrip(df1['obj1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1['bool1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1['int1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1, tm.assert_frame_equal,\n compression=True)\n\n def test_wide(self):\n\n wp = tm.makePanel()\n self._check_roundtrip(wp, assert_panel_equal)\n\n def test_wide_table(self):\n\n wp = tm.makePanel()\n self._check_roundtrip_table(wp, assert_panel_equal)\n\n def test_select_with_dups(self):\n\n # single dtypes\n df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])\n df.index = date_range('20130101 9:30',periods=10,freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df',df)\n\n result = store.select('df')\n expected = df\n assert_frame_equal(result,expected,by_blocks=True)\n\n result = store.select('df',columns=df.columns)\n expected = df\n 
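# a plain select should round-trip the duplicate column names unchanged (compared block-by-block)\n            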
assert_frame_equal(result,expected,by_blocks=True)\n\n result = store.select('df',columns=['A'])\n expected = df.loc[:,['A']]\n assert_frame_equal(result,expected)\n\n # dups accross dtypes\n df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),\n DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],\n axis=1)\n df.index = date_range('20130101 9:30',periods=10,freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df',df)\n\n result = store.select('df')\n expected = df\n assert_frame_equal(result,expected,by_blocks=True)\n\n result = store.select('df',columns=df.columns)\n expected = df\n assert_frame_equal(result,expected,by_blocks=True)\n\n expected = df.loc[:,['A']]\n result = store.select('df',columns=['A'])\n assert_frame_equal(result,expected,by_blocks=True)\n\n expected = df.loc[:,['B','A']]\n result = store.select('df',columns=['B','A'])\n assert_frame_equal(result,expected,by_blocks=True)\n\n # duplicates on both index and columns\n with ensure_clean_store(self.path) as store:\n store.append('df',df)\n store.append('df',df)\n\n expected = df.loc[:,['B','A']]\n expected = concat([expected, expected])\n result = store.select('df',columns=['B','A'])\n assert_frame_equal(result,expected,by_blocks=True)\n\n def test_wide_table_dups(self):\n wp = tm.makePanel()\n with ensure_clean_store(self.path) as store:\n store.put('panel', wp, format='table')\n store.put('panel', wp, format='table', append=True)\n\n with tm.assert_produces_warning(expected_warning=DuplicateWarning):\n recons = store['panel']\n\n assert_panel_equal(recons, wp)\n\n def test_long(self):\n def _check(left, right):\n assert_panel_equal(left.to_panel(), right.to_panel())\n\n wp = tm.makePanel()\n self._check_roundtrip(wp.to_frame(), _check)\n\n # empty\n # self._check_roundtrip(wp.to_frame()[:0], _check)\n\n def test_longpanel(self):\n pass\n\n def test_overwrite_node(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeDataFrame()\n ts = tm.makeTimeSeries()\n store['a'] = ts\n\n tm.assert_series_equal(store['a'], ts)\n\n def test_sparse_with_compression(self):\n\n # GH 2931\n\n # make sparse dataframe\n df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)\n\n # case 1: store uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression = False,\n check_frame_type=True)\n\n # case 2: store compressed (works)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression = 'zlib',\n check_frame_type=True)\n\n # set one series to be completely sparse\n df[0] = np.zeros(1e3)\n\n # case 3: store df with completely sparse series uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression = False,\n check_frame_type=True)\n\n # case 4: try storing df with completely sparse series compressed (fails)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression = 'zlib',\n check_frame_type=True)\n\n def test_select(self):\n wp = tm.makePanel()\n\n with ensure_clean_store(self.path) as store:\n\n # put/select ok\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n store.select('wp')\n\n # non-table ok (where = None)\n _maybe_remove(store, 'wp')\n store.put('wp2', wp)\n store.select('wp2')\n\n # selection on the non-indexable with a large number of columns\n wp = Panel(\n np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],\n major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i 
in range(100)])\n\n _maybe_remove(store, 'wp')\n store.append('wp', wp)\n items = ['Item%03d' % i for i in range(80)]\n result = store.select('wp', Term('items=items'))\n expected = wp.reindex(items=items)\n assert_panel_equal(expected, result)\n\n # selectin non-table with a where\n # self.assertRaises(ValueError, store.select,\n # 'wp2', ('column', ['A', 'D']))\n\n # select with columns=\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # equivalentsly\n result = store.select('df', [(\"columns=['A', 'B']\")])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # all a data columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column, but different columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['C', 'D'])\n expected = df[df.A > 0].reindex(columns=['C', 'D'])\n tm.assert_frame_equal(expected, result)\n\n def test_select_dtypes(self):\n\n with ensure_clean_store(self.path) as store:\n\n # with a Timestamp data column (GH #2637)\n df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A'])\n\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01')\")])\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # bool columns (GH #2849)\n df = DataFrame(np.random.randn(5,2), columns =['A','B'])\n df['object'] = 'foo'\n df.ix[4:5,'object'] = 'bar'\n df['boolv'] = df['A'] > 0\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns = True)\n\n expected = df[df.boolv == True].reindex(columns=['A','boolv'])\n for v in [True,'true',1]:\n result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])\n tm.assert_frame_equal(expected, result)\n\n expected = df[df.boolv == False ].reindex(columns=['A','boolv'])\n for v in [False,'false',0]:\n result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])\n tm.assert_frame_equal(expected, result)\n\n # integer index\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n _maybe_remove(store, 'df_int')\n store.append('df_int', df)\n result = store.select(\n 'df_int', [Term(\"index<10\"), Term(\"columns=['A']\")])\n expected = df.reindex(index=list(df.index)[0:10],columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # float index\n df = DataFrame(dict(A=np.random.rand(\n 20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))\n _maybe_remove(store, 'df_float')\n store.append('df_float', df)\n result = store.select(\n 'df_float', [Term(\"index<10.0\"), Term(\"columns=['A']\")])\n expected = df.reindex(index=list(df.index)[0:10],columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n with ensure_clean_store(self.path) as store:\n\n # floats w/o NaN\n df = DataFrame(dict(cols = 
range(11), values = range(11)),dtype='float64')\n df['cols'] = (df['cols']+10).apply(str)\n\n store.append('df1',df,data_columns=True)\n result = store.select(\n 'df1', where='values>2.0')\n expected = df[df['values']>2.0]\n tm.assert_frame_equal(expected, result)\n\n # floats with NaN\n df.iloc[0] = np.nan\n expected = df[df['values']>2.0]\n\n store.append('df2',df,data_columns=True,index=False)\n result = store.select(\n 'df2', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n # https://github.com/PyTables/PyTables/issues/282\n # bug in selection when 0th row has a np.nan and an index\n #store.append('df3',df,data_columns=True)\n #result = store.select(\n # 'df3', where='values>2.0')\n #tm.assert_frame_equal(expected, result)\n\n # not in first position float with NaN ok too\n df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')\n df['cols'] = (df['cols']+10).apply(str)\n\n df.iloc[1] = np.nan\n expected = df[df['values']>2.0]\n\n store.append('df4',df,data_columns=True)\n result = store.select(\n 'df4', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n def test_select_with_many_inputs(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),\n A=np.random.randn(300),\n B=range(300),\n users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])\n\n # regular select\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01')\")])\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # small selector\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01') & users=['a','b','c']\")])\n expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]\n tm.assert_frame_equal(expected, result)\n\n # big selector along the columns\n selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01')\"),Term('users=selector')])\n expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]\n tm.assert_frame_equal(expected, result)\n\n selector = range(100,200)\n result = store.select('df', [Term('B=selector')])\n expected = df[ df.B.isin(selector) ]\n tm.assert_frame_equal(expected, result)\n self.assert_(len(result) == 100)\n\n # big selector along the index\n selector = Index(df.ts[0:100].values)\n result = store.select('df', [Term('ts=selector')])\n expected = df[ df.ts.isin(selector.values) ]\n tm.assert_frame_equal(expected, result)\n self.assert_(len(result) == 100)\n\n def test_select_iterator(self):\n\n # single table\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame(500)\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n expected = store.select('df')\n\n results = []\n for s in store.select('df',iterator=True):\n results.append(s)\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n results = []\n for s in store.select('df',chunksize=100):\n results.append(s)\n self.assert_(len(results) == 5)\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n results = []\n for s in store.select('df',chunksize=150):\n results.append(s)\n result = concat(results)\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path,'df_non_table')\n self.assertRaises(TypeError, 
read_hdf, path,'df_non_table',chunksize=100)\n self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path,'df',format='table')\n\n results = []\n for x in read_hdf(path,'df',chunksize=100):\n results.append(x)\n\n self.assert_(len(results) == 5)\n result = concat(results)\n tm.assert_frame_equal(result, df)\n tm.assert_frame_equal(result, read_hdf(path,'df'))\n\n # multiple\n\n with ensure_clean_store(self.path) as store:\n\n df1 = tm.makeTimeDataFrame(500)\n store.append('df1',df1,data_columns=True)\n df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n store.append('df2',df2)\n\n df = concat([df1, df2], axis=1)\n\n # full selection\n expected = store.select_as_multiple(\n ['df1', 'df2'], selector='df1')\n results = []\n for s in store.select_as_multiple(\n ['df1', 'df2'], selector='df1', chunksize=150):\n results.append(s)\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # where selection\n #expected = store.select_as_multiple(\n # ['df1', 'df2'], where= Term('A>0'), selector='df1')\n #results = []\n #for s in store.select_as_multiple(\n # ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):\n # results.append(s)\n #result = concat(results)\n #tm.assert_frame_equal(expected, result)\n\n def test_retain_index_attributes(self):\n\n # GH 3499, losing frequency info on index recreation\n df = DataFrame(dict(A = Series(lrange(3),\n index=date_range('2000-1-1',periods=3,freq='H'))))\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store,'data')\n store.put('data', df, format='table')\n\n result = store.get('data')\n tm.assert_frame_equal(df,result)\n\n for attr in ['freq','tz','name']:\n for idx in ['index','columns']:\n self.assert_(getattr(getattr(df,idx),attr,None) == getattr(getattr(result,idx),attr,None))\n\n\n # try to append a table with a different frequency\n with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):\n df2 = DataFrame(dict(A = Series(lrange(3),\n index=date_range('2002-1-1',periods=3,freq='D'))))\n store.append('data',df2)\n\n self.assert_(store.get_storer('data').info['index']['freq'] is None)\n\n # this is ok\n _maybe_remove(store,'df2')\n df2 = DataFrame(dict(A = Series(lrange(3),\n index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))\n store.append('df2',df2)\n df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))\n store.append('df2',df3)\n\n def test_retain_index_attributes2(self):\n\n with ensure_clean_path(self.path) as path:\n\n with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):\n\n df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))\n df.to_hdf(path,'data',mode='w',append=True)\n df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))\n df2.to_hdf(path,'data',append=True)\n\n idx = date_range('2000-1-1',periods=3,freq='H')\n idx.name = 'foo'\n df = DataFrame(dict(A = Series(lrange(3), index=idx)))\n df.to_hdf(path,'data',mode='w',append=True)\n\n self.assert_(read_hdf(path,'data').index.name == 'foo')\n\n with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):\n\n idx2 = date_range('2001-1-1',periods=3,freq='H')\n idx2.name = 'bar'\n df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))\n df2.to_hdf(path,'data',append=True)\n\n 
self.assert_(read_hdf(path,'data').index.name is None)\n\n def test_panel_select(self):\n\n wp = tm.makePanel()\n\n with ensure_clean_store(self.path) as store:\n store.put('wp', wp, format='table')\n date = wp.major_axis[len(wp.major_axis) // 2]\n\n crit1 = ('major_axis>=date')\n crit2 = (\"minor_axis=['A', 'D']\")\n\n result = store.select('wp', [crit1, crit2])\n expected = wp.truncate(before=date).reindex(minor=['A', 'D'])\n assert_panel_equal(result, expected)\n\n result = store.select(\n 'wp', ['major_axis>=\"20000124\"', (\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])\n assert_panel_equal(result, expected)\n\n def test_frame_select(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df,format='table')\n date = df.index[len(df) // 2]\n\n crit1 = Term('index>=date')\n crit2 = (\"columns=['A', 'D']\")\n crit3 = ('columns=A')\n\n result = store.select('frame', [crit1, crit2])\n expected = df.ix[date:, ['A', 'D']]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('frame', [crit3])\n expected = df.ix[:, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # invalid terms\n df = tm.makeTimeDataFrame()\n store.append('df_time', df)\n self.assertRaises(\n ValueError, store.select, 'df_time', [Term(\"index>0\")])\n\n # can't select if not written as table\n # store['frame'] = df\n # self.assertRaises(ValueError, store.select,\n # 'frame', [crit1, crit2])\n\n def test_frame_select_complex(self):\n # select via complex criteria\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.loc[df.index[0:4],'string'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, table=True, data_columns=['string'])\n\n # empty\n result = store.select('df', 'index>df.index[3] & string=\"bar\"')\n expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', 'index>df.index[3] & string=\"foo\"')\n expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]\n tm.assert_frame_equal(result, expected)\n\n # or\n result = store.select('df', 'index>df.index[3] | string=\"bar\"')\n expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string=\"bar\"')\n expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]\n tm.assert_frame_equal(result, expected)\n\n # invert\n result = store.select('df', 'string!=\"bar\"')\n expected = df.loc[df.string!='bar']\n tm.assert_frame_equal(result, expected)\n\n # invert not implemented in numexpr :(\n self.assertRaises(NotImplementedError, store.select, 'df', '~(string=\"bar\")')\n\n # invert ok for filters\n result = store.select('df', \"~(columns=['A','B'])\")\n expected = df.loc[:,df.columns-['A','B']]\n tm.assert_frame_equal(result, expected)\n\n # in\n result = store.select('df', \"index>df.index[3] & columns in ['A','B']\")\n expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])\n tm.assert_frame_equal(result, expected)\n\n def test_frame_select_complex2(self):\n\n with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:\n\n pp, hh = paths\n\n # use non-trivial selection criteria\n parms = DataFrame({ 'A' : [1,1,2,2,3] })\n parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])\n\n selection = read_hdf(pp,'df',where='A=[2,3]')\n hist = 
DataFrame(np.random.randn(25,1),columns=['data'],\n index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],\n names=['l1','l2']))\n\n hist.to_hdf(hh,'df',mode='w',format='table')\n\n expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))\n\n # list like\n result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))\n assert_frame_equal(result, expected)\n l = selection.index.tolist()\n\n # sccope with list like\n store = HDFStore(hh)\n result = store.select('df',where='l1=l')\n assert_frame_equal(result, expected)\n store.close()\n\n result = read_hdf(hh,'df',where='l1=l')\n assert_frame_equal(result, expected)\n\n # index\n index = selection.index\n result = read_hdf(hh,'df',where='l1=index')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh,'df',where='l1=selection.index')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh,'df',where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh,'df',where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n # sccope with index\n store = HDFStore(hh)\n\n result = store.select('df',where='l1=index')\n assert_frame_equal(result, expected)\n\n result = store.select('df',where='l1=selection.index')\n assert_frame_equal(result, expected)\n\n result = store.select('df',where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = store.select('df',where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n store.close()\n\n def test_invalid_filtering(self):\n\n # can't use more than one filter (atm)\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, table=True)\n\n # not implemented\n self.assertRaises(NotImplementedError, store.select, 'df', \"columns=['A'] | columns=['B']\")\n\n # in theory we could deal with this\n self.assertRaises(NotImplementedError, store.select, 'df', \"columns=['A','B'] & columns=['C']\")\n\n def test_string_select(self):\n\n # GH 2973\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame()\n\n # test string ==/!=\n df['x'] = 'none'\n df.ix[2:7,'x'] = ''\n\n store.append('df',df,data_columns=['x'])\n\n result = store.select('df',Term('x=none'))\n expected = df[df.x == 'none']\n assert_frame_equal(result,expected)\n\n try:\n result = store.select('df',Term('x!=none'))\n expected = df[df.x != 'none']\n assert_frame_equal(result,expected)\n except Exception as detail:\n print(\"[{0}]\".format(detail))\n print(store)\n print(expected)\n\n df2 = df.copy()\n df2.loc[df2.x=='','x'] = np.nan\n\n store.append('df2',df2,data_columns=['x'])\n result = store.select('df2',Term('x!=none'))\n expected = df2[isnull(df2.x)]\n assert_frame_equal(result,expected)\n\n # int ==/!=\n df['int'] = 1\n df.ix[2:7,'int'] = 2\n\n store.append('df3',df,data_columns=['int'])\n\n result = store.select('df3',Term('int=2'))\n expected = df[df.int==2]\n assert_frame_equal(result,expected)\n\n result = store.select('df3',Term('int!=2'))\n expected = df[df.int!=2]\n assert_frame_equal(result,expected)\n\n def test_read_column(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n # error\n self.assertRaises(KeyError, store.select_column, 'df', 'foo')\n\n def f():\n store.select_column('df', 'index', where = ['index>5'])\n self.assertRaises(Exception, f)\n\n # valid\n result = store.select_column('df', 'index')\n 
tm.assert_almost_equal(result.values, Series(df.index).values)\n self.assert_(isinstance(result,Series))\n\n # not a data indexable column\n self.assertRaises(\n ValueError, store.select_column, 'df', 'values_block_0')\n\n # a data column\n df2 = df.copy()\n df2['string'] = 'foo'\n store.append('df2', df2, data_columns=['string'])\n result = store.select_column('df2', 'string')\n tm.assert_almost_equal(result.values, df2['string'].values)\n\n # a data column with NaNs, result excludes the NaNs\n df3 = df.copy()\n df3['string'] = 'foo'\n df3.ix[4:6, 'string'] = np.nan\n store.append('df3', df3, data_columns=['string'])\n result = store.select_column('df3', 'string')\n tm.assert_almost_equal(result.values, df3['string'].values)\n\n def test_coordinates(self):\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n # all\n c = store.select_as_coordinates('df')\n assert((c.values == np.arange(len(df.index))).all() == True)\n\n # get coordinates back & test vs frame\n _maybe_remove(store, 'df')\n\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n store.append('df', df)\n c = store.select_as_coordinates('df', ['index<3'])\n assert((c.values == np.arange(3)).all() == True)\n result = store.select('df', where=c)\n expected = df.ix[0:2, :]\n tm.assert_frame_equal(result, expected)\n\n c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])\n assert((c.values == np.arange(2) + 3).all() == True)\n result = store.select('df', where=c)\n expected = df.ix[3:4, :]\n tm.assert_frame_equal(result, expected)\n self.assert_(isinstance(c, Index))\n\n # multiple tables\n _maybe_remove(store, 'df1')\n _maybe_remove(store, 'df2')\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n c = store.select_as_coordinates('df1', ['A>0', 'B>0'])\n df1_result = store.select('df1', c)\n df2_result = store.select('df2', c)\n result = concat([df1_result, df2_result], axis=1)\n\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # pass array/mask as the coordinates\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))\n store.append('df',df)\n c = store.select_column('df','index')\n where = c[DatetimeIndex(c).month==5].index\n expected = df.iloc[where]\n\n # locations\n result = store.select('df',where=where)\n tm.assert_frame_equal(result,expected)\n\n # boolean\n result = store.select('df',where=where)\n tm.assert_frame_equal(result,expected)\n\n # invalid\n self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))\n self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))\n self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)\n self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)\n\n # list\n df = DataFrame(np.random.randn(10,2))\n store.append('df2',df)\n result = store.select('df2',where=[0,3,5])\n expected = df.iloc[[0,3,5]]\n tm.assert_frame_equal(result,expected)\n\n # boolean\n where = [True] * 10\n where[-2] = False\n result = store.select('df2',where=where)\n expected = df.loc[where]\n tm.assert_frame_equal(result,expected)\n\n def test_append_to_multiple(self):\n df1 = tm.makeTimeDataFrame()\n df2 = 
tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n\n # exceptions\n self.assertRaises(ValueError, store.append_to_multiple,\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df3')\n self.assertRaises(ValueError, store.append_to_multiple,\n {'df1': None, 'df2': None}, df, selector='df3')\n self.assertRaises(\n ValueError, store.append_to_multiple, 'df1', df, 'df1')\n\n # regular operation\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1')\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = df[(df.A > 0) & (df.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n def test_append_to_multiple_dropna(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df1.ix[1, ['A', 'B']] = np.nan\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n # dropna=True should guarantee rows are synchronized\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',\n dropna=True)\n result = store.select_as_multiple(['df1', 'df2'])\n expected = df.dropna()\n tm.assert_frame_equal(result, expected)\n tm.assert_index_equal(store.select('df1').index,\n store.select('df2').index)\n\n # dropna=False shouldn't synchronize row indexes\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',\n dropna=False)\n self.assertRaises(\n ValueError, store.select_as_multiple, ['df1', 'df2'])\n assert not store.select('df1').index.equals(\n store.select('df2').index)\n\n def test_select_as_multiple(self):\n\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n\n # no tables stored\n self.assertRaises(Exception, store.select_as_multiple,\n None, where=['A>0', 'B>0'], selector='df1')\n\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n # exceptions\n self.assertRaises(Exception, store.select_as_multiple,\n None, where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(Exception, store.select_as_multiple,\n [None], where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(TypeError, store.select_as_multiple,\n ['df1','df3'], where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(KeyError, store.select_as_multiple,\n ['df3'], where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(ValueError, store.select_as_multiple,\n ['df1','df2'], where=['A>0', 'B>0'], selector='df4')\n\n # default select\n result = store.select('df1', ['A>0', 'B>0'])\n expected = store.select_as_multiple(\n ['df1'], where=['A>0', 'B>0'], selector='df1')\n tm.assert_frame_equal(result, expected)\n expected = store.select_as_multiple(\n 'df1', where=['A>0', 'B>0'], selector='df1')\n tm.assert_frame_equal(result, expected)\n\n # multiple\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # multiple (diff selector)\n result = store.select_as_multiple(['df1', 'df2'], where=[Term(\n 'index>df2.index[4]')], selector='df2')\n expected = concat([df1, df2], axis=1)\n expected = expected[5:]\n tm.assert_frame_equal(result, expected)\n\n # test excpection for diff rows\n store.append('df3', 
tm.makeTimeDataFrame(nper=50))\n self.assertRaises(ValueError, store.select_as_multiple,\n ['df1','df3'], where=['A>0', 'B>0'], selector='df1')\n\n def test_start_stop(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n store.append('df', df)\n\n result = store.select(\n 'df', [Term(\"columns=['A']\")], start=0, stop=5)\n expected = df.ix[0:4, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # out of range\n result = store.select(\n 'df', [Term(\"columns=['A']\")], start=30, stop=40)\n assert(len(result) == 0)\n assert(type(result) == DataFrame)\n\n def test_select_filter_corner(self):\n\n df = DataFrame(np.random.randn(50, 100))\n df.index = ['%.3d' % c for c in df.index]\n df.columns = ['%.3d' % c for c in df.columns]\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n\n crit = Term('columns=df.columns[:75]')\n result = store.select('frame', [crit])\n tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])\n\n def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):\n\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n\n def _check_double_roundtrip(self, obj, comparator, compression=False,\n **kwargs):\n options = {}\n if compression:\n options['complib'] = compression or _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n store['obj'] = retrieved\n again = store['obj']\n comparator(again, obj, **kwargs)\n\n def _check_roundtrip_table(self, obj, comparator, compression=False):\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store.put('obj', obj, format='table')\n retrieved = store['obj']\n # sorted_obj = _test_sort(obj)\n comparator(retrieved, obj)\n\n def test_multiple_open_close(self):\n # GH 4409, open & close multiple times\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path,'df',mode='w',format='table')\n\n # single\n store = HDFStore(path)\n self.assert_('CLOSED' not in str(store))\n self.assert_(store.is_open)\n store.close()\n self.assert_('CLOSED' in str(store))\n self.assert_(not store.is_open)\n\n with ensure_clean_path(self.path) as path:\n\n if pytables._table_file_open_policy_is_strict:\n\n # multiples\n store1 = HDFStore(path)\n def f():\n HDFStore(path)\n self.assertRaises(ValueError, f)\n store1.close()\n\n else:\n\n # multiples\n store1 = HDFStore(path)\n store2 = HDFStore(path)\n\n self.assert_('CLOSED' not in str(store1))\n self.assert_('CLOSED' not in str(store2))\n self.assert_(store1.is_open)\n self.assert_(store2.is_open)\n\n store1.close()\n self.assert_('CLOSED' in str(store1))\n self.assert_(not store1.is_open)\n self.assert_('CLOSED' not in str(store2))\n self.assert_(store2.is_open)\n\n store2.close()\n self.assert_('CLOSED' in str(store1))\n self.assert_('CLOSED' in str(store2))\n self.assert_(not store1.is_open)\n self.assert_(not store2.is_open)\n\n # nested close\n store = HDFStore(path,mode='w')\n store.append('df',df)\n\n store2 = HDFStore(path)\n store2.append('df2',df)\n store2.close()\n self.assert_('CLOSED' in str(store2))\n self.assert_(not store2.is_open)\n\n store.close()\n 
self.assert_('CLOSED' in str(store))\n self.assert_(not store.is_open)\n\n # double closing\n store = HDFStore(path,mode='w')\n store.append('df', df)\n\n store2 = HDFStore(path)\n store.close()\n self.assert_('CLOSED' in str(store))\n self.assert_(not store.is_open)\n\n store2.close()\n self.assert_('CLOSED' in str(store2))\n self.assert_(not store2.is_open)\n\n # ops on a closed store\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path,'df',mode='w',format='table')\n\n store = HDFStore(path)\n store.close()\n\n self.assertRaises(ClosedFileError, store.keys)\n self.assertRaises(ClosedFileError, lambda : 'df' in store)\n self.assertRaises(ClosedFileError, lambda : len(store))\n self.assertRaises(ClosedFileError, lambda : store['df'])\n self.assertRaises(ClosedFileError, lambda : store.df)\n self.assertRaises(ClosedFileError, store.select, 'df')\n self.assertRaises(ClosedFileError, store.get, 'df')\n self.assertRaises(ClosedFileError, store.append, 'df2', df)\n self.assertRaises(ClosedFileError, store.put, 'df3', df)\n self.assertRaises(ClosedFileError, store.get_storer, 'df2')\n self.assertRaises(ClosedFileError, store.remove, 'df2')\n\n def f():\n store.select('df')\n tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)\n\n def test_pytables_native_read(self):\n\n try:\n store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')\n d2 = store['detector/readout']\n assert isinstance(d2, DataFrame)\n finally:\n safe_close(store)\n\n try:\n store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')\n str(store)\n d1 = store['detector']\n assert isinstance(d1, DataFrame)\n finally:\n safe_close(store)\n\n def test_legacy_read(self):\n try:\n store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')\n store['a']\n store['b']\n store['c']\n store['d']\n finally:\n safe_close(store)\n\n def test_legacy_table_read(self):\n # legacy table types\n try:\n store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')\n store.select('df1')\n store.select('df2')\n store.select('wp1')\n\n # force the frame\n store.select('df2', typ='legacy_frame')\n\n # old version warning\n with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):\n self.assertRaises(\n Exception, store.select, 'wp1', Term('minor_axis=B'))\n\n df2 = store.select('df2')\n result = store.select('df2', Term('index>df2.index[2]'))\n expected = df2[df2.index > df2.index[2]]\n assert_frame_equal(expected, result)\n\n finally:\n safe_close(store)\n\n def test_legacy_0_10_read(self):\n # legacy from 0.10\n try:\n store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')\n str(store)\n for k in store.keys():\n store.select(k)\n finally:\n safe_close(store)\n\n def test_legacy_0_11_read(self):\n # legacy from 0.11\n try:\n path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')\n store = HDFStore(tm.get_data_path(path), 'r')\n str(store)\n assert 'df' in store\n assert 'df1' in store\n assert 'mi' in store\n df = store.select('df')\n df1 = store.select('df1')\n mi = store.select('mi')\n assert isinstance(df, DataFrame)\n assert isinstance(df1, DataFrame)\n assert isinstance(mi, DataFrame)\n finally:\n safe_close(store)\n\n def test_copy(self):\n\n def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):\n try:\n if f is None:\n f = tm.get_data_path(os.path.join('legacy_hdf',\n 'legacy_0.10.h5'))\n\n\n store = HDFStore(f, 'r')\n\n if new_f is None:\n import tempfile\n fd, new_f = tempfile.mkstemp()\n\n 
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)\n\n # check keys\n if keys is None:\n keys = store.keys()\n self.assert_(set(keys) == set(tstore.keys()))\n\n # check indicies & nrows\n for k in tstore.keys():\n if tstore.get_storer(k).is_table:\n new_t = tstore.get_storer(k)\n orig_t = store.get_storer(k)\n\n self.assert_(orig_t.nrows == new_t.nrows)\n\n # check propindixes\n if propindexes:\n for a in orig_t.axes:\n if a.is_indexed:\n self.assert_(new_t[a.name].is_indexed == True)\n\n finally:\n safe_close(store)\n safe_close(tstore)\n try:\n os.close(fd)\n except:\n pass\n safe_remove(new_f)\n\n do_copy()\n do_copy(keys = ['/a','/b','/df1_mixed'])\n do_copy(propindexes = False)\n\n # new table\n df = tm.makeDataFrame()\n\n try:\n st = HDFStore(self.path)\n st.append('df', df, data_columns = ['A'])\n st.close()\n do_copy(f = self.path)\n do_copy(f = self.path, propindexes = False)\n finally:\n safe_remove(self.path)\n\n def test_legacy_table_write(self):\n raise nose.SkipTest(\"skipping for now\")\n\n store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')\n\n df = tm.makeDataFrame()\n wp = tm.makePanel()\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n store.append('mi', df)\n\n df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))\n store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })\n store.append('wp', wp)\n\n store.close()\n\n def test_store_datetime_fractional_secs(self):\n\n with ensure_clean_store(self.path) as store:\n dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)\n series = Series([0], [dt])\n store['a'] = series\n self.assertEquals(store['a'].index[0], dt)\n\n def test_tseries_indices_series(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n assert_series_equal(result, ser)\n self.assertEquals(type(result.index), type(ser.index))\n self.assertEquals(result.index.freq, ser.index.freq)\n\n idx = tm.makePeriodIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n assert_series_equal(result, ser)\n self.assertEquals(type(result.index), type(ser.index))\n self.assertEquals(result.index.freq, ser.index.freq)\n\n def test_tseries_indices_frame(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n self.assertEquals(type(result.index), type(df.index))\n self.assertEquals(result.index.freq, df.index.freq)\n\n idx = tm.makePeriodIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n self.assertEquals(type(result.index), type(df.index))\n self.assertEquals(result.index.freq, df.index.freq)\n\n def test_unicode_index(self):\n\n unicode_values = [u('\\u03c3'), u('\\u03c3\\u03c3')]\n def f():\n s = Series(np.random.randn(len(unicode_values)), unicode_values)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n compat_assert_produces_warning(PerformanceWarning,f)\n\n def test_store_datetime_mixed(self):\n\n df = DataFrame(\n {'a': [1, 2, 3], 'b': [1., 2., 3.], 
'c': ['a', 'b', 'c']})\n ts = tm.makeTimeSeries()\n df['d'] = ts.index[:3]\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n # def test_cant_write_multiindex_table(self):\n # # for now, #1848\n # df = DataFrame(np.random.randn(10, 4),\n # index=[np.arange(5).repeat(2),\n # np.tile(np.arange(2), 5)])\n\n # self.assertRaises(Exception, store.put, 'foo', df, format='table')\n\n def test_append_with_diff_col_name_types_raises_value_error(self):\n df = DataFrame(np.random.randn(10, 1))\n df2 = DataFrame({'a': np.random.randn(10)})\n df3 = DataFrame({(1, 2): np.random.randn(10)})\n df4 = DataFrame({('1', 2): np.random.randn(10)})\n df5 = DataFrame({('1', 2, object): np.random.randn(10)})\n\n with ensure_clean_store(self.path) as store:\n name = 'df_%s' % tm.rands(10)\n store.append(name, df)\n\n for d in (df2, df3, df4, df5):\n with tm.assertRaises(ValueError):\n store.append(name, d)\n\n\ndef _test_sort(obj):\n if isinstance(obj, DataFrame):\n return obj.reindex(sorted(obj.index))\n elif isinstance(obj, Panel):\n return obj.reindex(major=sorted(obj.major_axis))\n else:\n raise ValueError('type not supported here')\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "numpy.random.rand", "pandas.DatetimeIndex", "pandas.compat.u", "pandas.Timestamp", "pandas.io.pytables.HDFStore", "pandas.concat", "pandas.compat.range", "pandas.io.pytables.read_hdf", "numpy.random.binomial", "numpy.random.random_integers", "pandas.set_option", "pandas.util.testing.makePeriodIndex", "pandas.util.testing.makeStringSeries", "pandas.DataFrame", "pandas.util.testing.assertRaises", "pandas.util.testing.assert_panel4d_equal", "pandas.util.testing.skip_if_no_package", "pandas.util.testing.get_data_path", "numpy.arange", "pandas.MultiIndex", "pandas.util.testing.makeTimeSeries", "pandas.bdate_range", "pandas.util.testing.makeDataFrame", "pandas.util.testing.assert_produces_warning", "numpy.random.randint", "pandas.util.testing.makeDateIndex", "pandas.util.testing.assert_panel_equal", "numpy.array", "numpy.zeros", "pandas.util.testing.assert_almost_equal", "pandas.MultiIndex.from_tuples", "numpy.random.randn", "pandas.util.testing.rands", "pandas.io.pytables.get_store", "pandas.compat.lrange", "pandas.io.pytables.Term", "pandas.isnull", "pandas.Index", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.makeTimeDataFrame", "numpy.asarray", "numpy.random.seed", "pandas.date_range", "pandas.util.testing.makePanel4D", "pandas.util.testing.assert_series_equal", "pandas.util.testing.makePanel", "pandas.Series", "pandas.util.testing.assertRaisesRegexp" ] ]
sandiegodata-projects/downtown-partnership
[ "2ee6f01a8efd5ee7452939dcb3d273e890c69a92" ]
[ "src/dtcv/src/dtcv/pt_lib.py" ]
[ "import numpy as np\nfrom shapely.geometry import Polygon\n\n# Pad and unpack the transformation matrix to be 3x1 or 3x3,\n# necessary for it to handle both rotation and translation\npad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])\nunpad = lambda x: x[:, :-1]\n\ndef norm(pri):\n pri = pri.copy()\n pri[:, 0] = pri[:, 0] - pri[:, 0].mean()\n pri[:, 0] = pri[:, 0] / pri[:, 0].max()\n pri[:, 1] = pri[:, 1] - pri[:, 1].mean()\n pri[:, 1] = pri[:, 1] / pri[:, 1].max()\n return pri\n\n\ndef poly_to_array(poly):\n\n try:\n return np.array(poly.exterior.coords)[:-1]\n except AttributeError:\n return poly\n\ndef reorder_points(v):\n \"\"\"Reorder points to ensure the shape is valid. The only works if all of the points\n are on the convex hull, which is true for our shapes. \"\"\"\n\n from math import sqrt\n\n try:\n points = poly_to_array(v).tolist()\n except AttributeError:\n points = v # Hopefully already a list.\n\n points = poly_to_array(Polygon(v).convex_hull).tolist()\n\n # Find the point closest to the origin\n # Norming ensures origin finding is consistent. I guess.\n normed_points = norm(np.array(points)) + 10 # +10 to void div by zero in distance\n\n mp = next(iter(sorted(normed_points, key=lambda p: sqrt(p[0] ** 2 + p[1] ** 1))))\n\n # Rotate the list of points so the mp point is first in the list\n mpos = normed_points.tolist().index(mp.tolist())\n points = points[mpos:] + points[:mpos]\n\n return np.array(points)\n\n\ndef solve_transform(primary, secondary):\n\n primary = reorder_points(poly_to_array(primary))\n secondary = reorder_points(poly_to_array(secondary))\n\n A, res, rank, s = np.linalg.lstsq(pad(primary), pad(secondary), rcond=None)\n\n A[np.abs(A) < 1e-12] = 0 # Get rid of really small values.\n\n return A, primary, secondary\n\ndef invert_point(p):\n x,y = p\n\n y = -y + 2000\n return (x,y)\n\ndef invert_poly(poly):\n\n return Polygon( invert_point(p) for p in poly_to_array(poly))\n\ndef transform_point(A, x):\n # The extra np.array in pad() presumes this is a point, not a matrix\n # The final [0] unpacks back to a point.\n\n return unpad(np.dot(pad(np.array([x])), A))[0]\n\ndef transform_points(A, x):\n return unpad(np.dot(pad(x), A))\n\ndef get_transform(r):\n A, transform = solve_transform(r.source, r.dest)\n return transform\n\n\ndef get_matrix(r):\n A, pri, sec = solve_transform(r.source, r.dest)\n return A\n\n\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.abs" ] ]
gluesolutions/glue-vispy-viewers
[ "7ce0c55989eee9dc4056e5ce1547591cbaab86b6" ]
[ "glue_vispy_viewers/common/axes.py" ]
[ "from vispy import scene\nfrom vispy.visuals.transforms import STTransform, ChainTransform\n\nfrom glue_vispy_viewers.compat.axis import Axis\n\nimport numpy as np\n\n\nclass AxesVisual3D(object):\n\n def __init__(self, view=None, transform=None, **kwargs):\n\n self.view = view\n\n # Add a 3D cube to show us the unit cube. The 1.001 factor is to make\n # sure that the grid lines are not 'hidden' by volume renderings on the\n # front side due to numerical precision.\n cube_verts = np.array([[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]])\n cube_edge_indices = np.array([[0, 1], [0, 3], [0, 5], [1, 2], [1, 6], [2, 3],\n [2, 7], [3, 4], [4, 5], [4, 7], [5, 6], [6, 7]])\n self.axis = scene.visuals.Line(cube_verts, parent=self.view.scene,\n color=kwargs['axis_color'], connect=cube_edge_indices)\n\n self.axis.transform = transform\n\n self.xax = Axis(pos=[[-1.0, 0], [1.0, 0]],\n tick_direction=(0, -1),\n parent=self.view.scene, axis_label='X',\n anchors=['center', 'middle'], **kwargs)\n\n self.yax = Axis(pos=[[0, -1.0], [0, 1.0]],\n tick_direction=(-1, 0),\n parent=self.view.scene, axis_label='Y',\n anchors=['center', 'middle'], **kwargs)\n\n self.zax = Axis(pos=[[0, -1.0], [0, 1.0]],\n tick_direction=(-1, 0),\n parent=self.view.scene, axis_label='Z',\n anchors=['center', 'middle'], **kwargs)\n\n self.xtr = STTransform()\n self.xtr = self.xtr.as_matrix()\n self.xtr.rotate(45, (1, 0, 0))\n self.xtr.translate((0, -1., -1.))\n\n self.ytr = STTransform()\n self.ytr = self.ytr.as_matrix()\n self.ytr.rotate(-45, (0, 1, 0))\n self.ytr.translate((-1, 0, -1.))\n\n self.ztr = STTransform()\n self.ztr = self.ztr.as_matrix()\n self.ztr.rotate(45, (0, 1, 0))\n self.ztr.rotate(90, (1, 0, 0))\n self.ztr.translate((-1, -1, 0.))\n\n self.xax.transform = ChainTransform(transform, self.xtr)\n self.yax.transform = ChainTransform(transform, self.ytr)\n self.zax.transform = ChainTransform(transform, self.ztr)\n\n @property\n def tick_color(self):\n return self.xax.tick_color\n\n @tick_color.setter\n def tick_color(self, value):\n self.xax.tick_color = value\n self.yax.tick_color = value\n self.zax.tick_color = value\n\n @property\n def label_color(self):\n return self._label_color\n\n @label_color.setter\n def label_color(self, value):\n self.xax.label_color = value\n self.yax.label_color = value\n self.zax.label_color = value\n\n @property\n def axis_color(self):\n return self._axis_color\n\n @axis_color.setter\n def axis_color(self, value):\n self.axis.color = value\n\n @property\n def tick_font_size(self):\n return self.xax.tick_font_size\n\n @tick_font_size.setter\n def tick_font_size(self, value):\n self.xax.tick_font_size = value\n self.yax.tick_font_size = value\n self.zax.tick_font_size = value\n\n @property\n def axis_font_size(self):\n return self.xax.axis_font_size\n\n @axis_font_size.setter\n def axis_font_size(self, value):\n self.xax.axis_font_size = value\n self.yax.axis_font_size = value\n self.zax.axis_font_size = value\n\n @property\n def xlabel(self):\n return self.xax.axis_label\n\n @xlabel.setter\n def xlabel(self, value):\n self.xax.axis_label = value\n\n @property\n def ylabel(self):\n return self.yax.axis_label\n\n @ylabel.setter\n def ylabel(self, value):\n self.yax.axis_label = value\n\n @property\n def zlabel(self):\n return self.zax.axis_label\n\n @zlabel.setter\n def zlabel(self, value):\n self.zax.axis_label = value\n\n @property\n def xlim(self):\n return self.xax.domain\n\n @xlim.setter\n def xlim(self, value):\n self.xax.domain = 
value\n\n @property\n def ylim(self):\n return self.yax.domain\n\n @ylim.setter\n def ylim(self, value):\n self.yax.domain = value\n\n @property\n def zlim(self):\n return self.zax.domain\n\n @zlim.setter\n def zlim(self, value):\n self.zax.domain = value\n\n @property\n def parent(self):\n return self.axis.parent\n\n @parent.setter\n def parent(self, value):\n self.axis.parent = value\n self.xax.parent = value\n self.yax.parent = value\n self.zax.parent = value\n" ]
[ [ "numpy.array" ] ]
PMatthaei/pymarl
[ "eeec978e930c9e36d8102724c3b4d0459547cb36" ]
[ "src/components/action_selectors.py" ]
[ "import torch as th\nfrom torch.distributions import Categorical\nfrom .epsilon_schedules import DecayThenFlatSchedule\n\nREGISTRY = {}\n\n\nclass MultinomialActionSelector():\n\n def __init__(self, args):\n self.args = args\n\n self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,\n decay=\"linear\")\n self.epsilon = self.schedule.eval(0)\n self.test_greedy = getattr(args, \"test_greedy\", True)\n\n def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):\n masked_policies = agent_inputs.clone()\n masked_policies[avail_actions == 0.0] = 0.0\n\n self.epsilon = self.schedule.eval(t_env)\n\n if test_mode and self.test_greedy:\n picked_actions = masked_policies.max(dim=2)[1]\n else:\n picked_actions = Categorical(masked_policies).sample().long()\n\n return picked_actions\n\n\nREGISTRY[\"multinomial\"] = MultinomialActionSelector\n\n\nclass EpsilonGreedyActionSelector():\n\n def __init__(self, args):\n self.args = args\n\n self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,\n decay=\"linear\")\n self.epsilon = self.schedule.eval(0)\n\n def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):\n\n # Assuming agent_inputs is a batch of Q-Values for each agent bav\n self.epsilon = self.schedule.eval(t_env)\n\n if test_mode:\n # Greedy action selection only\n self.epsilon = 0.0\n\n # mask actions that are excluded from selection -> use avail_actions for this\n masked_q_values = agent_inputs.clone()\n masked_q_values[avail_actions == 0.0] = -float(\"inf\") # should never be selected!\n\n # Randomly decide for each agent to go with random action and explore\n random_numbers = th.rand_like(agent_inputs[:, :, 0])\n pick_random = (random_numbers < self.epsilon).long() # pick can either be 0 or 1 (=use random)\n # Generate random action from available ones\n random_actions = Categorical(avail_actions.float()).sample().long()\n\n # If pick_random = 1 use the random action else use the action chosen by q value (masked by available ones)\n picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]\n return picked_actions\n\n\nREGISTRY[\"epsilon_greedy\"] = EpsilonGreedyActionSelector\n" ]
[ [ "torch.distributions.Categorical", "torch.rand_like" ] ]
tommy-qichang/FedML
[ "55ebb00ab11a70ac074e613c6ab2c09f053db70c" ]
[ "fedml_api/model/cv/resnetLab.py" ]
[ "import os, sys, math\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n# add the FedML root directory to the python path\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), \"../../../\")))\n\nfrom fedml_api.model.cv.batchnorm_utils import SynchronizedBatchNorm2d\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n dilation=dilation, padding=dilation, bias=False)\n self.bn2 = BatchNorm(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):\n self.inplanes = 64\n super(ResNet, self).__init__()\n blocks = [1, 2, 4]\n if output_stride == 16:\n strides = [1, 2, 2, 1]\n dilations = [1, 1, 1, 2]\n elif output_stride == 8:\n strides = [1, 2, 1, 1]\n dilations = [1, 1, 2, 4]\n else:\n raise NotImplementedError\n\n # Modules\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = BatchNorm(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)\n self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)\n # self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)\n self._init_weight()\n\n if pretrained:\n self._load_pretrained_model()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))\n\n return nn.Sequential(*layers)\n\n def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm(planes * 
block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,\n downsample=downsample, BatchNorm=BatchNorm))\n self.inplanes = planes * block.expansion\n for i in range(1, len(blocks)):\n layers.append(block(self.inplanes, planes, stride=1,\n dilation=blocks[i]*dilation, BatchNorm=BatchNorm))\n\n return nn.Sequential(*layers)\n\n def forward(self, input):\n x = self.conv1(input)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n low_level_feat = x\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x, low_level_feat\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _load_pretrained_model(self):\n pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')\n model_dict = {}\n state_dict = self.state_dict()\n for k, v in pretrain_dict.items():\n if k in state_dict:\n model_dict[k] = v\n state_dict.update(model_dict)\n self.load_state_dict(state_dict)\n\ndef ResNet101(output_stride, BatchNorm, pretrained=True):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=pretrained)\n return model\n\nif __name__ == \"__main__\":\n import torch\n model = ResNet101(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=8)\n input = torch.rand(1, 3, 512, 512)\n output, low_level_feat = model(input)\n print(output.size())\n print(low_level_feat.size())" ]
[ [ "torch.rand", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
avinashsai/metrics
[ "e383af24085bf7c0bd4e08db2757c25ff4feccdc" ]
[ "tests/classification/test_accuracy.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\n\nimport numpy as np\nimport pytest\nfrom sklearn.metrics import accuracy_score as sk_accuracy\nfrom torch import tensor\n\nfrom tests.classification.inputs import _input_binary, _input_binary_prob\nfrom tests.classification.inputs import _input_multiclass as _input_mcls\nfrom tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob\nfrom tests.classification.inputs import _input_multidim_multiclass as _input_mdmc\nfrom tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob\nfrom tests.classification.inputs import _input_multilabel as _input_mlb\nfrom tests.classification.inputs import _input_multilabel_multidim as _input_mlmd\nfrom tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob\nfrom tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob\nfrom tests.helpers import seed_all\nfrom tests.helpers.testers import THRESHOLD, MetricTester\nfrom torchmetrics import Accuracy\nfrom torchmetrics.functional import accuracy\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import DataType\n\nseed_all(42)\n\n\ndef _sk_accuracy(preds, target, subset_accuracy):\n sk_preds, sk_target, mode = _input_format_classification(preds, target, threshold=THRESHOLD)\n sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy()\n\n if mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy:\n sk_preds, sk_target = np.transpose(sk_preds, (0, 2, 1)), np.transpose(sk_target, (0, 2, 1))\n sk_preds, sk_target = sk_preds.reshape(-1, sk_preds.shape[2]), sk_target.reshape(-1, sk_target.shape[2])\n elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy:\n return np.all(sk_preds == sk_target, axis=(1, 2)).mean()\n elif mode == DataType.MULTILABEL and not subset_accuracy:\n sk_preds, sk_target = sk_preds.reshape(-1), sk_target.reshape(-1)\n\n return sk_accuracy(y_true=sk_target, y_pred=sk_preds)\n\n\n@pytest.mark.parametrize(\n \"preds, target, subset_accuracy\",\n [\n (_input_binary_prob.preds, _input_binary_prob.target, False),\n (_input_binary.preds, _input_binary.target, False),\n (_input_mlb_prob.preds, _input_mlb_prob.target, True),\n (_input_mlb_prob.preds, _input_mlb_prob.target, False),\n (_input_mlb.preds, _input_mlb.target, True),\n (_input_mlb.preds, _input_mlb.target, False),\n (_input_mcls_prob.preds, _input_mcls_prob.target, False),\n (_input_mcls.preds, _input_mcls.target, False),\n (_input_mdmc_prob.preds, _input_mdmc_prob.target, False),\n (_input_mdmc_prob.preds, _input_mdmc_prob.target, True),\n (_input_mdmc.preds, _input_mdmc.target, False),\n (_input_mdmc.preds, _input_mdmc.target, True),\n (_input_mlmd_prob.preds, _input_mlmd_prob.target, True),\n (_input_mlmd_prob.preds, _input_mlmd_prob.target, False),\n (_input_mlmd.preds, _input_mlmd.target, True),\n (_input_mlmd.preds, _input_mlmd.target, False),\n 
],\n)\nclass TestAccuracies(MetricTester):\n\n @pytest.mark.parametrize(\"ddp\", [False, True])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [False, True])\n def test_accuracy_class(self, ddp, dist_sync_on_step, preds, target, subset_accuracy):\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=Accuracy,\n sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),\n dist_sync_on_step=dist_sync_on_step,\n metric_args={\n \"threshold\": THRESHOLD,\n \"subset_accuracy\": subset_accuracy\n },\n )\n\n def test_accuracy_fn(self, preds, target, subset_accuracy):\n self.run_functional_metric_test(\n preds,\n target,\n metric_functional=accuracy,\n sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),\n metric_args={\n \"threshold\": THRESHOLD,\n \"subset_accuracy\": subset_accuracy\n },\n )\n\n def test_accuracy_differentiability(self, preds, target, subset_accuracy):\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=Accuracy,\n metric_functional=accuracy,\n metric_args={\n \"threshold\": THRESHOLD,\n \"subset_accuracy\": subset_accuracy\n }\n )\n\n\n_l1to4 = [0.1, 0.2, 0.3, 0.4]\n_l1to4t3 = np.array([_l1to4, _l1to4, _l1to4])\n_l1to4t3_mcls = [_l1to4t3.T, _l1to4t3.T, _l1to4t3.T]\n\n# The preds in these examples always put highest probability on class 3, second highest on class 2,\n# third highest on class 1, and lowest on class 0\n_topk_preds_mcls = tensor([_l1to4t3, _l1to4t3]).float()\n_topk_target_mcls = tensor([[1, 2, 3], [2, 1, 0]])\n\n# This is like for MC case, but one sample in each batch is sabotaged with 0 class prediction :)\n_topk_preds_mdmc = tensor([_l1to4t3_mcls, _l1to4t3_mcls]).float()\n_topk_target_mdmc = tensor([[[1, 1, 0], [2, 2, 2], [3, 3, 3]], [[2, 2, 0], [1, 1, 1], [0, 0, 0]]])\n\n\n# Replace with a proper sk_metric test once sklearn 0.24 hits :)\n@pytest.mark.parametrize(\n \"preds, target, exp_result, k, subset_accuracy\",\n [\n (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, False),\n (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, False),\n (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, False),\n (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, True),\n (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, True),\n (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, True),\n (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, False),\n (_topk_preds_mdmc, _topk_target_mdmc, 8 / 18, 2, False),\n (_topk_preds_mdmc, _topk_target_mdmc, 13 / 18, 3, False),\n (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, True),\n (_topk_preds_mdmc, _topk_target_mdmc, 2 / 6, 2, True),\n (_topk_preds_mdmc, _topk_target_mdmc, 3 / 6, 3, True),\n ],\n)\ndef test_topk_accuracy(preds, target, exp_result, k, subset_accuracy):\n topk = Accuracy(top_k=k, subset_accuracy=subset_accuracy)\n\n for batch in range(preds.shape[0]):\n topk(preds[batch], target[batch])\n\n assert topk.compute() == exp_result\n\n # Test functional\n total_samples = target.shape[0] * target.shape[1]\n\n preds = preds.view(total_samples, 4, -1)\n target = target.view(total_samples, -1)\n\n assert accuracy(preds, target, top_k=k, subset_accuracy=subset_accuracy) == exp_result\n\n\n# Only MC and MDMC with probs input type should be accepted for top_k\n@pytest.mark.parametrize(\n \"preds, target\",\n [\n (_input_binary_prob.preds, _input_binary_prob.target),\n (_input_binary.preds, _input_binary.target),\n (_input_mlb_prob.preds, _input_mlb_prob.target),\n (_input_mlb.preds, _input_mlb.target),\n (_input_mcls.preds, _input_mcls.target),\n 
(_input_mdmc.preds, _input_mdmc.target),\n (_input_mlmd_prob.preds, _input_mlmd_prob.target),\n (_input_mlmd.preds, _input_mlmd.target),\n ],\n)\ndef test_topk_accuracy_wrong_input_types(preds, target):\n topk = Accuracy(top_k=1)\n\n with pytest.raises(ValueError):\n topk(preds[0], target[0])\n\n with pytest.raises(ValueError):\n accuracy(preds[0], target[0], top_k=1)\n\n\n@pytest.mark.parametrize(\"top_k, threshold\", [(0, 0.5), (None, 1.5)])\ndef test_wrong_params(top_k, threshold):\n preds, target = _input_mcls_prob.preds, _input_mcls_prob.target\n\n with pytest.raises(ValueError):\n acc = Accuracy(threshold=threshold, top_k=top_k)\n acc(preds, target)\n acc.compute()\n\n with pytest.raises(ValueError):\n accuracy(preds, target, threshold=threshold, top_k=top_k)\n\n\n_ignoreindex_binary_preds = tensor([1, 0, 1, 1, 0, 1, 0])\n_ignoreindex_target_preds = tensor([1, 1, 0, 1, 1, 1, 1])\n_ignoreindex_binary_preds_prob = tensor([0.3, 0.6, 0.1, 0.3, 0.7, 0.9, 0.4])\n_ignoreindex_mc_target = tensor([0, 1, 2])\n_ignoreindex_mc_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])\n_ignoreindex_ml_target = tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]])\n_ignoreindex_ml_preds = tensor([[0.9, 0.8, 0.75], [0.6, 0.7, 0.1], [0.6, 0.1, 0.2]])\n\n\n@pytest.mark.parametrize(\n \"preds, target, ignore_index, exp_result, subset_accuracy\",\n [\n (_ignoreindex_binary_preds, _ignoreindex_target_preds, 0, 3 / 6, False),\n (_ignoreindex_binary_preds, _ignoreindex_target_preds, 1, 0, False),\n (_ignoreindex_binary_preds, _ignoreindex_target_preds, None, 3 / 6, False),\n (_ignoreindex_binary_preds_prob, _ignoreindex_target_preds, 0, 3 / 6, False),\n (_ignoreindex_binary_preds_prob, _ignoreindex_target_preds, 1, 1, False),\n (_ignoreindex_mc_preds, _ignoreindex_mc_target, 0, 1, False),\n (_ignoreindex_mc_preds, _ignoreindex_mc_target, 1, 1 / 2, False),\n (_ignoreindex_mc_preds, _ignoreindex_mc_target, 2, 1 / 2, False),\n (_ignoreindex_ml_preds, _ignoreindex_ml_target, 0, 2 / 3, False),\n (_ignoreindex_ml_preds, _ignoreindex_ml_target, 1, 2 / 3, False),\n ]\n)\ndef test_ignore_index(preds, target, ignore_index, exp_result, subset_accuracy):\n ignoreindex = Accuracy(ignore_index=ignore_index, subset_accuracy=subset_accuracy)\n\n for batch in range(preds.shape[0]):\n ignoreindex(preds[batch], target[batch])\n\n assert ignoreindex.compute() == exp_result\n\n assert accuracy(preds, target, ignore_index=ignore_index, subset_accuracy=subset_accuracy) == exp_result\n" ]
[ [ "numpy.array", "sklearn.metrics.accuracy_score", "torch.tensor", "numpy.transpose", "numpy.all" ] ]
DeuroIO/Deuro-scikit-learn
[ "9dd09a19593d1224077fe0d1a754aed936269528", "9dd09a19593d1224077fe0d1a754aed936269528", "9dd09a19593d1224077fe0d1a754aed936269528", "9dd09a19593d1224077fe0d1a754aed936269528", "9dd09a19593d1224077fe0d1a754aed936269528" ]
[ "sklearn/linear_model/coordinate_descent.py", "examples/cluster/plot_adjusted_for_chance_measures.py", "sklearn/preprocessing/tests/test_encoders.py", "examples/linear_model/plot_logistic_path.py", "examples/cluster/plot_agglomerative_clustering.py" ]
[ "# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Fabian Pedregosa <fabian.pedregosa@inria.fr>\n# Olivier Grisel <olivier.grisel@ensta.org>\n# Gael Varoquaux <gael.varoquaux@inria.fr>\n#\n# License: BSD 3 clause\n\nimport sys\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom .base import LinearModel, _pre_fit\nfrom ..base import RegressorMixin\nfrom .base import _preprocess_data\nfrom ..utils import check_array, check_X_y\nfrom ..utils.validation import check_random_state\nfrom ..model_selection import check_cv\nfrom ..utils import Parallel, delayed, effective_n_jobs\nfrom ..externals import six\nfrom ..externals.six.moves import xrange\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.fixes import _joblib_parallel_args\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import column_or_1d\nfrom ..exceptions import ConvergenceWarning\n\nfrom . import cd_fast\n\n\n###############################################################################\n# Paths functions\n\ndef _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,\n eps=1e-3, n_alphas=100, normalize=False, copy_X=True):\n \"\"\" Compute the grid of alpha values for elastic net parameter search\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data. Pass directly as Fortran-contiguous data to avoid\n unnecessary memory duplication\n\n y : ndarray, shape (n_samples,)\n Target values\n\n Xy : array-like, optional\n Xy = np.dot(X.T, y) that can be precomputed.\n\n l1_ratio : float\n The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.\n For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not\n supported) ``For l1_ratio = 1`` it is an L1 penalty. For\n ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.\n\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n fit_intercept : boolean, default True\n Whether to fit an intercept or not\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n \"\"\"\n if l1_ratio == 0:\n raise ValueError(\"Automatic alpha grid generation is not supported for\"\n \" l1_ratio=0. 
Please supply a grid by providing \"\n \"your estimator with the appropriate `alphas=` \"\n \"argument.\")\n n_samples = len(y)\n\n sparse_center = False\n if Xy is None:\n X_sparse = sparse.isspmatrix(X)\n sparse_center = X_sparse and (fit_intercept or normalize)\n X = check_array(X, 'csc',\n copy=(copy_X and fit_intercept and not X_sparse))\n if not X_sparse:\n # X can be touched inplace thanks to the above line\n X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,\n normalize, copy=False)\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\n\n if sparse_center:\n # Workaround to find alpha_max for sparse matrices.\n # since we should not destroy the sparsity of such matrices.\n _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,\n normalize,\n return_mean=True)\n mean_dot = X_offset * np.sum(y)\n\n if Xy.ndim == 1:\n Xy = Xy[:, np.newaxis]\n\n if sparse_center:\n if fit_intercept:\n Xy -= mean_dot[:, np.newaxis]\n if normalize:\n Xy /= X_scale[:, np.newaxis]\n\n alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /\n (n_samples * l1_ratio))\n\n if alpha_max <= np.finfo(float).resolution:\n alphas = np.empty(n_alphas)\n alphas.fill(np.finfo(float).resolution)\n return alphas\n\n return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),\n num=n_alphas)[::-1]\n\n\ndef lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,\n precompute='auto', Xy=None, copy_X=True, coef_init=None,\n verbose=False, return_n_iter=False, positive=False, **params):\n \"\"\"Compute Lasso path with coordinate descent\n\n The Lasso optimization function varies for mono and multi-outputs.\n\n For mono-output tasks it is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n For multi-output tasks it is::\n\n (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21\n\n Where::\n\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <lasso>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data. Pass directly as Fortran-contiguous data to avoid\n unnecessary memory duplication. If ``y`` is mono-output then ``X``\n can be sparse.\n\n y : ndarray, shape (n_samples,), or (n_samples, n_outputs)\n Target values\n\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n alphas : ndarray, optional\n List of alphas where to compute the models.\n If ``None`` alphas are set automatically\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n Xy : array-like, optional\n Xy = np.dot(X.T, y) that can be precomputed. 
It is useful\n only when the Gram matrix is precomputed.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n coef_init : array, shape (n_features, ) | None\n The initial values of the coefficients.\n\n verbose : bool or integer\n Amount of verbosity.\n\n return_n_iter : bool\n whether to return the number of iterations or not.\n\n positive : bool, default False\n If set to True, forces coefficients to be positive.\n (Only allowed when ``y.ndim == 1``).\n\n **params : kwargs\n keyword arguments passed to the coordinate descent solver.\n\n Returns\n -------\n alphas : array, shape (n_alphas,)\n The alphas along the path where models are computed.\n\n coefs : array, shape (n_features, n_alphas) or \\\n (n_outputs, n_features, n_alphas)\n Coefficients along the path.\n\n dual_gaps : array, shape (n_alphas,)\n The dual gaps at the end of the optimization for each alpha.\n\n n_iters : array-like, shape (n_alphas,)\n The number of iterations taken by the coordinate descent optimizer to\n reach the specified tolerance for each alpha.\n\n Notes\n -----\n For an example, see\n :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py\n <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n\n Note that in certain cases, the Lars solver may be significantly\n faster to implement this functionality. In particular, linear\n interpolation can be used to retrieve model coefficients between the\n values output by lars_path\n\n Examples\n ---------\n\n Comparing lasso_path and lars_path with interpolation:\n\n >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T\n >>> y = np.array([1, 2, 3.1])\n >>> # Use lasso_path to compute a coefficient path\n >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])\n >>> print(coef_path)\n [[0. 0. 0.46874778]\n [0.2159048 0.4425765 0.23689075]]\n\n >>> # Now use lars_path and 1D linear interpolation to compute the\n >>> # same path\n >>> from sklearn.linear_model import lars_path\n >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')\n >>> from scipy import interpolate\n >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],\n ... coef_path_lars[:, ::-1])\n >>> print(coef_path_continuous([5., 1., .5]))\n [[0. 0. 
0.46915237]\n [0.2159048 0.4425765 0.23668876]]\n\n\n See also\n --------\n lars_path\n Lasso\n LassoLars\n LassoCV\n LassoLarsCV\n sklearn.decomposition.sparse_encode\n \"\"\"\n return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,\n alphas=alphas, precompute=precompute, Xy=Xy,\n copy_X=copy_X, coef_init=coef_init, verbose=verbose,\n positive=positive, return_n_iter=return_n_iter, **params)\n\n\ndef enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,\n precompute='auto', Xy=None, copy_X=True, coef_init=None,\n verbose=False, return_n_iter=False, positive=False,\n check_input=True, **params):\n \"\"\"Compute elastic net path with coordinate descent\n\n The elastic net optimization function varies for mono and multi-outputs.\n\n For mono-output tasks it is::\n\n 1 / (2 * n_samples) * ||y - Xw||^2_2\n + alpha * l1_ratio * ||w||_1\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\n\n For multi-output tasks it is::\n\n (1 / (2 * n_samples)) * ||Y - XW||^Fro_2\n + alpha * l1_ratio * ||W||_21\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\n\n Where::\n\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <elastic_net>`.\n\n Parameters\n ----------\n X : {array-like}, shape (n_samples, n_features)\n Training data. Pass directly as Fortran-contiguous data to avoid\n unnecessary memory duplication. If ``y`` is mono-output then ``X``\n can be sparse.\n\n y : ndarray, shape (n_samples,) or (n_samples, n_outputs)\n Target values\n\n l1_ratio : float, optional\n float between 0 and 1 passed to elastic net (scaling between\n l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso\n\n eps : float\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n alphas : ndarray, optional\n List of alphas where to compute the models.\n If None alphas are set automatically\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n Xy : array-like, optional\n Xy = np.dot(X.T, y) that can be precomputed. 
It is useful\n only when the Gram matrix is precomputed.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n coef_init : array, shape (n_features, ) | None\n The initial values of the coefficients.\n\n verbose : bool or integer\n Amount of verbosity.\n\n return_n_iter : bool\n whether to return the number of iterations or not.\n\n positive : bool, default False\n If set to True, forces coefficients to be positive.\n (Only allowed when ``y.ndim == 1``).\n\n check_input : bool, default True\n Skip input validation checks, including the Gram matrix when provided\n assuming there are handled by the caller when check_input=False.\n\n **params : kwargs\n keyword arguments passed to the coordinate descent solver.\n\n Returns\n -------\n alphas : array, shape (n_alphas,)\n The alphas along the path where models are computed.\n\n coefs : array, shape (n_features, n_alphas) or \\\n (n_outputs, n_features, n_alphas)\n Coefficients along the path.\n\n dual_gaps : array, shape (n_alphas,)\n The dual gaps at the end of the optimization for each alpha.\n\n n_iters : array-like, shape (n_alphas,)\n The number of iterations taken by the coordinate descent optimizer to\n reach the specified tolerance for each alpha.\n (Is returned when ``return_n_iter`` is set to True).\n\n Notes\n -----\n For an example, see\n :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py\n <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.\n\n See also\n --------\n MultiTaskElasticNet\n MultiTaskElasticNetCV\n ElasticNet\n ElasticNetCV\n \"\"\"\n # We expect X and y to be already Fortran ordered when bypassing\n # checks\n if check_input:\n X = check_array(X, 'csc', dtype=[np.float64, np.float32],\n order='F', copy=copy_X)\n y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,\n ensure_2d=False)\n if Xy is not None:\n # Xy should be a 1d contiguous array or a 2D C ordered array\n Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,\n ensure_2d=False)\n\n n_samples, n_features = X.shape\n\n multi_output = False\n if y.ndim != 1:\n multi_output = True\n _, n_outputs = y.shape\n\n if multi_output and positive:\n raise ValueError('positive=True is not allowed for multi-output'\n ' (y.ndim != 1)')\n\n # MultiTaskElasticNet does not support sparse matrices\n if not multi_output and sparse.isspmatrix(X):\n if 'X_offset' in params:\n # As sparse matrices are not actually centered we need this\n # to be passed to the CD solver.\n X_sparse_scaling = params['X_offset'] / params['X_scale']\n X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)\n else:\n X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)\n\n # X should be normalized and fit already if function is called\n # from ElasticNet.fit\n if check_input:\n X, y, X_offset, y_offset, X_scale, precompute, Xy = \\\n _pre_fit(X, y, Xy, precompute, normalize=False,\n fit_intercept=False, copy=False, check_input=check_input)\n if alphas is None:\n # No need to normalize of fit_intercept: it has been done\n # above\n alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,\n fit_intercept=False, eps=eps, n_alphas=n_alphas,\n normalize=False, copy_X=False)\n else:\n alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered\n\n n_alphas = len(alphas)\n tol = params.get('tol', 1e-4)\n max_iter = params.get('max_iter', 1000)\n dual_gaps = np.empty(n_alphas)\n n_iters = []\n\n rng = check_random_state(params.get('random_state', None))\n selection = 
params.get('selection', 'cyclic')\n if selection not in ['random', 'cyclic']:\n raise ValueError(\"selection should be either random or cyclic.\")\n random = (selection == 'random')\n\n if not multi_output:\n coefs = np.empty((n_features, n_alphas), dtype=X.dtype)\n else:\n coefs = np.empty((n_outputs, n_features, n_alphas),\n dtype=X.dtype)\n\n if coef_init is None:\n coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')\n else:\n coef_ = np.asfortranarray(coef_init, dtype=X.dtype)\n\n for i, alpha in enumerate(alphas):\n l1_reg = alpha * l1_ratio * n_samples\n l2_reg = alpha * (1.0 - l1_ratio) * n_samples\n if not multi_output and sparse.isspmatrix(X):\n model = cd_fast.sparse_enet_coordinate_descent(\n coef_, l1_reg, l2_reg, X.data, X.indices,\n X.indptr, y, X_sparse_scaling,\n max_iter, tol, rng, random, positive)\n elif multi_output:\n model = cd_fast.enet_coordinate_descent_multi_task(\n coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)\n elif isinstance(precompute, np.ndarray):\n # We expect precompute to be already Fortran ordered when bypassing\n # checks\n if check_input:\n precompute = check_array(precompute, dtype=X.dtype.type,\n order='C')\n model = cd_fast.enet_coordinate_descent_gram(\n coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,\n tol, rng, random, positive)\n elif precompute is False:\n model = cd_fast.enet_coordinate_descent(\n coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,\n positive)\n else:\n raise ValueError(\"Precompute should be one of True, False, \"\n \"'auto' or array-like. Got %r\" % precompute)\n coef_, dual_gap_, eps_, n_iter_ = model\n coefs[..., i] = coef_\n dual_gaps[i] = dual_gap_\n n_iters.append(n_iter_)\n if dual_gap_ > eps_:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' +\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n\n if verbose:\n if verbose > 2:\n print(model)\n elif verbose > 1:\n print('Path: %03i out of %03i' % (i, n_alphas))\n else:\n sys.stderr.write('.')\n\n if return_n_iter:\n return alphas, coefs, dual_gaps, n_iters\n return alphas, coefs, dual_gaps\n\n\n###############################################################################\n# ElasticNet model\n\n\nclass ElasticNet(LinearModel, RegressorMixin):\n \"\"\"Linear regression with combined L1 and L2 priors as regularizer.\n\n Minimizes the objective function::\n\n 1 / (2 * n_samples) * ||y - Xw||^2_2\n + alpha * l1_ratio * ||w||_1\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\n\n If you are interested in controlling the L1 and L2 penalty\n separately, keep in mind that this is equivalent to::\n\n a * L1 + b * L2\n\n where::\n\n alpha = a + b and l1_ratio = a / (a + b)\n\n The parameter l1_ratio corresponds to alpha in the glmnet R package while\n alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio\n = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,\n unless you supply your own sequence of alpha.\n\n Read more in the :ref:`User Guide <elastic_net>`.\n\n Parameters\n ----------\n alpha : float, optional\n Constant that multiplies the penalty terms. Defaults to 1.0.\n See the notes for the exact mathematical meaning of this\n parameter.``alpha = 0`` is equivalent to an ordinary least square,\n solved by the :class:`LinearRegression` object. 
For numerical\n reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.\n Given this, you should use the :class:`LinearRegression` object.\n\n l1_ratio : float\n The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For\n ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it\n is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a\n combination of L1 and L2.\n\n fit_intercept : bool\n Whether the intercept should be estimated or not. If ``False``, the\n data is assumed to be already centered.\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. The Gram matrix can also be passed as argument.\n For sparse input this option is always ``True`` to preserve sparsity.\n\n max_iter : int, optional\n The maximum number of iterations\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n warm_start : bool, optional\n When set to ``True``, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n positive : bool, optional\n When set to ``True``, forces the coefficients to be positive.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. 
This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) | (n_targets, n_features)\n parameter vector (w in the cost function formula)\n\n sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \\\n (n_targets, n_features)\n ``sparse_coef_`` is a readonly property derived from ``coef_``\n\n intercept_ : float | array, shape (n_targets,)\n independent term in decision function.\n\n n_iter_ : array-like, shape (n_targets,)\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance.\n\n Examples\n --------\n >>> from sklearn.linear_model import ElasticNet\n >>> from sklearn.datasets import make_regression\n\n >>> X, y = make_regression(n_features=2, random_state=0)\n >>> regr = ElasticNet(random_state=0)\n >>> regr.fit(X, y)\n ElasticNet(alpha=1.0, copy_X=True, fit_intercept=True, l1_ratio=0.5,\n max_iter=1000, normalize=False, positive=False, precompute=False,\n random_state=0, selection='cyclic', tol=0.0001, warm_start=False)\n >>> print(regr.coef_) # doctest: +ELLIPSIS\n [18.83816048 64.55968825]\n >>> print(regr.intercept_) # doctest: +ELLIPSIS\n 1.451...\n >>> print(regr.predict([[0, 0]])) # doctest: +ELLIPSIS\n [1.451...]\n\n\n Notes\n -----\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n\n See also\n --------\n ElasticNetCV : Elastic net model with best model selection by\n cross-validation.\n SGDRegressor: implements elastic net regression with incremental training.\n SGDClassifier: implements logistic regression with elastic net penalty\n (``SGDClassifier(loss=\"log\", penalty=\"elasticnet\")``).\n \"\"\"\n path = staticmethod(enet_path)\n\n def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,\n normalize=False, precompute=False, max_iter=1000,\n copy_X=True, tol=1e-4, warm_start=False, positive=False,\n random_state=None, selection='cyclic'):\n self.alpha = alpha\n self.l1_ratio = l1_ratio\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.precompute = precompute\n self.max_iter = max_iter\n self.copy_X = copy_X\n self.tol = tol\n self.warm_start = warm_start\n self.positive = positive\n self.random_state = random_state\n self.selection = selection\n\n def fit(self, X, y, check_input=True):\n \"\"\"Fit model with coordinate descent.\n\n Parameters\n -----------\n X : ndarray or scipy.sparse matrix, (n_samples, n_features)\n Data\n\n y : ndarray, shape (n_samples,) or (n_samples, n_targets)\n Target. Will be cast to X's dtype if necessary\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Notes\n -----\n\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n \"\"\"\n\n if self.alpha == 0:\n warnings.warn(\"With alpha=0, this algorithm does not converge \"\n \"well. You are advised to use the LinearRegression \"\n \"estimator\", stacklevel=2)\n\n if isinstance(self.precompute, six.string_types):\n raise ValueError('precompute should be one of True, False or'\n ' array-like. 
Got %r' % self.precompute)\n\n # Remember if X is copied\n X_copied = False\n # We expect X and y to be float64 or float32 Fortran ordered arrays\n # when bypassing checks\n if check_input:\n X_copied = self.copy_X and self.fit_intercept\n X, y = check_X_y(X, y, accept_sparse='csc',\n order='F', dtype=[np.float64, np.float32],\n copy=X_copied, multi_output=True, y_numeric=True)\n y = check_array(y, order='F', copy=False, dtype=X.dtype.type,\n ensure_2d=False)\n\n # Ensure copying happens only once, don't do it again if done above\n should_copy = self.copy_X and not X_copied\n X, y, X_offset, y_offset, X_scale, precompute, Xy = \\\n _pre_fit(X, y, None, self.precompute, self.normalize,\n self.fit_intercept, copy=should_copy,\n check_input=check_input)\n if y.ndim == 1:\n y = y[:, np.newaxis]\n if Xy is not None and Xy.ndim == 1:\n Xy = Xy[:, np.newaxis]\n\n n_samples, n_features = X.shape\n n_targets = y.shape[1]\n\n if self.selection not in ['cyclic', 'random']:\n raise ValueError(\"selection should be either random or cyclic.\")\n\n if not self.warm_start or not hasattr(self, \"coef_\"):\n coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,\n order='F')\n else:\n coef_ = self.coef_\n if coef_.ndim == 1:\n coef_ = coef_[np.newaxis, :]\n\n dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)\n self.n_iter_ = []\n\n for k in xrange(n_targets):\n if Xy is not None:\n this_Xy = Xy[:, k]\n else:\n this_Xy = None\n _, this_coef, this_dual_gap, this_iter = \\\n self.path(X, y[:, k],\n l1_ratio=self.l1_ratio, eps=None,\n n_alphas=None, alphas=[self.alpha],\n precompute=precompute, Xy=this_Xy,\n fit_intercept=False, normalize=False, copy_X=True,\n verbose=False, tol=self.tol, positive=self.positive,\n X_offset=X_offset, X_scale=X_scale,\n return_n_iter=True, coef_init=coef_[k],\n max_iter=self.max_iter,\n random_state=self.random_state,\n selection=self.selection,\n check_input=False)\n coef_[k] = this_coef[:, 0]\n dual_gaps_[k] = this_dual_gap[0]\n self.n_iter_.append(this_iter[0])\n\n if n_targets == 1:\n self.n_iter_ = self.n_iter_[0]\n self.coef_ = coef_[0]\n self.dual_gap_ = dual_gaps_[0]\n else:\n self.coef_ = coef_\n self.dual_gap_ = dual_gaps_\n\n self._set_intercept(X_offset, y_offset, X_scale)\n\n # workaround since _set_intercept will cast self.coef_ into X.dtype\n self.coef_ = np.asarray(self.coef_, dtype=X.dtype)\n\n # return self for chaining fit and predict calls\n return self\n\n @property\n def sparse_coef_(self):\n \"\"\" sparse representation of the fitted ``coef_`` \"\"\"\n return sparse.csr_matrix(self.coef_)\n\n def _decision_function(self, X):\n \"\"\"Decision function of the linear model\n\n Parameters\n ----------\n X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)\n\n Returns\n -------\n T : array, shape (n_samples,)\n The predicted decision function\n \"\"\"\n check_is_fitted(self, 'n_iter_')\n if sparse.isspmatrix(X):\n return safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n else:\n return super(ElasticNet, self)._decision_function(X)\n\n\n###############################################################################\n# Lasso model\n\nclass Lasso(ElasticNet):\n \"\"\"Linear Model trained with L1 prior as regularizer (aka the Lasso)\n\n The optimization objective for Lasso is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n Technically the Lasso model is optimizing the same objective function as\n the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).\n\n Read more in the :ref:`User Guide <lasso>`.\n\n 
Parameters\n ----------\n alpha : float, optional\n Constant that multiplies the L1 term. Defaults to 1.0.\n ``alpha = 0`` is equivalent to an ordinary least square, solved\n by the :class:`LinearRegression` object. For numerical\n reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.\n Given this, you should use the :class:`LinearRegression` object.\n\n fit_intercept : boolean, optional, default True\n Whether to calculate the intercept for this model. If set\n to False, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | array-like, default=False\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument. For sparse input\n this option is always ``True`` to preserve sparsity.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n warm_start : bool, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n positive : bool, optional\n When set to ``True``, forces the coefficients to be positive.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. 
This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) | (n_targets, n_features)\n parameter vector (w in the cost function formula)\n\n sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \\\n (n_targets, n_features)\n ``sparse_coef_`` is a readonly property derived from ``coef_``\n\n intercept_ : float | array, shape (n_targets,)\n independent term in decision function.\n\n n_iter_ : int | array-like, shape (n_targets,)\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.Lasso(alpha=0.1)\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,\n normalize=False, positive=False, precompute=False, random_state=None,\n selection='cyclic', tol=0.0001, warm_start=False)\n >>> print(clf.coef_)\n [0.85 0. ]\n >>> print(clf.intercept_) # doctest: +ELLIPSIS\n 0.15...\n\n See also\n --------\n lars_path\n lasso_path\n LassoLars\n LassoCV\n LassoLarsCV\n sklearn.decomposition.sparse_encode\n\n Notes\n -----\n The algorithm used to fit the model is coordinate descent.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n \"\"\"\n path = staticmethod(enet_path)\n\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n precompute=False, copy_X=True, max_iter=1000,\n tol=1e-4, warm_start=False, positive=False,\n random_state=None, selection='cyclic'):\n super(Lasso, self).__init__(\n alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,\n normalize=normalize, precompute=precompute, copy_X=copy_X,\n max_iter=max_iter, tol=tol, warm_start=warm_start,\n positive=positive, random_state=random_state,\n selection=selection)\n\n\n###############################################################################\n# Functions for CV with paths functions\n\ndef _path_residuals(X, y, train, test, path, path_params, alphas=None,\n l1_ratio=1, X_order=None, dtype=None):\n \"\"\"Returns the MSE for the models computed by 'path'\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Target values\n\n train : list of indices\n The indices of the train set\n\n test : list of indices\n The indices of the test set\n\n path : callable\n function returning a list of models on the path. See\n enet_path for an example of signature\n\n path_params : dictionary\n Parameters passed to the path function\n\n alphas : array-like, optional\n Array of float that is used for cross-validation. If not\n provided, computed using 'path'\n\n l1_ratio : float, optional\n float between 0 and 1 passed to ElasticNet (scaling between\n l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an\n L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. 
For ``0\n < l1_ratio < 1``, the penalty is a combination of L1 and L2\n\n X_order : {'F', 'C', or None}, optional\n The order of the arrays expected by the path function to\n avoid memory copies\n\n dtype : a numpy dtype or None\n The dtype of the arrays expected by the path function to\n avoid memory copies\n \"\"\"\n X_train = X[train]\n y_train = y[train]\n X_test = X[test]\n y_test = y[test]\n fit_intercept = path_params['fit_intercept']\n normalize = path_params['normalize']\n\n if y.ndim == 1:\n precompute = path_params['precompute']\n else:\n # No Gram variant of multi-task exists right now.\n # Fall back to default enet_multitask\n precompute = False\n\n X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \\\n _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,\n copy=False)\n\n path_params = path_params.copy()\n path_params['Xy'] = Xy\n path_params['X_offset'] = X_offset\n path_params['X_scale'] = X_scale\n path_params['precompute'] = precompute\n path_params['copy_X'] = False\n path_params['alphas'] = alphas\n\n if 'l1_ratio' in path_params:\n path_params['l1_ratio'] = l1_ratio\n\n # Do the ordering and type casting here, as if it is done in the path,\n # X is copied and a reference is kept here\n X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)\n alphas, coefs, _ = path(X_train, y_train, **path_params)\n del X_train, y_train\n\n if y.ndim == 1:\n # Doing this so that it becomes coherent with multioutput.\n coefs = coefs[np.newaxis, :, :]\n y_offset = np.atleast_1d(y_offset)\n y_test = y_test[:, np.newaxis]\n\n if normalize:\n nonzeros = np.flatnonzero(X_scale)\n coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]\n\n intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)\n if sparse.issparse(X_test):\n n_order, n_features, n_alphas = coefs.shape\n # Work around for sparse matrices since coefs is a 3-D numpy array.\n coefs_feature_major = np.rollaxis(coefs, 1)\n feature_2d = np.reshape(coefs_feature_major, (n_features, -1))\n X_test_coefs = safe_sparse_dot(X_test, feature_2d)\n X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)\n else:\n X_test_coefs = safe_sparse_dot(X_test, coefs)\n residues = X_test_coefs - y_test[:, :, np.newaxis]\n residues += intercepts\n this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)\n\n return this_mses\n\n\nclass LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):\n \"\"\"Base class for iterative model fitting along a regularization path\"\"\"\n\n @abstractmethod\n def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,\n normalize=False, precompute='auto', max_iter=1000, tol=1e-4,\n copy_X=True, cv='warn', verbose=False, n_jobs=None,\n positive=False, random_state=None, selection='cyclic'):\n self.eps = eps\n self.n_alphas = n_alphas\n self.alphas = alphas\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.precompute = precompute\n self.max_iter = max_iter\n self.tol = tol\n self.copy_X = copy_X\n self.cv = cv\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.positive = positive\n self.random_state = random_state\n self.selection = selection\n\n def fit(self, X, y):\n \"\"\"Fit linear model with coordinate descent\n\n Fit is on grid of alphas and best alpha estimated by cross-validation.\n\n Parameters\n ----------\n X : {array-like}, shape (n_samples, n_features)\n Training data. Pass directly as Fortran-contiguous data\n to avoid unnecessary memory duplication. 
If y is mono-output,\n X can be sparse.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Target values\n \"\"\"\n y = check_array(y, copy=False, dtype=[np.float64, np.float32],\n ensure_2d=False)\n if y.shape[0] == 0:\n raise ValueError(\"y has 0 samples: %r\" % y)\n\n if hasattr(self, 'l1_ratio'):\n model_str = 'ElasticNet'\n else:\n model_str = 'Lasso'\n\n if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):\n if model_str == 'ElasticNet':\n model = ElasticNet()\n else:\n model = Lasso()\n if y.ndim > 1 and y.shape[1] > 1:\n raise ValueError(\"For multi-task outputs, use \"\n \"MultiTask%sCV\" % (model_str))\n y = column_or_1d(y, warn=True)\n else:\n if sparse.isspmatrix(X):\n raise TypeError(\"X should be dense but a sparse matrix was\"\n \"passed\")\n elif y.ndim == 1:\n raise ValueError(\"For mono-task outputs, use \"\n \"%sCV\" % (model_str))\n if model_str == 'ElasticNet':\n model = MultiTaskElasticNet()\n else:\n model = MultiTaskLasso()\n\n if self.selection not in [\"random\", \"cyclic\"]:\n raise ValueError(\"selection should be either random or cyclic.\")\n\n # This makes sure that there is no duplication in memory.\n # Dealing right with copy_X is important in the following:\n # Multiple functions touch X and subsamples of X and can induce a\n # lot of duplication of memory\n copy_X = self.copy_X and self.fit_intercept\n\n if isinstance(X, np.ndarray) or sparse.isspmatrix(X):\n # Keep a reference to X\n reference_to_old_X = X\n # Let us not impose fortran ordering so far: it is\n # not useful for the cross-validation loop and will be done\n # by the model fitting itself\n X = check_array(X, 'csc', copy=False)\n if sparse.isspmatrix(X):\n if (hasattr(reference_to_old_X, \"data\") and\n not np.may_share_memory(reference_to_old_X.data, X.data)):\n # X is a sparse matrix and has been copied\n copy_X = False\n elif not np.may_share_memory(reference_to_old_X, X):\n # X has been copied\n copy_X = False\n del reference_to_old_X\n else:\n X = check_array(X, 'csc', dtype=[np.float64, np.float32],\n order='F', copy=copy_X)\n copy_X = False\n\n if X.shape[0] != y.shape[0]:\n raise ValueError(\"X and y have inconsistent dimensions (%d != %d)\"\n % (X.shape[0], y.shape[0]))\n\n # All LinearModelCV parameters except 'cv' are acceptable\n path_params = self.get_params()\n if 'l1_ratio' in path_params:\n l1_ratios = np.atleast_1d(path_params['l1_ratio'])\n # For the first path, we need to set l1_ratio\n path_params['l1_ratio'] = l1_ratios[0]\n else:\n l1_ratios = [1, ]\n path_params.pop('cv', None)\n path_params.pop('n_jobs', None)\n\n alphas = self.alphas\n n_l1_ratio = len(l1_ratios)\n if alphas is None:\n alphas = []\n for l1_ratio in l1_ratios:\n alphas.append(_alpha_grid(\n X, y, l1_ratio=l1_ratio,\n fit_intercept=self.fit_intercept,\n eps=self.eps, n_alphas=self.n_alphas,\n normalize=self.normalize,\n copy_X=self.copy_X))\n else:\n # Making sure alphas is properly ordered.\n alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))\n # We want n_alphas to be the number of alphas used for each l1_ratio.\n n_alphas = len(alphas[0])\n path_params.update({'n_alphas': n_alphas})\n\n path_params['copy_X'] = copy_X\n # We are not computing in parallel, we can modify X\n # inplace in the folds\n if effective_n_jobs(self.n_jobs) > 1:\n path_params['copy_X'] = False\n\n # init cross-validation generator\n cv = check_cv(self.cv)\n\n # Compute path for all folds and compute MSE to get the best alpha\n folds = list(cv.split(X, y))\n best_mse = np.inf\n\n # We do a double 
for loop folded in one, in order to be able to\n # iterate in parallel on l1_ratio and folds\n jobs = (delayed(_path_residuals)(X, y, train, test, self.path,\n path_params, alphas=this_alphas,\n l1_ratio=this_l1_ratio, X_order='F',\n dtype=X.dtype.type)\n for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)\n for train, test in folds)\n mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer=\"threads\"))(jobs)\n mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))\n mean_mse = np.mean(mse_paths, axis=1)\n self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))\n for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,\n mean_mse):\n i_best_alpha = np.argmin(mse_alphas)\n this_best_mse = mse_alphas[i_best_alpha]\n if this_best_mse < best_mse:\n best_alpha = l1_alphas[i_best_alpha]\n best_l1_ratio = l1_ratio\n best_mse = this_best_mse\n\n self.l1_ratio_ = best_l1_ratio\n self.alpha_ = best_alpha\n if self.alphas is None:\n self.alphas_ = np.asarray(alphas)\n if n_l1_ratio == 1:\n self.alphas_ = self.alphas_[0]\n # Remove duplicate alphas in case alphas is provided.\n else:\n self.alphas_ = np.asarray(alphas[0])\n\n # Refit the model with the parameters selected\n common_params = dict((name, value)\n for name, value in self.get_params().items()\n if name in model.get_params())\n model.set_params(**common_params)\n model.alpha = best_alpha\n model.l1_ratio = best_l1_ratio\n model.copy_X = copy_X\n model.precompute = False\n model.fit(X, y)\n if not hasattr(self, 'l1_ratio'):\n del self.l1_ratio_\n self.coef_ = model.coef_\n self.intercept_ = model.intercept_\n self.dual_gap_ = model.dual_gap_\n self.n_iter_ = model.n_iter_\n return self\n\n\nclass LassoCV(LinearModelCV, RegressorMixin):\n \"\"\"Lasso linear model with iterative fitting along a regularization path.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n The best model is selected by cross-validation.\n\n The optimization objective for Lasso is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n Read more in the :ref:`User Guide <lasso>`.\n\n Parameters\n ----------\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``.\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n alphas : numpy array, optional\n List of alphas where to compute the models.\n If ``None`` alphas are set automatically\n\n fit_intercept : boolean, default True\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. 
The Gram\n matrix can also be passed as argument.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n verbose : bool or integer\n Amount of verbosity.\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n positive : bool, optional\n If positive, restrict regression coefficients to be positive\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. 
This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n alpha_ : float\n The amount of penalization chosen by cross validation\n\n coef_ : array, shape (n_features,) | (n_targets, n_features)\n parameter vector (w in the cost function formula)\n\n intercept_ : float | array, shape (n_targets,)\n independent term in decision function.\n\n mse_path_ : array, shape (n_alphas, n_folds)\n mean square error for the test set on each fold, varying alpha\n\n alphas_ : numpy array, shape (n_alphas,)\n The grid of alphas used for fitting\n\n dual_gap_ : ndarray, shape ()\n The dual gap at the end of the optimization for the optimal alpha\n (``alpha_``).\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance for the optimal alpha.\n\n Examples\n --------\n >>> from sklearn.linear_model import LassoCV\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(noise=4, random_state=0)\n >>> reg = LassoCV(cv=5, random_state=0).fit(X, y)\n >>> reg.score(X, y) # doctest: +ELLIPSIS\n 0.9993...\n >>> reg.predict(X[:1,])\n array([-78.4951...])\n\n Notes\n -----\n For an example, see\n :ref:`examples/linear_model/plot_lasso_model_selection.py\n <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n\n See also\n --------\n lars_path\n lasso_path\n LassoLars\n Lasso\n LassoLarsCV\n \"\"\"\n path = staticmethod(lasso_path)\n\n def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,\n normalize=False, precompute='auto', max_iter=1000, tol=1e-4,\n copy_X=True, cv='warn', verbose=False, n_jobs=None,\n positive=False, random_state=None, selection='cyclic'):\n super(LassoCV, self).__init__(\n eps=eps, n_alphas=n_alphas, alphas=alphas,\n fit_intercept=fit_intercept, normalize=normalize,\n precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,\n cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,\n random_state=random_state, selection=selection)\n\n\nclass ElasticNetCV(LinearModelCV, RegressorMixin):\n \"\"\"Elastic Net model with iterative fitting along a regularization path.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n Read more in the :ref:`User Guide <elastic_net>`.\n\n Parameters\n ----------\n l1_ratio : float or array of floats, optional\n float between 0 and 1 passed to ElasticNet (scaling between\n l1 and l2 penalties). For ``l1_ratio = 0``\n the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2\n This parameter can be a list, in which case the different\n values are tested by cross-validation and the one giving the best\n prediction score is used. Note that a good choice of list of\n values for l1_ratio is often to put more values close to 1\n (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\n .9, .95, .99, 1]``\n\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``.\n\n n_alphas : int, optional\n Number of alphas along the regularization path, used for each l1_ratio.\n\n alphas : numpy array, optional\n List of alphas where to compute the models.\n If None alphas are set automatically\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. 
If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n verbose : bool or integer\n Amount of verbosity.\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n positive : bool, optional\n When set to ``True``, forces the coefficients to be positive.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. 
This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n alpha_ : float\n The amount of penalization chosen by cross validation\n\n l1_ratio_ : float\n The compromise between l1 and l2 penalization chosen by\n cross validation\n\n coef_ : array, shape (n_features,) | (n_targets, n_features)\n Parameter vector (w in the cost function formula),\n\n intercept_ : float | array, shape (n_targets, n_features)\n Independent term in the decision function.\n\n mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)\n Mean square error for the test set on each fold, varying l1_ratio and\n alpha.\n\n alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)\n The grid of alphas used for fitting, for each l1_ratio.\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance for the optimal alpha.\n\n Examples\n --------\n >>> from sklearn.linear_model import ElasticNetCV\n >>> from sklearn.datasets import make_regression\n\n >>> X, y = make_regression(n_features=2, random_state=0)\n >>> regr = ElasticNetCV(cv=5, random_state=0)\n >>> regr.fit(X, y)\n ElasticNetCV(alphas=None, copy_X=True, cv=5, eps=0.001, fit_intercept=True,\n l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=None,\n normalize=False, positive=False, precompute='auto', random_state=0,\n selection='cyclic', tol=0.0001, verbose=0)\n >>> print(regr.alpha_) # doctest: +ELLIPSIS\n 0.1994727942696716\n >>> print(regr.intercept_) # doctest: +ELLIPSIS\n 0.398...\n >>> print(regr.predict([[0, 0]])) # doctest: +ELLIPSIS\n [0.398...]\n\n\n Notes\n -----\n For an example, see\n :ref:`examples/linear_model/plot_lasso_model_selection.py\n <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n\n The parameter l1_ratio corresponds to alpha in the glmnet R package\n while alpha corresponds to the lambda parameter in glmnet.\n More specifically, the optimization objective is::\n\n 1 / (2 * n_samples) * ||y - Xw||^2_2\n + alpha * l1_ratio * ||w||_1\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\n\n If you are interested in controlling the L1 and L2 penalty\n separately, keep in mind that this is equivalent to::\n\n a * L1 + b * L2\n\n for::\n\n alpha = a + b and l1_ratio = a / (a + b).\n\n See also\n --------\n enet_path\n ElasticNet\n\n \"\"\"\n path = staticmethod(enet_path)\n\n def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,\n fit_intercept=True, normalize=False, precompute='auto',\n max_iter=1000, tol=1e-4, cv='warn', copy_X=True,\n verbose=0, n_jobs=None, positive=False, random_state=None,\n selection='cyclic'):\n self.l1_ratio = l1_ratio\n self.eps = eps\n self.n_alphas = n_alphas\n self.alphas = alphas\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.precompute = precompute\n self.max_iter = max_iter\n self.tol = tol\n self.cv = cv\n self.copy_X = copy_X\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.positive = positive\n self.random_state = random_state\n self.selection = selection\n\n\n###############################################################################\n# Multi Task ElasticNet and Lasso models (with joint feature selection)\n\n\nclass MultiTaskElasticNet(Lasso):\n \"\"\"Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer\n\n The optimization objective 
for MultiTaskElasticNet is::\n\n (1 / (2 * n_samples)) * ||Y - XW||_Fro^2\n + alpha * l1_ratio * ||W||_21\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\n\n Where::\n\n ||W||_21 = sum_i sqrt(sum_j w_ij ^ 2)\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <multi_task_elastic_net>`.\n\n Parameters\n ----------\n alpha : float, optional\n Constant that multiplies the L1/L2 term. Defaults to 1.0\n\n l1_ratio : float\n The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\n For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\n is an L2 penalty.\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n warm_start : bool, optional\n When set to ``True``, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n intercept_ : array, shape (n_tasks,)\n Independent term in decision function.\n\n coef_ : array, shape (n_tasks, n_features)\n Parameter vector (W in the cost function formula). If a 1D y is\n passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])\n ... 
#doctest: +NORMALIZE_WHITESPACE\n MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,\n l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,\n selection='cyclic', tol=0.0001, warm_start=False)\n >>> print(clf.coef_)\n [[0.45663524 0.45612256]\n [0.45663524 0.45612256]]\n >>> print(clf.intercept_)\n [0.0872422 0.0872422]\n\n See also\n --------\n MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in\n cross-validation.\n ElasticNet\n MultiTaskLasso\n\n Notes\n -----\n The algorithm used to fit the model is coordinate descent.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n \"\"\"\n def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,\n normalize=False, copy_X=True, max_iter=1000, tol=1e-4,\n warm_start=False, random_state=None, selection='cyclic'):\n self.l1_ratio = l1_ratio\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.max_iter = max_iter\n self.copy_X = copy_X\n self.tol = tol\n self.warm_start = warm_start\n self.random_state = random_state\n self.selection = selection\n\n def fit(self, X, y):\n \"\"\"Fit MultiTaskElasticNet model with coordinate descent\n\n Parameters\n -----------\n X : ndarray, shape (n_samples, n_features)\n Data\n y : ndarray, shape (n_samples, n_tasks)\n Target. Will be cast to X's dtype if necessary\n\n Notes\n -----\n\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n \"\"\"\n X = check_array(X, dtype=[np.float64, np.float32], order='F',\n copy=self.copy_X and self.fit_intercept)\n y = check_array(y, dtype=X.dtype.type, ensure_2d=False)\n\n if hasattr(self, 'l1_ratio'):\n model_str = 'ElasticNet'\n else:\n model_str = 'Lasso'\n if y.ndim == 1:\n raise ValueError(\"For mono-task outputs, use %s\" % model_str)\n\n n_samples, n_features = X.shape\n _, n_tasks = y.shape\n\n if n_samples != y.shape[0]:\n raise ValueError(\"X and y have inconsistent dimensions (%d != %d)\"\n % (n_samples, y.shape[0]))\n\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, self.fit_intercept, self.normalize, copy=False)\n\n if not self.warm_start or not hasattr(self, \"coef_\"):\n self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,\n order='F')\n\n l1_reg = self.alpha * self.l1_ratio * n_samples\n l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples\n\n self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory\n\n if self.selection not in ['random', 'cyclic']:\n raise ValueError(\"selection should be either random or cyclic.\")\n random = (self.selection == 'random')\n\n self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \\\n cd_fast.enet_coordinate_descent_multi_task(\n self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,\n check_random_state(self.random_state), random)\n\n self._set_intercept(X_offset, y_offset, X_scale)\n\n if self.dual_gap_ > self.eps_:\n warnings.warn('Objective did not converge, you might want'\n ' to increase the number of iterations',\n ConvergenceWarning)\n\n # return self for chaining fit and predict calls\n return self\n\n\nclass MultiTaskLasso(MultiTaskElasticNet):\n \"\"\"Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.\n\n The optimization objective for 
Lasso is::\n\n (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21\n\n Where::\n\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <multi_task_lasso>`.\n\n Parameters\n ----------\n alpha : float, optional\n Constant that multiplies the L1/L2 term. Defaults to 1.0\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n warm_start : bool, optional\n When set to ``True``, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4\n\n Attributes\n ----------\n coef_ : array, shape (n_tasks, n_features)\n Parameter vector (W in the cost function formula).\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\n\n intercept_ : array, shape (n_tasks,)\n independent term in decision function.\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.MultiTaskLasso(alpha=0.1)\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])\n MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,\n normalize=False, random_state=None, selection='cyclic', tol=0.0001,\n warm_start=False)\n >>> print(clf.coef_)\n [[0.89393398 0. ]\n [0.89393398 0. 
]]\n >>> print(clf.intercept_)\n [0.10606602 0.10606602]\n\n See also\n --------\n MultiTaskLasso : Multi-task L1/L2 Lasso with built-in cross-validation\n Lasso\n MultiTaskElasticNet\n\n Notes\n -----\n The algorithm used to fit the model is coordinate descent.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n \"\"\"\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,\n random_state=None, selection='cyclic'):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.max_iter = max_iter\n self.copy_X = copy_X\n self.tol = tol\n self.warm_start = warm_start\n self.l1_ratio = 1.0\n self.random_state = random_state\n self.selection = selection\n\n\nclass MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):\n \"\"\"Multi-task L1/L2 ElasticNet with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n The optimization objective for MultiTaskElasticNet is::\n\n (1 / (2 * n_samples)) * ||Y - XW||^Fro_2\n + alpha * l1_ratio * ||W||_21\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\n\n Where::\n\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <multi_task_elastic_net>`.\n\n Parameters\n ----------\n l1_ratio : float or array of floats\n The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\n For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\n is an L2 penalty.\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.\n This parameter can be a list, in which case the different\n values are tested by cross-validation and the one giving the best\n prediction score is used. Note that a good choice of list of\n values for l1_ratio is often to put more values close to 1\n (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\n .9, .95, .99, 1]``\n\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``.\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n alphas : array-like, optional\n List of alphas where to compute the models.\n If not provided, set automatically.\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. 
data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n max_iter : int, optional\n The maximum number of iterations\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n verbose : bool or integer\n Amount of verbosity.\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation. Note that this is\n used only if multiple values for l1_ratio are given.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'.\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n intercept_ : array, shape (n_tasks,)\n Independent term in decision function.\n\n coef_ : array, shape (n_tasks, n_features)\n Parameter vector (W in the cost function formula).\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\n\n alpha_ : float\n The amount of penalization chosen by cross validation\n\n mse_path_ : array, shape (n_alphas, n_folds) or \\\n (n_l1_ratio, n_alphas, n_folds)\n mean square error for the test set on each fold, varying alpha\n\n alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)\n The grid of alphas used for fitting, for each l1_ratio\n\n l1_ratio_ : float\n best l1_ratio obtained by cross-validation.\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance for the optimal alpha.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.MultiTaskElasticNetCV(cv=3)\n >>> clf.fit([[0,0], [1, 1], [2, 2]],\n ... 
[[0, 0], [1, 1], [2, 2]])\n ... #doctest: +NORMALIZE_WHITESPACE\n MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=3, eps=0.001,\n fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,\n n_jobs=None, normalize=False, random_state=None, selection='cyclic',\n tol=0.0001, verbose=0)\n >>> print(clf.coef_)\n [[0.52875032 0.46958558]\n [0.52875032 0.46958558]]\n >>> print(clf.intercept_)\n [0.00166409 0.00166409]\n\n See also\n --------\n MultiTaskElasticNet\n ElasticNetCV\n MultiTaskLassoCV\n\n Notes\n -----\n The algorithm used to fit the model is coordinate descent.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n \"\"\"\n path = staticmethod(enet_path)\n\n def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,\n fit_intercept=True, normalize=False,\n max_iter=1000, tol=1e-4, cv='warn', copy_X=True,\n verbose=0, n_jobs=None, random_state=None,\n selection='cyclic'):\n self.l1_ratio = l1_ratio\n self.eps = eps\n self.n_alphas = n_alphas\n self.alphas = alphas\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.max_iter = max_iter\n self.tol = tol\n self.cv = cv\n self.copy_X = copy_X\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.random_state = random_state\n self.selection = selection\n\n\nclass MultiTaskLassoCV(LinearModelCV, RegressorMixin):\n \"\"\"Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n The optimization objective for MultiTaskLasso is::\n\n (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21\n\n Where::\n\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\n\n i.e. the sum of norm of each row.\n\n Read more in the :ref:`User Guide <multi_task_lasso>`.\n\n Parameters\n ----------\n eps : float, optional\n Length of the path. ``eps=1e-3`` means that\n ``alpha_min / alpha_max = 1e-3``.\n\n n_alphas : int, optional\n Number of alphas along the regularization path\n\n alphas : array-like, optional\n List of alphas where to compute the models.\n If not provided, set automatically.\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. 
data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n max_iter : int, optional\n The maximum number of iterations.\n\n tol : float, optional\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n verbose : bool or integer\n Amount of verbosity.\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation. Note that this is\n used only if multiple values for l1_ratio are given.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator that selects a random\n feature to update. If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by `np.random`. Used when ``selection`` ==\n 'random'\n\n selection : str, default 'cyclic'\n If set to 'random', a random coefficient is updated every iteration\n rather than looping over features sequentially by default. 
This\n (setting to 'random') often leads to significantly faster convergence\n especially when tol is higher than 1e-4.\n\n Attributes\n ----------\n intercept_ : array, shape (n_tasks,)\n Independent term in decision function.\n\n coef_ : array, shape (n_tasks, n_features)\n Parameter vector (W in the cost function formula).\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\n\n alpha_ : float\n The amount of penalization chosen by cross validation\n\n mse_path_ : array, shape (n_alphas, n_folds)\n mean square error for the test set on each fold, varying alpha\n\n alphas_ : numpy array, shape (n_alphas,)\n The grid of alphas used for fitting.\n\n n_iter_ : int\n number of iterations run by the coordinate descent solver to reach\n the specified tolerance for the optimal alpha.\n\n Examples\n --------\n >>> from sklearn.linear_model import MultiTaskLassoCV\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(n_targets=2, noise=4, random_state=0)\n >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)\n >>> reg.score(X, y) # doctest: +ELLIPSIS\n 0.9994...\n >>> reg.alpha_\n 0.5713...\n >>> reg.predict(X[:1,])\n array([[153.7971..., 94.9015...]])\n\n See also\n --------\n MultiTaskElasticNet\n ElasticNetCV\n MultiTaskElasticNetCV\n\n Notes\n -----\n The algorithm used to fit the model is coordinate descent.\n\n To avoid unnecessary memory duplication the X argument of the fit method\n should be directly passed as a Fortran-contiguous numpy array.\n \"\"\"\n path = staticmethod(lasso_path)\n\n def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,\n normalize=False, max_iter=1000, tol=1e-4, copy_X=True,\n cv='warn', verbose=False, n_jobs=None, random_state=None,\n selection='cyclic'):\n super(MultiTaskLassoCV, self).__init__(\n eps=eps, n_alphas=n_alphas, alphas=alphas,\n fit_intercept=fit_intercept, normalize=normalize,\n max_iter=max_iter, tol=tol, copy_X=copy_X,\n cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,\n selection=selection)\n", "\"\"\"\n==========================================================\nAdjustment for chance in clustering performance evaluation\n==========================================================\n\nThe following plots demonstrate the impact of the number of clusters and\nnumber of samples on various clustering performance evaluation metrics.\n\nNon-adjusted measures such as the V-Measure show a dependency between\nthe number of clusters and the number of samples: the mean V-Measure\nof random labeling increases significantly as the number of clusters is\ncloser to the total number of samples used to compute the measure.\n\nAdjusted for chance measure such as ARI display some random variations\ncentered around a mean score of 0.0 for any number of samples and\nclusters.\n\nOnly adjusted measures can hence safely be used as a consensus index\nto evaluate the average stability of clustering algorithms for a given\nvalue of k on various overlapping sub-samples of the dataset.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Olivier Grisel <olivier.grisel@ensta.org>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\nfrom sklearn import metrics\n\n\ndef uniform_labelings_scores(score_func, n_samples, n_clusters_range,\n fixed_n_classes=None, n_runs=5, seed=42):\n \"\"\"Compute score for 2 random uniform cluster labelings.\n\n Both random labelings have the same number of clusters for each value\n possible value in ``n_clusters_range``.\n\n When 
fixed_n_classes is not None the first labeling is considered a ground\n truth class assignment with fixed number of classes.\n \"\"\"\n random_labels = np.random.RandomState(seed).randint\n scores = np.zeros((len(n_clusters_range), n_runs))\n\n if fixed_n_classes is not None:\n labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)\n\n for i, k in enumerate(n_clusters_range):\n for j in range(n_runs):\n if fixed_n_classes is None:\n labels_a = random_labels(low=0, high=k, size=n_samples)\n labels_b = random_labels(low=0, high=k, size=n_samples)\n scores[i, j] = score_func(labels_a, labels_b)\n return scores\n\nscore_funcs = [\n metrics.adjusted_rand_score,\n metrics.v_measure_score,\n metrics.adjusted_mutual_info_score,\n metrics.mutual_info_score,\n]\n\n# 2 independent random clusterings with equal cluster number\n\nn_samples = 100\nn_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)\n\nplt.figure(1)\n\nplots = []\nnames = []\nfor score_func in score_funcs:\n print(\"Computing %s for %d values of n_clusters and n_samples=%d\"\n % (score_func.__name__, len(n_clusters_range), n_samples))\n\n t0 = time()\n scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)\n print(\"done in %0.3fs\" % (time() - t0))\n plots.append(plt.errorbar(\n n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])\n names.append(score_func.__name__)\n\nplt.title(\"Clustering measures for 2 random uniform labelings\\n\"\n \"with equal number of clusters\")\nplt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)\nplt.ylabel('Score value')\nplt.legend(plots, names)\nplt.ylim(ymin=-0.05, ymax=1.05)\n\n\n# Random labeling with varying n_clusters against ground class labels\n# with fixed number of clusters\n\nn_samples = 1000\nn_clusters_range = np.linspace(2, 100, 10).astype(np.int)\nn_classes = 10\n\nplt.figure(2)\n\nplots = []\nnames = []\nfor score_func in score_funcs:\n print(\"Computing %s for %d values of n_clusters and n_samples=%d\"\n % (score_func.__name__, len(n_clusters_range), n_samples))\n\n t0 = time()\n scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,\n fixed_n_classes=n_classes)\n print(\"done in %0.3fs\" % (time() - t0))\n plots.append(plt.errorbar(\n n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])\n names.append(score_func.__name__)\n\nplt.title(\"Clustering measures for random uniform labeling\\n\"\n \"against reference assignment with %d classes\" % n_classes)\nplt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)\nplt.ylabel('Score value')\nplt.ylim(ymin=-0.05, ymax=1.05)\nplt.legend(plots, names)\nplt.show()\n", "# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport re\n\nimport numpy as np\nfrom scipy import sparse\nimport pytest\n\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_raises_regex\nfrom sklearn.utils.testing import assert_allclose\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import assert_no_warnings\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import OrdinalEncoder\n\n\ndef toarray(a):\n if hasattr(a, \"toarray\"):\n a = a.toarray()\n return a\n\n\ndef 
test_one_hot_encoder_sparse():\n # Test OneHotEncoder's fit and transform.\n X = [[3, 2, 1], [0, 1, 1]]\n enc = OneHotEncoder()\n with ignore_warnings(category=(DeprecationWarning, FutureWarning)):\n # discover max values automatically\n X_trans = enc.fit_transform(X).toarray()\n assert_equal(X_trans.shape, (2, 5))\n assert_array_equal(enc.active_features_,\n np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])\n assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])\n\n # check outcome\n assert_array_equal(X_trans,\n [[0., 1., 0., 1., 1.],\n [1., 0., 1., 0., 1.]])\n\n # max value given as 3\n # enc = assert_warns(DeprecationWarning, OneHotEncoder, n_values=4)\n enc = OneHotEncoder(n_values=4)\n with ignore_warnings(category=DeprecationWarning):\n X_trans = enc.fit_transform(X)\n assert_equal(X_trans.shape, (2, 4 * 3))\n assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])\n\n # max value given per feature\n # enc = assert_warns(DeprecationWarning, OneHotEncoder, n_values=[3, 2, 2])\n enc = OneHotEncoder(n_values=[3, 2, 2])\n with ignore_warnings(category=DeprecationWarning):\n X = [[1, 0, 1], [0, 1, 1]]\n X_trans = enc.fit_transform(X)\n assert_equal(X_trans.shape, (2, 3 + 2 + 2))\n assert_array_equal(enc.n_values_, [3, 2, 2])\n # check that testing with larger feature works:\n X = np.array([[2, 0, 1], [0, 1, 1]])\n enc.transform(X)\n\n # test that an error is raised when out of bounds:\n X_too_large = [[0, 2, 1], [0, 1, 1]]\n assert_raises(ValueError, enc.transform, X_too_large)\n error_msg = r\"unknown categorical feature present \\[2\\] during transform\"\n assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)\n with ignore_warnings(category=DeprecationWarning):\n assert_raises(\n ValueError,\n OneHotEncoder(n_values=2).fit_transform, X)\n\n # test that error is raised when wrong number of features\n assert_raises(ValueError, enc.transform, X[:, :-1])\n\n # test that error is raised when wrong number of features in fit\n # with prespecified n_values\n with ignore_warnings(category=DeprecationWarning):\n assert_raises(ValueError, enc.fit, X[:, :-1])\n # test exception on wrong init param\n with ignore_warnings(category=DeprecationWarning):\n assert_raises(\n TypeError, OneHotEncoder(n_values=np.int).fit, X)\n\n enc = OneHotEncoder()\n # test negative input to fit\n with ignore_warnings(category=FutureWarning):\n assert_raises(ValueError, enc.fit, [[0], [-1]])\n\n # test negative input to transform\n with ignore_warnings(category=FutureWarning):\n enc.fit([[0], [1]])\n assert_raises(ValueError, enc.transform, [[0], [-1]])\n\n\ndef test_one_hot_encoder_dense():\n # check for sparse=False\n X = [[3, 2, 1], [0, 1, 1]]\n enc = OneHotEncoder(sparse=False)\n with ignore_warnings(category=(DeprecationWarning, FutureWarning)):\n # discover max values automatically\n X_trans = enc.fit_transform(X)\n assert_equal(X_trans.shape, (2, 5))\n assert_array_equal(enc.active_features_,\n np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])\n assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])\n\n # check outcome\n assert_array_equal(X_trans,\n np.array([[0., 1., 0., 1., 1.],\n [1., 0., 1., 0., 1.]]))\n\n\ndef test_one_hot_encoder_deprecationwarnings():\n for X in [[[3, 2, 1], [0, 1, 1]],\n [[3., 2., 1.], [0., 1., 1.]]]:\n enc = OneHotEncoder()\n assert_warns_message(FutureWarning, \"handling of integer\",\n enc.fit, X)\n enc = OneHotEncoder()\n assert_warns_message(FutureWarning, \"handling of integer\",\n enc.fit_transform, X)\n\n # check it still works correctly as well\n with 
ignore_warnings(category=FutureWarning):\n X_trans = enc.fit_transform(X).toarray()\n res = [[0., 1., 0., 1., 1.],\n [1., 0., 1., 0., 1.]]\n assert_array_equal(X_trans, res)\n\n # check deprecated attributes\n assert_warns(DeprecationWarning, lambda: enc.active_features_)\n assert_warns(DeprecationWarning, lambda: enc.feature_indices_)\n assert_warns(DeprecationWarning, lambda: enc.n_values_)\n\n # check no warning is raised if keyword is specified\n enc = OneHotEncoder(categories='auto')\n assert_no_warnings(enc.fit, X)\n enc = OneHotEncoder(categories='auto')\n assert_no_warnings(enc.fit_transform, X)\n X_trans = enc.fit_transform(X).toarray()\n assert_array_equal(X_trans, res)\n\n # check there is also a warning if the default is passed\n enc = OneHotEncoder(n_values='auto', handle_unknown='ignore')\n assert_warns(DeprecationWarning, enc.fit, X)\n\n X = np.array([['cat1', 'cat2']], dtype=object).T\n enc = OneHotEncoder(categorical_features='all')\n assert_warns(DeprecationWarning, enc.fit, X)\n\n\ndef test_one_hot_encoder_force_new_behaviour():\n # ambiguous integer case (non secutive range of categories)\n X = np.array([[1, 2]]).T\n X2 = np.array([[0, 1]]).T\n\n # without argument -> by default using legacy behaviour with warnings\n enc = OneHotEncoder()\n\n with ignore_warnings(category=FutureWarning):\n enc.fit(X)\n\n res = enc.transform(X2)\n exp = np.array([[0, 0], [1, 0]])\n assert_array_equal(res.toarray(), exp)\n\n # with explicit auto argument -> don't use legacy behaviour\n # (so will raise an error on unseen value within range)\n enc = OneHotEncoder(categories='auto')\n enc.fit(X)\n assert_raises(ValueError, enc.transform, X2)\n\n\ndef _run_one_hot(X, X2, cat):\n # enc = assert_warns(\n # DeprecationWarning,\n # OneHotEncoder, categorical_features=cat)\n enc = OneHotEncoder(categorical_features=cat)\n with ignore_warnings(category=(DeprecationWarning, FutureWarning)):\n Xtr = enc.fit_transform(X)\n with ignore_warnings(category=(DeprecationWarning, FutureWarning)):\n X2tr = enc.fit(X).transform(X2)\n return Xtr, X2tr\n\n\ndef _check_one_hot(X, X2, cat, n_features):\n ind = np.where(cat)[0]\n # With mask\n A, B = _run_one_hot(X, X2, cat)\n # With indices\n C, D = _run_one_hot(X, X2, ind)\n # Check shape\n assert_equal(A.shape, (2, n_features))\n assert_equal(B.shape, (1, n_features))\n assert_equal(C.shape, (2, n_features))\n assert_equal(D.shape, (1, n_features))\n # Check that mask and indices give the same results\n assert_array_equal(toarray(A), toarray(C))\n assert_array_equal(toarray(B), toarray(D))\n\n\ndef test_one_hot_encoder_categorical_features():\n X = np.array([[3, 2, 1], [0, 1, 1]])\n X2 = np.array([[1, 1, 1]])\n\n cat = [True, False, False]\n _check_one_hot(X, X2, cat, 4)\n\n # Edge case: all non-categorical\n cat = [False, False, False]\n _check_one_hot(X, X2, cat, 3)\n\n # Edge case: all categorical\n cat = [True, True, True]\n _check_one_hot(X, X2, cat, 5)\n\n # check error raised if also specifying categories\n oh = OneHotEncoder(categories=[range(3)],\n categorical_features=[True, False, False])\n assert_raises(ValueError, oh.fit, X)\n\n\ndef test_one_hot_encoder_handle_unknown():\n X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])\n X2 = np.array([[4, 1, 1]])\n\n # Test that one hot encoder raises error for unknown features\n # present during transform.\n oh = OneHotEncoder(handle_unknown='error')\n assert_warns(FutureWarning, oh.fit, X)\n assert_raises(ValueError, oh.transform, X2)\n\n # Test the ignore option, ignores unknown features (giving all 0's)\n oh 
= OneHotEncoder(handle_unknown='ignore')\n oh.fit(X)\n X2_passed = X2.copy()\n assert_array_equal(\n oh.transform(X2_passed).toarray(),\n np.array([[0., 0., 0., 0., 1., 0., 0.]]))\n # ensure transformed data was not modified in place\n assert_allclose(X2, X2_passed)\n\n # Raise error if handle_unknown is neither ignore or error.\n oh = OneHotEncoder(handle_unknown='42')\n assert_raises(ValueError, oh.fit, X)\n\n\ndef test_one_hot_encoder_not_fitted():\n X = np.array([['a'], ['b']])\n enc = OneHotEncoder(categories=['a', 'b'])\n msg = (\"This OneHotEncoder instance is not fitted yet. \"\n \"Call 'fit' with appropriate arguments before using this method.\")\n with pytest.raises(NotFittedError, match=msg):\n enc.transform(X)\n\n\ndef test_one_hot_encoder_no_categorical_features():\n X = np.array([[3, 2, 1], [0, 1, 1]], dtype='float64')\n\n cat = [False, False, False]\n enc = OneHotEncoder(categorical_features=cat)\n with ignore_warnings(category=(DeprecationWarning, FutureWarning)):\n X_tr = enc.fit_transform(X)\n expected_features = np.array(list(), dtype='object')\n assert_array_equal(X, X_tr)\n assert_array_equal(enc.get_feature_names(), expected_features)\n assert enc.categories_ == []\n\n\ndef test_one_hot_encoder_handle_unknown_strings():\n X = np.array(['11111111', '22', '333', '4444']).reshape((-1, 1))\n X2 = np.array(['55555', '22']).reshape((-1, 1))\n # Non Regression test for the issue #12470\n # Test the ignore option, when categories are numpy string dtype\n # particularly when the known category strings are larger\n # than the unknown category strings\n oh = OneHotEncoder(handle_unknown='ignore')\n oh.fit(X)\n X2_passed = X2.copy()\n assert_array_equal(\n oh.transform(X2_passed).toarray(),\n np.array([[0., 0., 0., 0.], [0., 1., 0., 0.]]))\n # ensure transformed data was not modified in place\n assert_array_equal(X2, X2_passed)\n\n\n@pytest.mark.parametrize(\"output_dtype\", [np.int32, np.float32, np.float64])\n@pytest.mark.parametrize(\"input_dtype\", [np.int32, np.float32, np.float64])\ndef test_one_hot_encoder_dtype(input_dtype, output_dtype):\n X = np.asarray([[0, 1]], dtype=input_dtype).T\n X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype)\n\n oh = OneHotEncoder(categories='auto', dtype=output_dtype)\n assert_array_equal(oh.fit_transform(X).toarray(), X_expected)\n assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected)\n\n oh = OneHotEncoder(categories='auto', dtype=output_dtype, sparse=False)\n assert_array_equal(oh.fit_transform(X), X_expected)\n assert_array_equal(oh.fit(X).transform(X), X_expected)\n\n\n@pytest.mark.parametrize(\"output_dtype\", [np.int32, np.float32, np.float64])\ndef test_one_hot_encoder_dtype_pandas(output_dtype):\n pd = pytest.importorskip('pandas')\n\n X_df = pd.DataFrame({'A': ['a', 'b'], 'B': [1, 2]})\n X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype)\n\n oh = OneHotEncoder(dtype=output_dtype)\n assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected)\n assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected)\n\n oh = OneHotEncoder(dtype=output_dtype, sparse=False)\n assert_array_equal(oh.fit_transform(X_df), X_expected)\n assert_array_equal(oh.fit(X_df).transform(X_df), X_expected)\n\n\ndef test_one_hot_encoder_set_params():\n X = np.array([[1, 2]]).T\n oh = OneHotEncoder()\n # set params on not yet fitted object\n oh.set_params(categories=[[0, 1, 2, 3]])\n assert oh.get_params()['categories'] == [[0, 1, 2, 3]]\n assert oh.fit_transform(X).toarray().shape == (2, 4)\n # set params 
on already fitted object\n oh.set_params(categories=[[0, 1, 2, 3, 4]])\n assert oh.fit_transform(X).toarray().shape == (2, 5)\n\n\ndef check_categorical_onehot(X):\n enc = OneHotEncoder(categories='auto')\n Xtr1 = enc.fit_transform(X)\n\n enc = OneHotEncoder(categories='auto', sparse=False)\n Xtr2 = enc.fit_transform(X)\n\n assert_allclose(Xtr1.toarray(), Xtr2)\n\n assert sparse.isspmatrix_csr(Xtr1)\n return Xtr1.toarray()\n\n\n@pytest.mark.parametrize(\"X\", [\n [['def', 1, 55], ['abc', 2, 55]],\n np.array([[10, 1, 55], [5, 2, 55]]),\n np.array([['b', 'A', 'cat'], ['a', 'B', 'cat']], dtype=object)\n ], ids=['mixed', 'numeric', 'object'])\ndef test_one_hot_encoder(X):\n Xtr = check_categorical_onehot(np.array(X)[:, [0]])\n assert_allclose(Xtr, [[0, 1], [1, 0]])\n\n Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]])\n assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]])\n\n Xtr = OneHotEncoder(categories='auto').fit_transform(X)\n assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]])\n\n\ndef test_one_hot_encoder_inverse():\n for sparse_ in [True, False]:\n X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]\n enc = OneHotEncoder(sparse=sparse_)\n X_tr = enc.fit_transform(X)\n exp = np.array(X, dtype=object)\n assert_array_equal(enc.inverse_transform(X_tr), exp)\n\n X = [[2, 55], [1, 55], [3, 55]]\n enc = OneHotEncoder(sparse=sparse_, categories='auto')\n X_tr = enc.fit_transform(X)\n exp = np.array(X)\n assert_array_equal(enc.inverse_transform(X_tr), exp)\n\n # with unknown categories\n X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]\n enc = OneHotEncoder(sparse=sparse_, handle_unknown='ignore',\n categories=[['abc', 'def'], [1, 2],\n [54, 55, 56]])\n X_tr = enc.fit_transform(X)\n exp = np.array(X, dtype=object)\n exp[2, 1] = None\n assert_array_equal(enc.inverse_transform(X_tr), exp)\n\n # with an otherwise numerical output, still object if unknown\n X = [[2, 55], [1, 55], [3, 55]]\n enc = OneHotEncoder(sparse=sparse_, categories=[[1, 2], [54, 56]],\n handle_unknown='ignore')\n X_tr = enc.fit_transform(X)\n exp = np.array(X, dtype=object)\n exp[2, 0] = None\n exp[:, 1] = None\n assert_array_equal(enc.inverse_transform(X_tr), exp)\n\n # incorrect shape raises\n X_tr = np.array([[0, 1, 1], [1, 0, 1]])\n msg = re.escape('Shape of the passed X data is not correct')\n assert_raises_regex(ValueError, msg, enc.inverse_transform, X_tr)\n\n\n@pytest.mark.parametrize(\"X, cat_exp, cat_dtype\", [\n ([['abc', 55], ['def', 55]], [['abc', 'def'], [55]], np.object_),\n (np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer),\n (np.array([['A', 'cat'], ['B', 'cat']], dtype=object),\n [['A', 'B'], ['cat']], np.object_),\n (np.array([['A', 'cat'], ['B', 'cat']]),\n [['A', 'B'], ['cat']], np.str_)\n ], ids=['mixed', 'numeric', 'object', 'string'])\ndef test_one_hot_encoder_categories(X, cat_exp, cat_dtype):\n # order of categories should not depend on order of samples\n for Xi in [X, X[::-1]]:\n enc = OneHotEncoder(categories='auto')\n enc.fit(Xi)\n # assert enc.categories == 'auto'\n assert isinstance(enc.categories_, list)\n for res, exp in zip(enc.categories_, cat_exp):\n assert res.tolist() == exp\n assert np.issubdtype(res.dtype, cat_dtype)\n\n\n@pytest.mark.parametrize(\"X, X2, cats, cat_dtype\", [\n (np.array([['a', 'b']], dtype=object).T,\n np.array([['a', 'd']], dtype=object).T,\n [['a', 'b', 'c']], np.object_),\n (np.array([[1, 2]], dtype='int64').T,\n np.array([[1, 4]], dtype='int64').T,\n [[1, 2, 3]], np.int64),\n (np.array([['a', 'b']], dtype=object).T,\n np.array([['a', 
'd']], dtype=object).T,\n [np.array(['a', 'b', 'c'])], np.object_),\n ], ids=['object', 'numeric', 'object-string-cat'])\ndef test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype):\n enc = OneHotEncoder(categories=cats)\n exp = np.array([[1., 0., 0.],\n [0., 1., 0.]])\n assert_array_equal(enc.fit_transform(X).toarray(), exp)\n assert list(enc.categories[0]) == list(cats[0])\n assert enc.categories_[0].tolist() == list(cats[0])\n # manually specified categories should have same dtype as\n # the data when coerced from lists\n assert enc.categories_[0].dtype == cat_dtype\n\n # when specifying categories manually, unknown categories should already\n # raise when fitting\n enc = OneHotEncoder(categories=cats)\n with pytest.raises(ValueError, match=\"Found unknown categories\"):\n enc.fit(X2)\n enc = OneHotEncoder(categories=cats, handle_unknown='ignore')\n exp = np.array([[1., 0., 0.], [0., 0., 0.]])\n assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp)\n\n\ndef test_one_hot_encoder_unsorted_categories():\n X = np.array([['a', 'b']], dtype=object).T\n\n enc = OneHotEncoder(categories=[['b', 'a', 'c']])\n exp = np.array([[0., 1., 0.],\n [1., 0., 0.]])\n assert_array_equal(enc.fit(X).transform(X).toarray(), exp)\n assert_array_equal(enc.fit_transform(X).toarray(), exp)\n assert enc.categories_[0].tolist() == ['b', 'a', 'c']\n assert np.issubdtype(enc.categories_[0].dtype, np.object_)\n\n # unsorted passed categories still raise for numerical values\n X = np.array([[1, 2]]).T\n enc = OneHotEncoder(categories=[[2, 1, 3]])\n msg = 'Unsorted categories are not supported'\n with pytest.raises(ValueError, match=msg):\n enc.fit_transform(X)\n\n\ndef test_one_hot_encoder_specified_categories_mixed_columns():\n # multiple columns\n X = np.array([['a', 'b'], [0, 2]], dtype=object).T\n enc = OneHotEncoder(categories=[['a', 'b', 'c'], [0, 1, 2]])\n exp = np.array([[1., 0., 0., 1., 0., 0.],\n [0., 1., 0., 0., 0., 1.]])\n assert_array_equal(enc.fit_transform(X).toarray(), exp)\n assert enc.categories_[0].tolist() == ['a', 'b', 'c']\n assert np.issubdtype(enc.categories_[0].dtype, np.object_)\n assert enc.categories_[1].tolist() == [0, 1, 2]\n # integer categories but from object dtype data\n assert np.issubdtype(enc.categories_[1].dtype, np.object_)\n\n\ndef test_one_hot_encoder_pandas():\n pd = pytest.importorskip('pandas')\n\n X_df = pd.DataFrame({'A': ['a', 'b'], 'B': [1, 2]})\n\n Xtr = check_categorical_onehot(X_df)\n assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]])\n\n\ndef test_one_hot_encoder_feature_names():\n enc = OneHotEncoder()\n X = [['Male', 1, 'girl', 2, 3],\n ['Female', 41, 'girl', 1, 10],\n ['Male', 51, 'boy', 12, 3],\n ['Male', 91, 'girl', 21, 30]]\n\n enc.fit(X)\n feature_names = enc.get_feature_names()\n assert isinstance(feature_names, np.ndarray)\n\n assert_array_equal(['x0_Female', 'x0_Male',\n 'x1_1', 'x1_41', 'x1_51', 'x1_91',\n 'x2_boy', 'x2_girl',\n 'x3_1', 'x3_2', 'x3_12', 'x3_21',\n 'x4_3',\n 'x4_10', 'x4_30'], feature_names)\n\n feature_names2 = enc.get_feature_names(['one', 'two',\n 'three', 'four', 'five'])\n\n assert_array_equal(['one_Female', 'one_Male',\n 'two_1', 'two_41', 'two_51', 'two_91',\n 'three_boy', 'three_girl',\n 'four_1', 'four_2', 'four_12', 'four_21',\n 'five_3', 'five_10', 'five_30'], feature_names2)\n\n with pytest.raises(ValueError, match=\"input_features should have length\"):\n enc.get_feature_names(['one', 'two'])\n\n\ndef test_one_hot_encoder_feature_names_unicode():\n enc = OneHotEncoder()\n X = np.array([[u'c❤t1', u'dat2']], 
dtype=object).T\n enc.fit(X)\n feature_names = enc.get_feature_names()\n assert_array_equal([u'x0_c❤t1', u'x0_dat2'], feature_names)\n feature_names = enc.get_feature_names(input_features=[u'n👍me'])\n assert_array_equal([u'n👍me_c❤t1', u'n👍me_dat2'], feature_names)\n\n\n@pytest.mark.parametrize(\"X\", [np.array([[1, np.nan]]).T,\n np.array([['a', np.nan]], dtype=object).T],\n ids=['numeric', 'object'])\n@pytest.mark.parametrize(\"handle_unknown\", ['error', 'ignore'])\ndef test_one_hot_encoder_raise_missing(X, handle_unknown):\n ohe = OneHotEncoder(categories='auto', handle_unknown=handle_unknown)\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.fit(X)\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.fit_transform(X)\n\n ohe.fit(X[:1, :])\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.transform(X)\n\n\n@pytest.mark.parametrize(\"X\", [\n [['abc', 2, 55], ['def', 1, 55]],\n np.array([[10, 2, 55], [20, 1, 55]]),\n np.array([['a', 'B', 'cat'], ['b', 'A', 'cat']], dtype=object)\n ], ids=['mixed', 'numeric', 'object'])\ndef test_ordinal_encoder(X):\n enc = OrdinalEncoder()\n exp = np.array([[0, 1, 0],\n [1, 0, 0]], dtype='int64')\n assert_array_equal(enc.fit_transform(X), exp.astype('float64'))\n enc = OrdinalEncoder(dtype='int64')\n assert_array_equal(enc.fit_transform(X), exp)\n\n\n@pytest.mark.parametrize(\"X, X2, cats, cat_dtype\", [\n (np.array([['a', 'b']], dtype=object).T,\n np.array([['a', 'd']], dtype=object).T,\n [['a', 'b', 'c']], np.object_),\n (np.array([[1, 2]], dtype='int64').T,\n np.array([[1, 4]], dtype='int64').T,\n [[1, 2, 3]], np.int64),\n (np.array([['a', 'b']], dtype=object).T,\n np.array([['a', 'd']], dtype=object).T,\n [np.array(['a', 'b', 'c'])], np.object_),\n ], ids=['object', 'numeric', 'object-string-cat'])\ndef test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype):\n enc = OrdinalEncoder(categories=cats)\n exp = np.array([[0.], [1.]])\n assert_array_equal(enc.fit_transform(X), exp)\n assert list(enc.categories[0]) == list(cats[0])\n assert enc.categories_[0].tolist() == list(cats[0])\n # manually specified categories should have same dtype as\n # the data when coerced from lists\n assert enc.categories_[0].dtype == cat_dtype\n\n # when specifying categories manually, unknown categories should already\n # raise when fitting\n enc = OrdinalEncoder(categories=cats)\n with pytest.raises(ValueError, match=\"Found unknown categories\"):\n enc.fit(X2)\n\n\ndef test_ordinal_encoder_inverse():\n X = [['abc', 2, 55], ['def', 1, 55]]\n enc = OrdinalEncoder()\n X_tr = enc.fit_transform(X)\n exp = np.array(X, dtype=object)\n assert_array_equal(enc.inverse_transform(X_tr), exp)\n\n # incorrect shape raises\n X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]])\n msg = re.escape('Shape of the passed X data is not correct')\n assert_raises_regex(ValueError, msg, enc.inverse_transform, X_tr)\n\n\n@pytest.mark.parametrize(\"X\", [np.array([[1, np.nan]]).T,\n np.array([['a', np.nan]], dtype=object).T],\n ids=['numeric', 'object'])\ndef test_ordinal_encoder_raise_missing(X):\n ohe = OrdinalEncoder()\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.fit(X)\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.fit_transform(X)\n\n ohe.fit(X[:1, :])\n\n with pytest.raises(ValueError, match=\"Input contains NaN\"):\n ohe.transform(X)\n\n\ndef test_encoder_dtypes():\n # check that dtypes are preserved when determining categories\n enc = OneHotEncoder(categories='auto')\n exp = 
np.array([[1., 0., 1., 0.], [0., 1., 0., 1.]], dtype='float64')\n\n for X in [np.array([[1, 2], [3, 4]], dtype='int64'),\n np.array([[1, 2], [3, 4]], dtype='float64'),\n np.array([['a', 'b'], ['c', 'd']]), # string dtype\n np.array([[1, 'a'], [3, 'b']], dtype='object')]:\n enc.fit(X)\n assert all([enc.categories_[i].dtype == X.dtype for i in range(2)])\n assert_array_equal(enc.transform(X).toarray(), exp)\n\n X = [[1, 2], [3, 4]]\n enc.fit(X)\n assert all([np.issubdtype(enc.categories_[i].dtype, np.integer)\n for i in range(2)])\n assert_array_equal(enc.transform(X).toarray(), exp)\n\n X = [[1, 'a'], [3, 'b']]\n enc.fit(X)\n assert all([enc.categories_[i].dtype == 'object' for i in range(2)])\n assert_array_equal(enc.transform(X).toarray(), exp)\n\n\ndef test_encoder_dtypes_pandas():\n # check dtype (similar to test_categorical_encoder_dtypes for dataframes)\n pd = pytest.importorskip('pandas')\n\n enc = OneHotEncoder(categories='auto')\n exp = np.array([[1., 0., 1., 0.], [0., 1., 0., 1.]], dtype='float64')\n\n X = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, dtype='int64')\n enc.fit(X)\n assert all([enc.categories_[i].dtype == 'int64' for i in range(2)])\n assert_array_equal(enc.transform(X).toarray(), exp)\n\n X = pd.DataFrame({'A': [1, 2], 'B': ['a', 'b']})\n enc.fit(X)\n assert all([enc.categories_[i].dtype == 'object' for i in range(2)])\n assert_array_equal(enc.transform(X).toarray(), exp)\n\n\ndef test_one_hot_encoder_warning():\n enc = OneHotEncoder()\n X = [['Male', 1], ['Female', 3]]\n np.testing.assert_no_warnings(enc.fit_transform, X)\n", "#!/usr/bin/env python\n\"\"\"\n==============================================\nRegularization path of L1- Logistic Regression\n==============================================\n\n\nTrain l1-penalized logistic regression models on a binary classification\nproblem derived from the Iris dataset.\n\nThe models are ordered from strongest regularized to least regularized. The 4\ncoefficients of the models are collected and plotted as a \"regularization\npath\": on the left-hand side of the figure (strong regularizers), all the\ncoefficients are exactly 0. 
When regularization gets progressively looser,\ncoefficients can get non-zero values one after the other.\n\nHere we choose the SAGA solver because it can efficiently optimize for the\nLogistic Regression loss with a non-smooth, sparsity inducing l1 penalty.\n\nAlso note that we set a low value for the tolerance to make sure that the model\nhas converged before collecting the coefficients.\n\nWe also use warm_start=True which means that the coefficients of the models are\nreused to initialize the next model fit to speed-up the computation of the\nfull-path.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# License: BSD 3 clause\n\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import linear_model\nfrom sklearn import datasets\nfrom sklearn.svm import l1_min_c\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 2]\ny = y[y != 2]\n\nX /= X.max() # Normalize X to speed-up convergence\n\n# #############################################################################\n# Demo path functions\n\ncs = l1_min_c(X, y, loss='log') * np.logspace(0, 7, 16)\n\n\nprint(\"Computing regularization path ...\")\nstart = time()\nclf = linear_model.LogisticRegression(penalty='l1', solver='saga',\n tol=1e-6, max_iter=int(1e6),\n warm_start=True)\ncoefs_ = []\nfor c in cs:\n clf.set_params(C=c)\n clf.fit(X, y)\n coefs_.append(clf.coef_.ravel().copy())\nprint(\"This took %0.3fs\" % (time() - start))\n\ncoefs_ = np.array(coefs_)\nplt.plot(np.log10(cs), coefs_, marker='o')\nymin, ymax = plt.ylim()\nplt.xlabel('log(C)')\nplt.ylabel('Coefficients')\nplt.title('Logistic Regression Path')\nplt.axis('tight')\nplt.show()\n", "\"\"\"\nAgglomerative clustering with and without structure\n===================================================\n\nThis example shows the effect of imposing a connectivity graph to capture\nlocal structure in the data. The graph is simply the graph of 20 nearest\nneighbors.\n\nTwo consequences of imposing a connectivity can be seen. First clustering\nwith a connectivity matrix is much faster.\n\nSecond, when using a connectivity matrix, single, average and complete\nlinkage are unstable and tend to create a few clusters that grow very\nquickly. Indeed, average and complete linkage fight this percolation behavior\nby considering all the distances between two clusters when merging them (\nwhile single linkage exaggerates the behaviour by considering only the\nshortest distance between clusters). The connectivity graph breaks this\nmechanism for average and complete linkage, making them resemble the more\nbrittle single linkage. This effect is more pronounced for very sparse graphs\n(try decreasing the number of neighbors in kneighbors_graph) and with\ncomplete linkage. In particular, having a very small number of neighbors in\nthe graph, imposes a geometry that is close to that of single linkage,\nwhich is well known to have this percolation instability. \"\"\"\n# Authors: Gael Varoquaux, Nelle Varoquaux\n# License: BSD 3 clause\n\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.neighbors import kneighbors_graph\n\n# Generate sample data\nn_samples = 1500\nnp.random.seed(0)\nt = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))\nx = t * np.cos(t)\ny = t * np.sin(t)\n\n\nX = np.concatenate((x, y))\nX += .7 * np.random.randn(2, n_samples)\nX = X.T\n\n# Create a graph capturing local connectivity. 
Larger number of neighbors\n# will give more homogeneous clusters to the cost of computation\n# time. A very large number of neighbors gives more evenly distributed\n# cluster sizes, but may not impose the local manifold structure of\n# the data\nknn_graph = kneighbors_graph(X, 30, include_self=False)\n\nfor connectivity in (None, knn_graph):\n for n_clusters in (30, 3):\n plt.figure(figsize=(10, 4))\n for index, linkage in enumerate(('average',\n 'complete',\n 'ward',\n 'single')):\n plt.subplot(1, 4, index + 1)\n model = AgglomerativeClustering(linkage=linkage,\n connectivity=connectivity,\n n_clusters=n_clusters)\n t0 = time.time()\n model.fit(X)\n elapsed_time = time.time() - t0\n plt.scatter(X[:, 0], X[:, 1], c=model.labels_,\n cmap=plt.cm.nipy_spectral)\n plt.title('linkage=%s\\n(time %.2fs)' % (linkage, elapsed_time),\n fontdict=dict(verticalalignment='top'))\n plt.axis('equal')\n plt.axis('off')\n\n plt.subplots_adjust(bottom=0, top=.89, wspace=0,\n left=0, right=1)\n plt.suptitle('n_cluster=%i, connectivity=%r' %\n (n_clusters, connectivity is not None), size=17)\n\n\nplt.show()\n" ]
[ [ "numpy.dot", "numpy.argmin", "numpy.rollaxis", "numpy.asfortranarray", "numpy.mean", "numpy.finfo", "numpy.sort", "scipy.sparse.isspmatrix", "numpy.empty", "numpy.log10", "scipy.sparse.csr_matrix", "numpy.flatnonzero", "scipy.sparse.issparse", "numpy.reshape", "numpy.zeros", "numpy.asarray", "numpy.sum", "numpy.may_share_memory", "numpy.atleast_1d" ], [ "numpy.random.RandomState", "numpy.median", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace" ], [ "numpy.array", "numpy.asarray", "scipy.sparse.isspmatrix_csr", "sklearn.utils.testing.assert_raises_regex", "sklearn.utils.testing.assert_no_warnings", "sklearn.utils.testing.assert_warns", "sklearn.utils.testing.assert_equal", "sklearn.utils.testing.assert_allclose", "sklearn.utils.testing.assert_array_equal", "sklearn.utils.testing.assert_raises", "numpy.testing.assert_no_warnings", "sklearn.utils.testing.ignore_warnings", "sklearn.utils.testing.assert_warns_message", "sklearn.preprocessing.OrdinalEncoder", "numpy.where", "numpy.issubdtype", "sklearn.preprocessing.OneHotEncoder" ], [ "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "sklearn.svm.l1_min_c", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "numpy.log10", "numpy.logspace", "sklearn.datasets.load_iris" ], [ "numpy.concatenate", "numpy.sin", "sklearn.cluster.AgglomerativeClustering", "numpy.random.rand", "numpy.random.seed", "matplotlib.pyplot.suptitle", "numpy.random.randn", "matplotlib.pyplot.figure", "matplotlib.pyplot.scatter", "sklearn.neighbors.kneighbors_graph", "numpy.cos", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot" ] ]
vishalsingha/Colorme
[ "0bd72692444744b167ef82df4d885ca04ad746d3" ]
[ "utils.py" ]
[ "from flask import Flask, request, redirect, url_for, render_template, flash\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport keras\r\nimport os\r\nfrom werkzeug.utils import secure_filename\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, UpSampling2D, Input\r\nfrom keras.preprocessing.image import img_to_array, load_img\r\nfrom skimage.color import rgb2lab, lab2rgb, rgb2gray\r\nfrom skimage.transform import resize\r\nfrom skimage.io import imsave, imread\r\n\r\n\r\n# build model and load weight\r\ndef build_model(): \r\n vgg = tf.keras.applications.VGG16()\r\n model = Sequential()\r\n for idx, layer in enumerate(vgg.layers):\r\n if idx<19:\r\n model.add(layer)\r\n for layer in model.layers:\r\n layer.trainable = False\r\n model.add(Conv2D(256, (3, 3), activation='relu', padding = 'same'))\r\n model.add(UpSampling2D((2, 2)))\r\n model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same'))\r\n model.add(UpSampling2D((2, 2)))\r\n model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same'))\r\n model.add(UpSampling2D((2, 2)))\r\n model.add(Conv2D(32, (3, 3), activation='relu', padding = 'same'))\r\n model.add(UpSampling2D((2, 2)))\r\n model.add(Conv2D(16, (3, 3), activation='relu', padding = 'same'))\r\n model.add(UpSampling2D((2, 2)))\r\n model.add(Conv2D(2, (3, 3), activation='tanh', padding = 'same'))\r\n model.load_weights(\"C:\\\\Users\\\\HPvns\\\\Desktop\\\\colorme\\\\weight_file.h5\")\r\n model.compile(optimizer='adam', loss = 'mse', metrics=['accuracy'],)\r\n return model\r\n\r\n\r\ndef load_tflite():\r\n # Load the TFLite model and allocate tensors. \r\n interpreter = tf.lite.Interpreter(model_path=\"model.tflite\") \r\n interpreter.allocate_tensors()\r\n return interpreter\r\n\r\n# predict by using regular tensorflow model\r\ndef predict(filename, model, app):\r\n test = img_to_array(load_img(os.path.join(app.config['UPLOAD_FOLDER'], filename)))\r\n test = resize(test, (224,224), anti_aliasing=True)\r\n test*= 1.0/255\r\n lab = rgb2lab(test)\r\n l = lab[:,:,0]\r\n L = np.repeat(l[..., np.newaxis], 3, -1)\r\n L = L.reshape((1,224,224,3))\r\n ab = model.predict(L)\r\n ab = ab*128\r\n cur = np.zeros((224, 224, 3))\r\n cur[:,:,0] = l\r\n cur[:,:,1:] = ab\r\n imsave(\"img//output//out.jpg\", lab2rgb(cur))\r\n\r\n\r\n# predict by using tflite model\r\ndef predict_tflite(filename, app, interpreter):\r\n test = img_to_array(load_img(os.path.join(app.config['UPLOAD_FOLDER'], filename)))\r\n test = resize(test, (224,224), anti_aliasing=True)\r\n test*= 1.0/255\r\n lab = rgb2lab(test)\r\n l = lab[:,:,0]\r\n L = np.repeat(l[..., np.newaxis], 3, -1)\r\n L = L.reshape((1,224,224,3))\r\n\r\n input_data = np.array(L, dtype=np.float32)\r\n # Get input and output tensors. \r\n input_details = interpreter.get_input_details() \r\n output_details = interpreter.get_output_details()\r\n #Predict model with processed data \r\n input_shape = input_details[0]['shape']\r\n interpreter.set_tensor(input_details[0]['index'], input_data) \r\n print(\"invoking model\") \r\n interpreter.invoke() \r\n print(\"invoking model Done\") \r\n ab = interpreter.get_tensor(output_details[0]['index']) \r\n ab = ab*128\r\n cur = np.zeros((224, 224, 3))\r\n cur[:,:,0] = l\r\n cur[:,:,1:] = ab\r\n imsave(\"img//output//out.jpg\", lab2rgb(cur))" ]
[ [ "numpy.repeat", "numpy.array", "numpy.zeros", "tensorflow.lite.Interpreter", "tensorflow.keras.applications.VGG16" ] ]
c0g/tensorflow_current
[ "f49aca4532c155597c669cf2189f211cafbebf96" ]
[ "tensorflow/python/framework/gen_docs_combined.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Updates generated docs from Python doc comments.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os.path\nimport sys\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib import ffmpeg\nfrom tensorflow.python.client import client_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import docs\nfrom tensorflow.python.framework import framework_lib\n\n\ntf.flags.DEFINE_string(\"out_dir\", None,\n \"Directory to which docs should be written.\")\ntf.flags.DEFINE_boolean(\"print_hidden_regex\", False,\n \"Dump a regular expression matching any hidden symbol\")\nFLAGS = tf.flags.FLAGS\n\n\nPREFIX_TEXT = \"\"\"\nNote: Functions taking `Tensor` arguments can also take anything accepted by\n[`tf.convert_to_tensor`](framework.md#convert_to_tensor).\n\"\"\"\n\n\ndef module_names():\n return [\n \"tf\",\n \"tf.errors\",\n \"tf.image\",\n \"tf.nn\",\n \"tf.nn.rnn_cell\",\n \"tf.train\",\n \"tf.python_io\",\n \"tf.summary\",\n \"tf.test\",\n \"tf.contrib.bayesflow.entropy\",\n \"tf.contrib.bayesflow.monte_carlo\",\n \"tf.contrib.bayesflow.stochastic_graph\",\n \"tf.contrib.bayesflow.stochastic_tensor\",\n \"tf.contrib.bayesflow.variational_inference\",\n \"tf.contrib.copy_graph\",\n \"tf.contrib.crf\",\n \"tf.contrib.distributions\",\n \"tf.contrib.distributions.bijector\",\n \"tf.contrib.ffmpeg\",\n \"tf.contrib.framework\",\n \"tf.contrib.graph_editor\",\n \"tf.contrib.layers\",\n \"tf.contrib.learn\",\n \"tf.contrib.learn.monitors\",\n \"tf.contrib.losses\",\n \"tf.contrib.rnn\",\n \"tf.contrib.metrics\",\n \"tf.contrib.training\",\n \"tf.contrib.util\",\n ]\n\n\ndef find_module(base_module, name):\n if name == \"tf\":\n return base_module\n # Special case for ffmpeg is needed since it's not linked in by default due\n # to size concerns.\n elif name == \"tf.contrib.ffmpeg\":\n return ffmpeg\n elif name.startswith(\"tf.\"):\n subname = name[3:]\n subnames = subname.split(\".\")\n parent_module = base_module\n for s in subnames:\n if not hasattr(parent_module, s):\n raise ValueError(\n \"Module not found: {}. Submodule {} not found in parent module {}.\"\n \" Possible candidates are {}\".format(\n name, s, parent_module.__name__, dir(parent_module)))\n parent_module = getattr(parent_module, s)\n return parent_module\n else:\n raise ValueError(\n \"Invalid module name: {}. 
Module names must start with 'tf.'\".format(\n name))\n\n\ndef get_module_to_name(names):\n return collections.OrderedDict([(find_module(tf, x), x) for x in names])\n\n\ndef all_libraries(module_to_name, members, documented):\n \"\"\"Make a list of the individual files that we want to create.\n\n Args:\n module_to_name: Dictionary mapping modules to short names.\n members: Dictionary mapping member name to (fullname, member).\n documented: Set of documented names to update.\n\n Returns:\n List of (filename, docs.Library) pairs.\n \"\"\"\n def library(name, title, module=None, **args):\n if module is None:\n module = sys.modules[\"tensorflow.python.ops.\" + name]\n return (name + \".md\", docs.Library(title=title,\n module_to_name=module_to_name,\n members=members,\n documented=documented,\n module=module,\n **args))\n return collections.OrderedDict([\n # Splits of module 'tf'.\n library(\"framework\", \"Building Graphs\", framework_lib),\n library(\"check_ops\", \"Asserts and boolean checks.\"),\n library(\"constant_op\", \"Constants, Sequences, and Random Values\",\n constant_op, prefix=PREFIX_TEXT),\n library(\"state_ops\",\n \"Variables\",\n exclude_symbols=[\"create_partitioned_variables\"],\n prefix=PREFIX_TEXT),\n library(\"array_ops\",\n \"Tensor Transformations\",\n exclude_symbols=[\"list_diff\"],\n prefix=PREFIX_TEXT),\n library(\"math_ops\",\n \"Math\",\n exclude_symbols=[\"sparse_matmul\", \"arg_min\", \"arg_max\",\n \"lin_space\", \"sparse_segment_mean_grad\"],\n prefix=PREFIX_TEXT),\n library(\"string_ops\", \"Strings\",\n prefix=PREFIX_TEXT),\n library(\"histogram_ops\", \"Histograms\"),\n library(\"control_flow_ops\", \"Control Flow\", prefix=PREFIX_TEXT),\n library(\"functional_ops\", \"Higher Order Functions\", prefix=PREFIX_TEXT),\n library(\"tensor_array_ops\", \"TensorArray Operations\", prefix=PREFIX_TEXT),\n library(\"session_ops\", \"Tensor Handle Operations\", prefix=PREFIX_TEXT),\n library(\"image\", \"Images\", tf.image, exclude_symbols=[\"ResizeMethod\"],\n prefix=PREFIX_TEXT),\n library(\"sparse_ops\",\n \"Sparse Tensors\",\n exclude_symbols=[\"serialize_sparse\", \"serialize_many_sparse\",\n \"deserialize_many_sparse\"],\n prefix=PREFIX_TEXT),\n library(\"io_ops\",\n \"Inputs and Readers\",\n exclude_symbols=[\"LookupTableBase\", \"HashTable\",\n \"initialize_all_tables\",\n \"parse_single_sequence_example\",\n \"string_to_hash_bucket\"],\n prefix=PREFIX_TEXT),\n library(\"python_io\", \"Data IO (Python functions)\", tf.python_io),\n library(\"nn\",\n \"Neural Network\",\n tf.nn,\n exclude_symbols=[\"conv2d_backprop_input\",\n \"conv2d_backprop_filter\", \"avg_pool_grad\",\n \"max_pool_grad\", \"max_pool_grad_with_argmax\",\n \"batch_norm_with_global_normalization_grad\",\n \"lrn_grad\", \"relu6_grad\", \"softplus_grad\",\n \"softsign_grad\", \"xw_plus_b\", \"relu_layer\",\n \"lrn\", \"batch_norm_with_global_normalization\",\n \"batch_norm_with_global_normalization_grad\",\n \"all_candidate_sampler\", \"seq2seq\"],\n prefix=PREFIX_TEXT),\n library(\"rnn_cell\", \"Neural Network RNN Cells\", tf.nn.rnn_cell),\n library(\"client\", \"Running Graphs\", client_lib),\n library(\"train\",\n \"Training\",\n tf.train,\n exclude_symbols=[\"Feature\", \"Features\", \"BytesList\", \"FloatList\",\n \"Int64List\", \"Example\", \"InferenceExample\",\n \"FeatureList\", \"FeatureLists\", \"RankingExample\",\n \"SequenceExample\"]),\n library(\"script_ops\",\n \"Wraps python functions\",\n prefix=PREFIX_TEXT),\n library(\"summary\", \"Summary Operations\", tf.summary),\n 
library(\"test\", \"Testing\", tf.test),\n library(\"contrib.bayesflow.entropy\",\n \"BayesFlow Entropy (contrib)\",\n tf.contrib.bayesflow.entropy),\n library(\"contrib.bayesflow.monte_carlo\",\n \"BayesFlow Monte Carlo (contrib)\",\n tf.contrib.bayesflow.monte_carlo),\n library(\"contrib.bayesflow.stochastic_graph\",\n \"BayesFlow Stochastic Graph (contrib)\",\n tf.contrib.bayesflow.stochastic_graph),\n library(\"contrib.bayesflow.stochastic_tensor\",\n \"BayesFlow Stochastic Tensors (contrib)\",\n tf.contrib.bayesflow.stochastic_tensor),\n library(\"contrib.bayesflow.variational_inference\",\n \"BayesFlow Variational Inference (contrib)\",\n tf.contrib.bayesflow.variational_inference),\n library(\"contrib.crf\", \"CRF (contrib)\", tf.contrib.crf),\n library(\"contrib.distributions\", \"Statistical distributions (contrib)\",\n tf.contrib.distributions),\n library(\"contrib.distributions.bijector\",\n \"Random variable transformations (contrib)\",\n tf.contrib.distributions.bijector),\n library(\"contrib.ffmpeg\", \"FFmpeg (contrib)\", ffmpeg),\n library(\"contrib.framework\", \"Framework (contrib)\", tf.contrib.framework),\n library(\"contrib.graph_editor\", \"Graph Editor (contrib)\",\n tf.contrib.graph_editor),\n library(\"contrib.layers\", \"Layers (contrib)\", tf.contrib.layers),\n library(\"contrib.learn\", \"Learn (contrib)\", tf.contrib.learn),\n library(\"contrib.learn.monitors\", \"Monitors (contrib)\",\n tf.contrib.learn.monitors),\n library(\"contrib.losses\", \"Losses (contrib)\", tf.contrib.losses),\n library(\"contrib.rnn\", \"RNN (contrib)\", tf.contrib.rnn),\n library(\"contrib.metrics\", \"Metrics (contrib)\", tf.contrib.metrics),\n library(\"contrib.training\", \"Training (contrib)\", tf.contrib.training),\n library(\"contrib.util\", \"Utilities (contrib)\", tf.contrib.util),\n library(\"contrib.copy_graph\", \"Copying Graph Elements (contrib)\",\n tf.contrib.copy_graph),\n ])\n\n_hidden_symbols = [\"Event\", \"LogMessage\", \"Summary\", \"SessionLog\", \"xrange\",\n \"HistogramProto\", \"ConfigProto\", \"NodeDef\", \"GraphDef\",\n \"GPUOptions\", \"GraphOptions\", \"RunOptions\", \"RunMetadata\",\n \"SessionInterface\", \"BaseSession\", \"NameAttrList\",\n \"AttrValue\", \"OptimizerOptions\",\n \"CollectionDef\", \"MetaGraphDef\", \"QueueRunnerDef\",\n \"SaverDef\", \"VariableDef\", \"TestCase\", \"GrpcServer\",\n \"ClusterDef\", \"JobDef\", \"ServerDef\"]\n\n# TODO(skleinfeld, deannarubin) Address shortname\n# conflict between tf.contrib.learn.NanLossDuringTrainingError and\n# tf.contrib.learn.monitors.NanLossDuringTrainingError, arising due\n# to imports in learn/python/learn/__init__.py\n# TODO(wicke): Remove contrib.layers.relu* after shortnames are\n# disabled. 
These conflict with tf.nn.relu*\nEXCLUDE = frozenset([\"tf.contrib.learn.monitors.NanLossDuringTrainingError\",\n \"tf.contrib.layers.relu\", \"tf.contrib.layers.relu6\",\n \"tf.contrib.framework.assert_global_step\",\n \"tf.contrib.framework.get_global_step\",\n \"tf.contrib.learn.NanLossDuringTrainingError\"])\n\n\ndef main(unused_argv):\n if not FLAGS.out_dir:\n tf.logging.error(\"out_dir not specified\")\n return -1\n\n # Document libraries\n documented = set()\n module_to_name = get_module_to_name(module_names())\n members = docs.collect_members(module_to_name, exclude=EXCLUDE)\n libraries = all_libraries(module_to_name, members, documented).items()\n\n # Define catch_all library before calling write_libraries to avoid complaining\n # about generically hidden symbols.\n catch_all = docs.Library(title=\"Catch All\", module=None,\n exclude_symbols=_hidden_symbols,\n module_to_name=module_to_name, members=members,\n documented=documented)\n\n # Write docs to files\n docs.write_libraries(FLAGS.out_dir, libraries)\n\n # Make it easy to search for hidden symbols\n if FLAGS.print_hidden_regex:\n hidden = set(_hidden_symbols)\n for _, lib in libraries:\n hidden.update(lib.exclude_symbols)\n print(r\"hidden symbols regex = r'\\b(%s)\\b'\" % \"|\".join(sorted(hidden)))\n\n # Verify that all symbols are mentioned in some library doc.\n catch_all.assert_no_leftovers()\n\n # Generate index\n with open(os.path.join(FLAGS.out_dir, \"index.md\"), \"w\") as f:\n docs.Index(module_to_name, members, libraries,\n \"../../api_docs/python/\").write_markdown_to_file(f)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.flags.DEFINE_string", "tensorflow.flags.DEFINE_boolean", "tensorflow.python.framework.docs.Library", "tensorflow.python.framework.docs.collect_members", "tensorflow.logging.error", "tensorflow.python.framework.docs.Index", "tensorflow.app.run", "tensorflow.python.framework.docs.write_libraries" ] ]
zwxu064/TransInit
[ "134ac0463684e695448c18486f200053d372cf66" ]
[ "third_party/rloss/pytorch_deeplab_v3_plus/dataloaders/datasets/pascal.py" ]
[ "from __future__ import print_function, division\nimport os\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom ..custom_path import Path\nfrom torchvision import transforms\nfrom ..custom_transforms import FixScaleCrop, RandomHorizontalFlip\nfrom ..custom_transforms import RandomScaleCrop, RandomGaussianBlur\nfrom ..custom_transforms import Normalize, ToTensor, AutoAdjustSize\n\n\nclass VOCSegmentation(Dataset):\n \"\"\"\n PascalVoc dataset\n \"\"\"\n NUM_CLASSES = 21\n\n def __init__(self,\n args,\n server='039614',\n split='train',\n ):\n \"\"\"\n :param base_dir: path to VOC dataset directory\n :param split: train/val\n :param transform: transform to apply\n \"\"\"\n super().__init__()\n base_dir = Path.db_root_dir('pascal', data_root=args.data_root, server=server)\n self._base_dir = base_dir\n self._image_dir = os.path.join(self._base_dir, 'JPEGImages')\n\n if args.mode == 'weakly':\n self._cat_dir = os.path.join(self._base_dir, 'pascal_2012_scribble')\n else:\n self._cat_dir = os.path.join(self._base_dir, 'SegmentationClassAug')\n\n if isinstance(split, str):\n self.split = [split]\n else:\n split.sort()\n self.split = split\n\n self.args = args\n\n _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'SegmentationAug')\n\n self.im_ids = []\n self.images = []\n self.categories = []\n self.image_names = []\n\n for splt in self.split:\n with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), \"r\") as f:\n lines = f.read().splitlines()\n\n for ii, line in enumerate(lines):\n _image = os.path.join(self._image_dir, line + \".jpg\")\n assert os.path.isfile(_image)\n _cat = os.path.join(self._cat_dir, line + \".png\")\n assert os.path.isfile(_cat)\n\n self.im_ids.append(line)\n self.images.append(_image)\n self.categories.append(_cat)\n self.image_names.append(line)\n\n assert (len(self.images) == len(self.categories))\n\n # Display stats\n print('Number of images in {}: {:d}'.format(split, len(self.images)))\n\n def _make_img_gt_point_pair(self, index):\n image_path = self.images[index]\n category_path = self.categories[index]\n\n _img = Image.open(image_path).convert('RGB')\n _target = Image.open(category_path) if category_path else None\n\n return _img, _target\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, index):\n _img, _target = self._make_img_gt_point_pair(index)\n _padding_mask = Image.fromarray(np.ones((_img.height, _img.width), dtype=np.uint8))\n sample = {'image': _img, 'padding_mask': _padding_mask, 'size': (_img.height, _img.width)}\n\n for split in self.split:\n if split in {'train', 'val'}:\n sample.update({'label': _target})\n\n for split in self.split:\n if split == \"train\":\n sample = self.transform_tr_part1(sample)\n elif split in {'val', 'test'}:\n sample = self.transform_val_part1(sample)\n else:\n assert False\n\n if split == 'train':\n sample = self.transform_tr_part2(sample)\n elif split in {'val', 'test'}:\n sample = self.transform_val_part2(sample)\n\n if 'padding_mask' in sample:\n del sample['padding_mask']\n\n sample.update({'name': self.image_names[index]})\n\n return sample\n\n def transform_tr_part1(self, sample):\n composed_transforms = transforms.Compose([\n RandomHorizontalFlip(),\n RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),\n RandomGaussianBlur()]) # Zhiwei\n\n return composed_transforms(sample)\n\n def transform_tr_part1_1(self, sample):\n composed_transforms = transforms.Compose([\n RandomHorizontalFlip(),\n 
RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size)]) # Zhiwei\n\n return composed_transforms(sample)\n\n def transform_tr_part1_2(self, sample):\n composed_transforms = transforms.Compose([RandomGaussianBlur()])\n\n return composed_transforms(sample)\n\n def transform_tr_part2(self, sample):\n composed_transforms = transforms.Compose([\n Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ToTensor()])\n\n return composed_transforms(sample)\n\n def transform_val_part1(self, sample):\n if self.args.enable_adjust_val:\n composed_transforms = transforms.Compose([\n AutoAdjustSize(factor=self.args.adjust_val_factor, fill=254)])\n else:\n composed_transforms = transforms.Compose([\n FixScaleCrop(crop_size=self.args.crop_size)])\n\n return composed_transforms(sample)\n\n def transform_val_part2(self, sample):\n composed_transforms = transforms.Compose([\n Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ToTensor()])\n\n return composed_transforms(sample)\n\n def __str__(self):\n return 'VOC2012(split=' + str(self.split) + ')'\n\nif __name__ == '__main__':\n from ..utils import decode_segmap\n from torch.utils.data import DataLoader\n import matplotlib.pyplot as plt\n import argparse\n\n parser = argparse.ArgumentParser()\n args = parser.parse_args()\n args.base_size = 513\n args.crop_size = 513\n\n voc_train = VOCSegmentation(args, split='train')\n\n dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0)\n\n for ii, sample in enumerate(dataloader):\n for jj in range(sample[\"image\"].size()[0]):\n img = sample['image'].numpy()\n gt = sample['label'].numpy()\n tmp = np.array(gt[jj]).astype(np.uint8)\n segmap = decode_segmap(tmp, dataset='pascal')\n img_tmp = np.transpose(img[jj], axes=[1, 2, 0])\n img_tmp *= (0.229, 0.224, 0.225)\n img_tmp += (0.485, 0.456, 0.406)\n img_tmp *= 255.0\n img_tmp = img_tmp.astype(np.uint8)\n plt.figure()\n plt.title('display')\n plt.subplot(211)\n plt.imshow(img_tmp)\n plt.subplot(212)\n plt.imshow(segmap)\n\n if ii == 1:\n break\n\n plt.show(block=True)" ]
[ [ "numpy.array", "numpy.ones", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.transpose", "torch.utils.data.DataLoader", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplot" ] ]
yassirf/sequence-training
[ "a7cd7a9bb2f4f1d76c4f62a62704e396ad029540", "a7cd7a9bb2f4f1d76c4f62a62704e396ad029540", "a7cd7a9bb2f4f1d76c4f62a62704e396ad029540" ]
[ "examples/self_distribution_distillation/self_distribution_distillation_src/criterions/distributiondir.py", "examples/self_distribution_distillation/self_distribution_distillation_src/uncertainty/categorical.py", "examples/self_distribution_distillation/self_distribution_distillation_src/criterions/distillation.py" ]
[ "import math\nfrom dataclasses import dataclass, field\n\nimport torch\nfrom torch.distributions.dirichlet import Dirichlet\nfrom fairseq import metrics, utils\nfrom fairseq.criterions import register_criterion\nfrom fairseq.criterions.label_smoothed_cross_entropy import (\n LabelSmoothedCrossEntropyCriterion\n)\nfrom fairseq.dataclass import FairseqDataclass\nfrom .distillation import KLDivergenceCriterionConfig, KLDivergenceCriterion\nfrom self_distribution_distillation_src.utils.dirichlet import DirichletEstimation\nfrom omegaconf import II\n\n\n@dataclass\nclass DirKLDivergenceAndDirCriterionConfig(FairseqDataclass):\n label_smoothing: float = field(\n default=0.0,\n metadata={\"help\": \"epsilon for label smoothing, 0 means no label smoothing\"},\n )\n ls_ratio: float = field(\n default=0.0,\n metadata={\"help\": \"Weighting of label smoothed loss\"}\n )\n self_ratio: float = field(\n default=0.0,\n metadata={\"help\": \"ratio of default to self loss\"}\n )\n temperature_scale_num: float = field(\n default=1.0,\n metadata={\"help\": \"temperature scaling alphas in kl divergence for numerical stability\"}\n )\n report_accuracy: bool = field(\n default=False,\n metadata={\"help\": \"report accuracy metric\"},\n )\n ignore_prefix_size: int = field(\n default=0,\n metadata={\"help\": \"Ignore first N tokens\"},\n )\n sentence_avg: bool = II(\"optimization.sentence_avg\")\n\n\ndef dirichlet_kl_divergence(log_alphas, teacher_log_alphas, temperature_scale_num, ignore_mask=None, reduce=True):\n\n # Get target scaled distributions\n teacher_alphas = torch.exp(teacher_log_alphas / temperature_scale_num)\n teacher_alphas = Dirichlet(teacher_alphas)\n\n # Get prediction scaled distribution\n alphas = torch.exp(log_alphas / temperature_scale_num)\n alphas = Dirichlet(alphas.unsqueeze(dim = 2))\n\n # Use built in kl divergence (batch, seq, models)\n loss = torch.distributions.kl.kl_divergence(teacher_alphas, alphas)\n loss = loss.mean(dim = -1)\n\n # Mask out padding elements\n if ignore_mask is not None: loss.masked_fill_(ignore_mask, 0.0)\n\n if reduce: loss = loss.sum()\n return loss\n\n\n@register_criterion(\"dir_kl_divergence_distillation_and_dir\", dataclass=DirKLDivergenceAndDirCriterionConfig)\nclass DirKLDivergenceAndDirCriterion(KLDivergenceCriterion):\n def __init__(\n self,\n task,\n sentence_avg,\n label_smoothing,\n ignore_prefix_size=0,\n report_accuracy=False,\n ls_ratio=0.0,\n self_ratio=0.0,\n temperature_scale_num=1.0,\n ):\n super(DirKLDivergenceAndDirCriterion, self).__init__(\n task = task,\n sentence_avg = sentence_avg,\n label_smoothing = label_smoothing,\n ignore_prefix_size = ignore_prefix_size,\n report_accuracy = report_accuracy,\n temperature_scale_num = temperature_scale_num,\n temperature_scale_est = 1.0,\n )\n\n # For weighting label smoothed loss\n self.ls_ratio = ls_ratio\n\n # For dirichlet estimation\n self.self_ratio = self_ratio\n\n @classmethod\n def build_criterion(cls, cfg: DirKLDivergenceAndDirCriterionConfig, task):\n return DirKLDivergenceAndDirCriterion(\n task = task,\n sentence_avg = cfg.sentence_avg,\n label_smoothing = cfg.label_smoothing,\n ignore_prefix_size = cfg.ignore_prefix_size,\n report_accuracy = cfg.report_accuracy,\n ls_ratio = cfg.ls_ratio,\n self_ratio = cfg.self_ratio,\n temperature_scale_num = cfg.temperature_scale_num,\n )\n\n def compute_dir_loss(self, model, net_output, sample, reduce=True):\n \"\"\"\n Estimate and compute the kl-divergence between student and teacher dirichlets\n \"\"\"\n\n # Get teacher predictions (batch, len, 
models, vocab)\n teacher_log_alphas = sample['teacher_ensemble_logits']\n\n # Get student predictions\n log_alphas = net_output[0]\n\n # Compute loss\n loss = dirichlet_kl_divergence(\n log_alphas = log_alphas,\n teacher_log_alphas = teacher_log_alphas,\n temperature_scale_num = self.temperature_scale_num,\n ignore_mask = self.get_padding_mask(sample),\n reduce = reduce,\n )\n\n return loss\n\n def forward(self, model, sample, reduce=True):\n \"\"\"\n Compute the loss for the given sample.\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n\n # Get prediction\n net_output = model(**sample[\"net_input\"])\n\n # Get label smoothed and nll loss\n ls_loss, nll_loss = self.compute_nll_loss(model, net_output, sample, reduce)\n\n # Zero element\n zero = torch.zeros_like(ls_loss)\n\n # Get kl-divergence loss only during training\n kl_loss = self.compute_kl_loss(model, net_output, sample, reduce) if model.training else zero\n\n # Get dirichlet loss only during training\n dir_loss = self.compute_dir_loss(model, net_output, sample, reduce) if model.training else zero\n\n # Total loss\n loss = ls_loss * self.ls_ratio + (kl_loss + self.self_ratio * dir_loss) * (1 - self.ls_ratio)\n\n # Sample size for gradient normalisation\n sample_size = sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n\n logging_output = {\n \"loss\": loss.data,\n \"kl_loss\": kl_loss.data,\n \"dir_loss\": dir_loss.data,\n \"nll_loss\": nll_loss.data,\n \"ls_loss\": ls_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n\n if self.report_accuracy:\n n_correct, total = self.compute_accuracy(model, net_output, sample)\n logging_output[\"n_correct\"] = utils.item(n_correct.data)\n logging_output[\"total\"] = utils.item(total.data)\n\n return loss, sample_size, logging_output\n\n @classmethod\n def reduce_metrics(cls, logging_outputs) -> None:\n \"\"\"\n Aggregate logging outputs from data parallel training.\n \"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n kl_loss_sum = sum(log.get(\"kl_loss\", 0) for log in logging_outputs)\n dir_loss_sum = sum(log.get(\"dir_loss\", 0) for log in logging_outputs)\n nll_loss_sum = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\n ls_loss_sum = sum(log.get(\"ls_loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"kl_loss\", kl_loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"dir_loss\", dir_loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"nll_loss\", nll_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_scalar(\n \"ls_loss\", ls_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n\n # Additional metrics for accuracy amoung others\n cls.additional_metrics(logging_outputs)\n", "import numpy as np\nimport torch\n\nfrom .estimators import EnsembleCategoricals\nfrom .misc import process_outputs\n\n\ndef compute_token_uncertainties(args, outputs, extra):\n \"\"\"\n Function which computes 
token-level measures of uncertainty for Categorical model.\n :param args: specifies uncertainty estimation parameters\n :param outputs: List of Tensors of size [batch_size, seq_len, vocab] of Log Dirichlet Concentrations\n :return: Tensors of token level uncertainties of size [batch_size, seq_len]\n \"\"\"\n outputs = process_outputs(outputs, extra)\n\n estimator = EnsembleCategoricals()\n returns = estimator(args, outputs)\n\n return returns['entropy_expected'].clamp_(min=0.0, max=None), \\\n returns['expected_entropy'].clamp_(min=0.0, max=None), \\\n returns['mutual_information'].clamp_(min=0.0, max=None)\n\n\ndef compute_sequence_uncertainties(args, outputs, extra, output_ids, output_length, mask):\n \"\"\"\n Function which computes sequence-level measures of uncertainty for Categorical model.\n :param args: specifies uncertainty estimation parameters\n :param outputs: List of Tensors of size [batch_size, seq_len, vocab] of Logits\n :param output_ids: Tensor of size [batch_size, seq_len] of token ids\n :param output_length: Tensor of size [batch_size, seq_len] of masked token ids\n :param mask: Tensor of size [batch_size] of masked token ids\n :return: Tuple of tensor score, sentence log-probability and token log-probabilities\n \"\"\"\n outputs = process_outputs(outputs, extra)\n\n # Compute the expectation\n expected = torch.stack(outputs, dim=2)\n\n # Normalise results (batch, seqlen, models, vocab)\n expected = torch.log_softmax(expected, dim=-1)\n\n # Expected results (batch, seqlen, vocab)\n expected = torch.logsumexp(expected, dim=2) - np.log(expected.size(2))\n\n # Now (batch, seqlen, 1)\n unsqueezed_ids = output_ids.unsqueeze(-1)\n\n # Now (batch, seqlen)\n token_log_probs = expected.gather(-1, unsqueezed_ids).squeeze(2)\n\n # Remove any uncertainties outside mask\n if mask.any(): token_log_probs.masked_fill_(mask, 0.0)\n\n # Now get sentence and averaged scores\n log_probs = token_log_probs.sum(dim=1)\n scores = -log_probs / output_length\n\n # Return score, sentence log-probability, token probabilities\n return scores, log_probs, token_log_probs", "import math\nfrom dataclasses import dataclass, field\n\nimport torch\nfrom fairseq import metrics, utils\nfrom fairseq.criterions import register_criterion\nfrom fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion\nfrom fairseq.dataclass import FairseqDataclass\nfrom omegaconf import II\n\n\n@dataclass\nclass KLDivergenceCriterionConfig(FairseqDataclass):\n label_smoothing: float = field(\n default=0.0,\n metadata={\"help\": \"epsilon for label smoothing, 0 means no label smoothing\"},\n )\n ls_ratio: float = field(\n default=0.0,\n metadata={\"help\": \"Weighting of label smoothed loss\"}\n )\n temperature_scale_est: float = field(\n default=1.0,\n metadata={\"help\": \"temperature scaling teacher predictions\"}\n )\n temperature_scale_num: float = field(\n default=1.0,\n metadata={\"help\": \"symmetric temperature scaling for numerical stability\"}\n )\n report_accuracy: bool = field(\n default=False,\n metadata={\"help\": \"report accuracy metric\"},\n )\n ignore_prefix_size: int = field(\n default=0,\n metadata={\"help\": \"Ignore first N tokens\"},\n )\n sentence_avg: bool = II(\"optimization.sentence_avg\")\n\n\ndef kl_divergence(log_probs, teacher_log_probs, ignore_mask = None, reduce = True):\n \"\"\"\n The inputs will have shape\n log_probs: (batch, len, vocab)\n teacher_log_probs: (batch, len, models, vocab)\n \"\"\"\n # Matching the shape of the target\n log_probs = 
log_probs.unsqueeze(dim = 2)\n\n # Compute kl-divergence between categoricals\n loss = torch.exp(teacher_log_probs) * (teacher_log_probs - log_probs)\n\n # Sum over vocabulary and average over all teacher members\n loss = loss.sum(dim = -1).mean(dim = 2)\n\n # Mask padding elements\n if ignore_mask is not None: loss.masked_fill_(ignore_mask, 0.0)\n\n if reduce: loss = loss.sum()\n return loss\n\n\n@register_criterion(\"kl_divergence_distillation\", dataclass=KLDivergenceCriterionConfig)\nclass KLDivergenceCriterion(LabelSmoothedCrossEntropyCriterion):\n def __init__(\n self,\n task,\n sentence_avg,\n label_smoothing,\n ignore_prefix_size=0,\n report_accuracy=False,\n ls_ratio = 0.0,\n temperature_scale_est = 1.0,\n temperature_scale_num = 1.0,\n ):\n super().__init__(\n task=task,\n sentence_avg=sentence_avg,\n label_smoothing=label_smoothing,\n ignore_prefix_size=ignore_prefix_size,\n report_accuracy=report_accuracy\n )\n\n # For weighting label smoothed loss\n self.ls_ratio = ls_ratio\n\n # When obtaining teacher probabilities\n self.temperature_scale_est = temperature_scale_est\n\n # Symmetric temperature in kl-loss\n self.temperature_scale_num = temperature_scale_num\n\n @classmethod\n def build_criterion(cls, cfg: KLDivergenceCriterionConfig, task):\n return KLDivergenceCriterion(\n task = task,\n sentence_avg = cfg.sentence_avg,\n label_smoothing = cfg.label_smoothing,\n ignore_prefix_size = cfg.ignore_prefix_size,\n report_accuracy = cfg.report_accuracy,\n ls_ratio = cfg.ls_ratio,\n temperature_scale_est = cfg.temperature_scale_est,\n temperature_scale_num = cfg.temperature_scale_num,\n )\n\n def get_padding_mask(self, sample):\n return sample[\"target\"].eq(self.padding_idx)\n\n def compute_nll_loss(self, model, net_output, sample, reduce=True):\n \"\"\"\n Compute the smooth and negative log-likelihood during validation for tracking purposes\n \"\"\"\n return super(KLDivergenceCriterion, self).compute_loss(\n model, net_output, sample, reduce = reduce\n )\n\n def compute_kl_loss(self, model, net_output, sample, reduce=True):\n \"\"\"\n Compute the expected kl-divergence between student and teacher parameters\n \"\"\"\n\n # Get student predictions\n log_probs = net_output[0]/self.temperature_scale_num\n log_probs = torch.log_softmax(log_probs, dim = -1)\n\n with torch.no_grad():\n # Get teacher predictions\n teacher_log_probs = sample['teacher_ensemble_logits']/(self.temperature_scale_num * self.temperature_scale_est)\n teacher_log_probs = torch.log_softmax(teacher_log_probs, dim = -1)\n\n # Get the kl-divergence loss\n loss = kl_divergence(\n log_probs = log_probs,\n teacher_log_probs = teacher_log_probs,\n ignore_mask = self.get_padding_mask(sample),\n reduce = reduce\n )\n\n return loss\n\n def forward(self, model, sample, reduce=True):\n \"\"\"\n Compute the loss for the given sample.\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n\n # Get prediction\n net_output = model(**sample[\"net_input\"])\n\n # Get label smoothed and nll loss\n ls_loss, nll_loss = self.compute_nll_loss(model, net_output, sample, reduce)\n\n # Zero element\n zero = torch.zeros_like(ls_loss)\n\n # Get kl-divergence loss only during training\n kl_loss = self.compute_kl_loss(model, net_output, sample, reduce) if model.training else zero\n\n # Get weighted loss\n loss = ls_loss * self.ls_ratio + kl_loss * (1 - self.ls_ratio)\n\n # Sample size for gradient normalisation\n 
sample_size = sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n\n logging_output = {\n \"loss\": loss.data,\n \"nll_loss\": nll_loss.data,\n \"ls_loss\": ls_loss.data,\n \"kl_loss\": kl_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n\n if self.report_accuracy:\n n_correct, total = self.compute_accuracy(model, net_output, sample)\n logging_output[\"n_correct\"] = utils.item(n_correct.data)\n logging_output[\"total\"] = utils.item(total.data)\n\n return loss, sample_size, logging_output\n\n @classmethod\n def additional_metrics(cls, logging_outputs) -> None:\n total = utils.item(sum(log.get(\"total\", 0) for log in logging_outputs))\n if total > 0:\n metrics.log_scalar(\"total\", total)\n n_correct = utils.item(\n sum(log.get(\"n_correct\", 0) for log in logging_outputs)\n )\n metrics.log_scalar(\"n_correct\", n_correct)\n metrics.log_derived(\n \"accuracy\",\n lambda meters: round(\n meters[\"n_correct\"].sum * 100.0 / meters[\"total\"].sum, 3\n )\n if meters[\"total\"].sum > 0\n else float(\"nan\"),\n )\n\n @classmethod\n def reduce_metrics(cls, logging_outputs) -> None:\n \"\"\"\n Aggregate logging outputs from data parallel training.\n \"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n nll_loss_sum = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\n ls_loss_sum = sum(log.get(\"ls_loss\", 0) for log in logging_outputs)\n kl_loss_sum = sum(log.get(\"kl_loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"nll_loss\", nll_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_scalar(\n \"ls_loss\", ls_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_scalar(\n \"kl_loss\", kl_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n\n # Additional metrics for accuracy amoung others\n cls.additional_metrics(logging_outputs)\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. 
Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n\n\ndef nll_loss(log_probs, teacher_log_probs, ignore_mask = None, reduce = True):\n \n # Average teacher predictions\n teacher_log_probs = torch.logsumexp(teacher_log_probs, dim = 2) - math.log(teacher_log_probs.size(2))\n\n # Find the maximum of these probabilities\n _, idx = teacher_log_probs.max(dim = -1, keepdim = True)\n\n # Select student probabilities corresponding to max\n loss = -torch.gather(log_probs, -1, idx).squeeze()\n\n # Mask padding elements\n if ignore_mask is not None: loss.masked_fill_(ignore_mask, 0.0)\n\n if reduce: loss = loss.sum()\n return loss\n\n\n@register_criterion(\"map_distillation\", dataclass=KLDivergenceCriterionConfig)\nclass MapDistillationCriterion(KLDivergenceCriterion):\n def __init__(\n self,\n task,\n sentence_avg,\n label_smoothing,\n ignore_prefix_size=0,\n report_accuracy=False,\n ls_ratio = 0.0,\n temperature_scale_est = 1.0,\n temperature_scale_num = 1.0,\n ):\n super().__init__(\n task=task,\n sentence_avg=sentence_avg,\n label_smoothing=label_smoothing,\n ignore_prefix_size=ignore_prefix_size,\n report_accuracy=report_accuracy\n )\n\n # For weighting label smoothed loss\n self.ls_ratio = ls_ratio\n\n # When obtaining teacher probabilities\n self.temperature_scale_est = temperature_scale_est\n\n # Symmetric temperature in kl-loss\n self.temperature_scale_num = temperature_scale_num\n\n @classmethod\n def build_criterion(cls, cfg: KLDivergenceCriterionConfig, task):\n return MapDistillationCriterion(\n task = task,\n sentence_avg = cfg.sentence_avg,\n label_smoothing = cfg.label_smoothing,\n ignore_prefix_size = cfg.ignore_prefix_size,\n report_accuracy = cfg.report_accuracy,\n ls_ratio = cfg.ls_ratio,\n temperature_scale_est = cfg.temperature_scale_est,\n temperature_scale_num = cfg.temperature_scale_num,\n )\n\n def get_padding_mask(self, sample):\n return sample[\"target\"].eq(self.padding_idx)\n\n def compute_nll_loss(self, model, net_output, sample, reduce=True):\n \"\"\"\n Compute the smooth and negative log-likelihood during validation for tracking purposes\n \"\"\"\n return super(KLDivergenceCriterion, self).compute_loss(\n model, net_output, sample, reduce = reduce\n )\n\n def compute_map_loss(self, model, net_output, sample, reduce=True):\n \"\"\"\n Compute the expected kl-divergence between student and teacher parameters\n \"\"\"\n\n # Get student predictions\n log_probs = torch.log_softmax(net_output[0], dim = -1)\n\n with torch.no_grad():\n # Get teacher predictions\n teacher_log_probs = sample['teacher_ensemble_logits']\n teacher_log_probs = torch.log_softmax(teacher_log_probs, dim = -1)\n\n # Get the kl-divergence loss\n loss = nll_loss(\n log_probs = log_probs,\n teacher_log_probs = teacher_log_probs,\n ignore_mask = self.get_padding_mask(sample),\n reduce = reduce\n )\n\n return loss\n\n def forward(self, model, sample, reduce=True):\n \"\"\"\n Compute the loss for the given sample.\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n\n # Get prediction\n net_output = model(**sample[\"net_input\"])\n\n # Get label smoothed and nll loss\n ls_loss, nll_loss = self.compute_nll_loss(model, net_output, sample, reduce)\n\n # Zero element\n zero = torch.zeros_like(ls_loss)\n\n # Get kl-divergence loss only during training\n map_loss = self.compute_map_loss(model, net_output, sample, reduce) if model.training else 
zero\n\n # Get weighted loss\n loss = ls_loss * self.ls_ratio + map_loss * (1 - self.ls_ratio)\n\n # Sample size for gradient normalisation\n sample_size = sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n\n logging_output = {\n \"loss\": loss.data,\n \"nll_loss\": nll_loss.data,\n \"ls_loss\": ls_loss.data,\n \"map_loss\": map_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n\n if self.report_accuracy:\n n_correct, total = self.compute_accuracy(model, net_output, sample)\n logging_output[\"n_correct\"] = utils.item(n_correct.data)\n logging_output[\"total\"] = utils.item(total.data)\n\n return loss, sample_size, logging_output\n\n @classmethod\n def additional_metrics(cls, logging_outputs) -> None:\n total = utils.item(sum(log.get(\"total\", 0) for log in logging_outputs))\n if total > 0:\n metrics.log_scalar(\"total\", total)\n n_correct = utils.item(\n sum(log.get(\"n_correct\", 0) for log in logging_outputs)\n )\n metrics.log_scalar(\"n_correct\", n_correct)\n metrics.log_derived(\n \"accuracy\",\n lambda meters: round(\n meters[\"n_correct\"].sum * 100.0 / meters[\"total\"].sum, 3\n )\n if meters[\"total\"].sum > 0\n else float(\"nan\"),\n )\n\n @classmethod\n def reduce_metrics(cls, logging_outputs) -> None:\n \"\"\"\n Aggregate logging outputs from data parallel training.\n \"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n nll_loss_sum = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\n ls_loss_sum = sum(log.get(\"ls_loss\", 0) for log in logging_outputs)\n map_loss_sum = sum(log.get(\"map_loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"nll_loss\", nll_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_scalar(\n \"ls_loss\", ls_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_scalar(\n \"map_loss\", map_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n\n # Additional metrics for accuracy amoung others\n cls.additional_metrics(logging_outputs)\n\n" ]
[ [ "torch.distributions.dirichlet.Dirichlet", "torch.distributions.kl.kl_divergence", "torch.exp", "torch.zeros_like" ], [ "torch.stack", "torch.log_softmax", "torch.logsumexp" ], [ "torch.log_softmax", "torch.gather", "torch.no_grad", "torch.logsumexp", "torch.zeros_like", "torch.exp" ] ]
itsjohnward/wilds
[ "aeafefd01456840c7bd5173d714b184ec86758af" ]
[ "wilds/examples/losses.py" ]
[ "import torch.nn as nn\r\nfrom wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss\r\nfrom wilds.common.metrics.all_metrics import MSE\r\n\r\ndef initialize_loss(config, d_out):\r\n if config.get('loss_function') == 'cross_entropy':\r\n return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\r\n\r\n elif config.get('loss_function') == 'lm_cross_entropy':\r\n return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\r\n\r\n elif config.get('loss_function') == 'mse':\r\n return MSE(name='loss')\r\n\r\n elif config.get('loss_function') == 'multitask_bce':\r\n return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))\r\n\r\n elif config.get('loss_function') == 'fasterrcnn_criterion':\r\n from models.detection.fasterrcnn import FasterRCNNLoss\r\n return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.get('device')))\r\n\r\n else:\r\n raise ValueError(f'config.get(\"loss_function\") {config.get(\"loss_function\")} not recognized')\r\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.BCEWithLogitsLoss" ] ]
BeautyOfWeb/VIN
[ "a0bd0a15fb26b00d76f5290de093f529b928b3a0" ]
[ "dl/models/factor_graph.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Factor1d(nn.Module):\n \"\"\"Similar to masked attention\n \n \"\"\"\n def __init__(self, in_features, in_dim, out_features, out_dim, adj_mat=None, bias=True):\n super(Factor1d, self).__init__()\n self.linear1 = nn.Linear(in_dim, out_dim, bias) # based on intuition, not justified\n self.linear2 = nn.Linear(out_dim, out_dim, bias)\n self.linear3 = nn.Linear(in_features, out_features, bias)\n self.linear4 = nn.Linear(out_features, out_features, bias)\n self.adj_mat = adj_mat\n\n def forward(self, x):\n out = F.relu(self.linear2(F.relu(self.linear1(x))).transpose(1, 2)) # (NxDxC -> NxCxD)\n if self.adj_mat is None:\n return self.linear4(F.relu(self.linear3(out))).transpose(1, 2)\n else:\n return self.linear4(F.relu(\n F.linear(out, self.linear3.weight*self.adj_mat.float(), self.linear3.bias))).transpose(1, 2)\n" ]
[ [ "torch.nn.Linear" ] ]
dorianhenning/SPIN
[ "6595195a1837ff22a3844c099199bda2f085a557" ]
[ "utils/imutils.py" ]
[ "\"\"\"\nThis file contains functions that are used to perform data augmentation.\n\"\"\"\nimport torch\nimport numpy as np\nimport scipy.misc\nimport cv2\nfrom PIL import Image\n\nimport SPIN.constants\n\ndef get_transform(center, scale, res, rot=0):\n \"\"\"Generate transformation matrix.\"\"\"\n h = 200 * scale\n t = np.zeros((3, 3))\n t[0, 0] = float(res[1]) / h\n t[1, 1] = float(res[0]) / h\n t[0, 2] = res[1] * (-float(center[0]) / h + .5)\n t[1, 2] = res[0] * (-float(center[1]) / h + .5)\n t[2, 2] = 1\n if not rot == 0:\n rot = -rot # To match direction of rotation from cropping\n rot_mat = np.zeros((3,3))\n rot_rad = rot * np.pi / 180\n sn,cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0,:2] = [cs, -sn]\n rot_mat[1,:2] = [sn, cs]\n rot_mat[2,2] = 1\n # Need to rotate around center\n t_mat = np.eye(3)\n t_mat[0,2] = -res[1]/2\n t_mat[1,2] = -res[0]/2\n t_inv = t_mat.copy()\n t_inv[:2,2] *= -1\n t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))\n return t\n\ndef transform(pt, center, scale, res, invert=0, rot=0):\n \"\"\"Transform pixel location to different reference.\"\"\"\n t = get_transform(center, scale, res, rot=rot)\n if invert:\n t = np.linalg.inv(t)\n new_pt = np.array([pt[0]-1, pt[1]-1, 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2].astype(int)+1\n\ndef crop(img, center, scale, res, rot=0):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1, \n res[1]+1], center, scale, res, invert=1))-1\n \n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n if not rot == 0:\n ul -= pad\n br += pad\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n new_img = np.zeros(new_shape, dtype='uint8')\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], \n old_x[0]:old_x[1]]\n\n if not rot == 0:\n # Remove padding\n new_img = scipy.misc.imrotate(new_img, rot)\n new_img = new_img[pad:-pad, pad:-pad]\n\n new_img = np.array(Image.fromarray(new_img).resize(res))\n return new_img\n\ndef uncrop(img, center, scale, orig_shape, rot=0, is_rgb=True):\n \"\"\"'Undo' the image cropping/resizing.\n This function is used when evaluating mask/part segmentation.\n \"\"\"\n res = img.shape[:2]\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,res[1]+1], center, scale, res, invert=1))-1\n # size of cropped image\n crop_shape = [br[1] - ul[1], br[0] - ul[0]]\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n new_img = np.zeros(orig_shape, dtype=np.uint8)\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], orig_shape[1]) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], orig_shape[0]) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(orig_shape[1], br[0])\n old_y = max(0, ul[1]), min(orig_shape[0], br[1])\n img = scipy.misc.imresize(img, crop_shape, interp='nearest')\n new_img[old_y[0]:old_y[1], old_x[0]:old_x[1]] = img[new_y[0]:new_y[1], 
new_x[0]:new_x[1]]\n return new_img\n\ndef rot_aa(aa, rot):\n \"\"\"Rotate axis angle parameters.\"\"\"\n # pose parameters\n R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],\n [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0],\n [0, 0, 1]])\n # find the rotation of the body in camera frame\n per_rdg, _ = cv2.Rodrigues(aa)\n # apply the global rotation to the global orientation\n resrot, _ = cv2.Rodrigues(np.dot(R,per_rdg))\n aa = (resrot.T)[0]\n return aa\n\ndef flip_img(img):\n \"\"\"Flip rgb images or masks.\n channels come last, e.g. (256,256,3).\n \"\"\"\n img = np.fliplr(img)\n return img\n\ndef flip_kp(kp):\n \"\"\"Flip keypoints.\"\"\"\n if len(kp) == 24:\n flipped_parts = constants.J24_FLIP_PERM\n elif len(kp) == 49:\n flipped_parts = constants.J49_FLIP_PERM\n kp = kp[flipped_parts]\n kp[:,0] = - kp[:,0]\n return kp\n\ndef flip_pose(pose):\n \"\"\"Flip pose.\n The flipping is based on SMPL parameters.\n \"\"\"\n flipped_parts = constants.SMPL_POSE_FLIP_PERM\n pose = pose[flipped_parts]\n # we also negate the second and the third dimension of the axis-angle\n pose[1::3] = -pose[1::3]\n pose[2::3] = -pose[2::3]\n return pose\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.sin", "numpy.linalg.norm", "numpy.zeros", "numpy.eye", "numpy.cos", "numpy.deg2rad", "numpy.linalg.inv", "numpy.fliplr" ] ]
hchau630/nn-analysis
[ "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc", "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc", "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc" ]
[ "nn_analysis/acts/utils.py", "nn_analysis/acts/core.py", "nn_analysis/models/archs/barlowtwins/base.py" ]
[ "import pathlib\nimport os\n\nimport numpy as np\n\nfrom nn_analysis import utils, exceptions\nfrom nn_analysis.constants import ENV_CONFIG_PATH, ACTS_CONFIGS_PATH\n\nenv_config = utils.load_config(ENV_CONFIG_PATH)\nacts_configs = utils.load_config(ACTS_CONFIGS_PATH)\n\nDEFAULT_SAVE_LOC = 'save_acts_path' # If you change this, make sure to also move the acts you previously saved at this location to the location that you change to.\n\ndef _get_data_path(model_name, epoch, acts_name, version, layer_name=None, data_type='y'):\n # print(acts_configs[acts_name])\n if \"save_loc\" in acts_configs[acts_name][f\"{version:02d}\"][\"kwargs\"]:\n load_loc = acts_configs[acts_name][f\"{version:02d}\"][\"kwargs\"][\"save_loc\"]\n else:\n load_loc = DEFAULT_SAVE_LOC\n if epoch is not None:\n path = f\"{env_config[load_loc]}/{acts_name}/{version:02d}/{model_name}/{epoch:04d}\"\n else:\n path = f\"{env_config[load_loc]}/{acts_name}/{version:02d}/{model_name}\"\n \n if data_type == 'y' or data_type == 'evr':\n if layer_name is None:\n raise ValueError(\"layer_name must be provieded if data_type is 'y' or 'evr'\")\n return pathlib.Path(f\"{path}/{layer_name}/{data_type}.pkl\")\n elif data_type == 'x':\n return pathlib.Path(f\"{path}/x.pkl\")\n else:\n raise NotImplementedError(f\"data_type {data_type} not implemented.\")\n\ndef data_exists(*args, **kwargs):\n return _get_data_path(*args, **kwargs).is_file()\n\ndef save_data(model_name, epoch, acts_name, version, data_dict, layer_name=None, save_loc=DEFAULT_SAVE_LOC, overwrite=False):\n if epoch is not None:\n path = f\"{env_config[save_loc]}/{acts_name}/{version:02d}/{model_name}/{epoch:04d}\"\n else:\n path = f\"{env_config[save_loc]}/{acts_name}/{version:02d}/{model_name}\"\n \n with utils.SimulFileHandler(*[f\"{path}/{layer_name}/{k}.pkl\" for k in data_dict.keys() if k in ['y', 'evr']]):\n for k, v in data_dict.items():\n if k == 'x':\n path_x = pathlib.Path(f\"{path}/x.pkl\")\n if not overwrite and path_x.is_file():\n existing_x = utils.load_data(path_x)\n if np.allclose(existing_x, v):\n return\n print(existing_x)\n print(v)\n raise exceptions.PathAlreadyExists(f\"The data 'x' already exists for {model_name} {epoch} {acts_name} v{version} and is different from the provided data 'x'. 
If you want override this error, set overwrite=True.\")\n\n utils.save_data(f\"{path}/x.pkl\", v, overwrite=overwrite)\n\n elif k == 'y' or k == 'evr':\n if layer_name is None:\n raise ValueError(\"layer_name must be provided if the data_dict to be saved contains keys 'y' or 'evr'.\")\n utils.save_data(f\"{path}/{layer_name}/{k}.pkl\", v, overwrite=overwrite)\n else:\n raise NotImplementedError(f\"data_dict key {k} not implemented.\")\n \ndef load_data(*args, **kwargs):\n return utils.load_data(_get_data_path(*args, **kwargs))\n\ndef assert_consistent_x(acts_name, version):\n if \"save_loc\" in acts_configs[acts_name][f\"{version:02d}\"][\"kwargs\"]:\n load_loc = acts_configs[acts_name][f\"{version:02d}\"][\"kwargs\"][\"save_loc\"]\n else:\n load_loc = DEFAULT_SAVE_LOC\n path = f\"{env_config[load_loc]}/{acts_name}/{version:02d}\"\n xs = []\n for cur_path, dirnames, filenames in os.walk(path):\n for filename in filenames:\n if filename == 'x.pkl':\n xs.append(utils.load_data(os.path.join(cur_path, filename)))\n if len(xs) > 0:\n for x in xs[1:]:\n assert np.allclose(x, xs[0])\n print(f\"All x.pkl files under {acts_name} v{version} are consistent: checked {len(xs)} files.\")\n", "from abc import ABC, abstractmethod\nimport random\nimport hashlib\nimport os\nimport traceback\nimport pathlib\n\nimport h5py\nimport torch\nimport numpy as np\n\nfrom nn_analysis import utils\n\ndef attach_hooks(model, layer_names, get_hook):\n handles = []\n for layer_name, module in model.named_modules():\n if layer_name in layer_names:\n hook = get_hook(layer_name)\n handle = module.register_forward_hook(hook)\n handles.append(handle)\n return handles\n\ndef remove_hooks(handles):\n for handle in handles:\n handle.remove()\n\ndef compute_sizes(model, layer_names, dataset, device='cpu'):\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=2)\n images = next(iter(dataloader))[0].to(device)\n sizes = {}\n \n def get_hook(layer_name):\n def hook(_0, _1, out):\n sizes[layer_name] = utils.prod(out.size()[1:])\n return hook\n \n try:\n handles = attach_hooks(model, layer_names, get_hook)\n model.eval()\n with torch.no_grad():\n model(images)\n finally:\n remove_hooks(handles)\n \n targets = next(iter(dataloader))[1]\n assert targets.ndim <= 2\n if targets.ndim == 1:\n sizes['target'] = 1\n else:\n sizes['target'] = targets.size()[1]\n sizes['dataset'] = dataset.shape\n return sizes\n\ndef create_group_datasets(grp, model, layer_names, sizes, meta_dicts=None, dtype='float32'):\n for layer_name, module in model.named_modules():\n if layer_name in layer_names:\n layer_grp = grp.create_group(layer_name) # Must be new, cannot overwrite\n if meta_dicts is not None:\n for k, v in meta_dicts[0].items():\n if layer_name in v.keys():\n layer_grp.attrs[k] = v[layer_name]\n for k, v in meta_dicts[1].items():\n if layer_name in v.keys():\n layer_grp[k] = v[layer_name] # Too large to fit in as attribute\n layer_grp.create_dataset('y', shape=(*sizes['dataset'],sizes[layer_name]), dtype=dtype) # Must be new, cannot overwrite\n grp.create_dataset('x', shape=(*sizes['dataset'],sizes['target']), dtype=dtype) # Must be new, cannot overwrite\n\ndef save_dataset(filename, path, model, layer_names, dataset, device='cpu', batch_size=128, postprocessor=None, dtype='float32', log=False):\n sizes = compute_sizes(model, layer_names, dataset, device=device)\n if postprocessor is None:\n postprocess = lambda y, *args, **kwargs: y\n else:\n sizes = postprocessor.configure(sizes)\n postprocess = postprocessor.process\n meta_dicts = 
postprocessor.meta_dicts if postprocessor is not None else None\n\n with h5py.File(filename, 'a') as f:\n grp = f[path]\n create_group_datasets(grp, model, layer_names, sizes, meta_dicts=meta_dicts, dtype=dtype)\n \n model.eval()\n \n def get_hook(layer_name):\n def hook(_0, _1, out):\n y = out.detach()\n y = y.reshape(y.size(0),-1)\n activations = postprocess(y,layer_name,device=device,dtype=dtype).cpu()\n with h5py.File(filename, 'a') as f:\n # print(f\"Activations size: {activations.size()}\")\n # print(f\"file size: {os.path.getsize(filename)}\")\n try:\n f[path][layer_name]['y'][indices] = activations\n except TypeError as err:\n # Fancy indexing cannot handle multi-dimensional individual elements inexing\n for j, index in enumerate(zip(*indices)):\n f[path][layer_name]['y'][index] = activations[j]\n return hook\n \n try:\n handles = attach_hooks(model, layer_names, get_hook)\n dl = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=False)\n print_freq = len(dl)//10 if len(dl) > 10 else 1\n for i, (images, targets, indices) in enumerate(dl):\n if i % print_freq == 0:\n print(f\"Processing batch {i}/{len(dl)}\")\n images = images.to(device)\n if indices.ndim == 1:\n indices = indices.view(-1,1)\n indices = tuple(indices.t().long().numpy())\n if targets.ndim == 1:\n targets = targets.view(-1,1)\n with h5py.File(filename, 'a') as f:\n try:\n f[path]['x'][indices] = targets\n except TypeError as err:\n # Fancy indexing cannot handle multi-dimensional individual elements inexing\n for j, index in enumerate(zip(*indices)):\n f[path]['x'][index] = targets[j]\n with torch.no_grad():\n model(images)\n finally:\n remove_hooks(handles)\n \ndef save(filename, model, model_name, epoch, layer_names, datasets, dataset_names, seeds=None, device='cpu', batch_size=256, postprocessor=None, dtype='float32', log=False):\n assert len(dataset_names) == len(seeds)\n \n pathlib.Path(filename).parent.mkdir(parents=True, exist_ok=True)\n \n if epoch is None:\n model_version = '0000'\n else:\n model_version = f'{epoch:04d}'\n \n with h5py.File(filename, 'a') as f:\n model_grp = f.require_group(model_name)\n model_version_grp = model_grp.require_group(model_version)\n for i, dataset in enumerate(datasets):\n print(f\"Processing dataset {i}: {dataset_names[i]}\")\n dataset_grp = model_version_grp.require_group(dataset_names[i])\n if seeds is not None:\n dataset_grp.attrs['seed'] = seeds[i]\n \n for i, dataset in enumerate(datasets):\n with h5py.File(filename, 'a') as f:\n path = f[model_name][model_version][dataset_names[i]].name\n if seeds is not None:\n with utils.set_seed(seeds[i]):\n save_dataset(filename, path, model, layer_names, dataset, device=device, batch_size=batch_size, postprocessor=postprocessor, dtype=dtype, log=log)\n else:\n save_dataset(filename, path, model, layer_names, dataset, device=device, batch_size=batch_size, postprocessor=postprocessor, dtype=dtype, log=log)\n \ndef load(filename, model_name, epoch, dataset_name, layer_name):\n if epoch is None:\n model_version = '0000'\n else:\n model_version = f'{epoch:04d}'\n with h5py.File(filename, 'r') as f:\n grp = f[model_name][model_version][dataset_name]\n y = grp[layer_name]['y'][...]\n return y\n \ndef load_x(filename, model_name, epoch, dataset_name):\n if epoch is None:\n model_version = '0000'\n else:\n model_version = f'{epoch:04d}'\n with h5py.File(filename, 'r') as f:\n grp = f[model_name][model_version][dataset_name]\n x = grp['x'][...]\n return x\n \nclass Processor(ABC):\n @property\n @abstractmethod\n def 
meta_dicts(self):\n # List of two dicts, the first one containing meta attributes and the second one containing meta datasets\n pass\n \n @abstractmethod\n def configure(self, layer_sizes):\n pass\n \n @abstractmethod\n def process(self, tensor, layer_name, **kwargs):\n pass\n\nclass Compose(Processor):\n def __init__(self, processors):\n self.processors = processors\n \n @property\n def meta_dicts(self):\n out = [{},{}]\n for processor in self.processor:\n out[0].update(processor.meta_dicts[0])\n out[1].update(processor.meta_dicts[1])\n return out\n \n def configure(self, layer_sizes):\n for processor in self.processors:\n layer_sizes = processor.configure(layer_sizes)\n return layer_sizes\n \n def process(self, tensor, layer_name, **kwargs):\n for processor in self.processors:\n tensor = processor.process(tensor, layer_name, **kwargs)\n return tensor\n \nclass Sampler(Processor):\n def __init__(self, n_samples, set_seed=True):\n self.n_samples = n_samples\n self.indices = {}\n self.configured = False\n if set_seed:\n self.seeds = {}\n self.set_seed = set_seed\n \n @property\n def meta_dicts(self):\n if self.set_seed:\n return [{'seed': self.seeds}, {'indices': self.indices}]\n return [{}, {'indices': self.indices}]\n \n def configure(self, sizes):\n layer_sizes = {k: v for k, v in sizes.items() if k not in ['target', 'dataset']}\n output_sizes = {}\n for layer_name, size in layer_sizes.items():\n if self.n_samples > size:\n self.indices[layer_name] = torch.arange(size)\n output_sizes[layer_name] = size\n else:\n if self.set_seed:\n seed = int(hashlib.sha256(layer_name.encode('utf-8')).hexdigest(), 16) % (2**32) # Get seed corresponding to layer\n self.seeds[layer_name] = seed\n with utils.set_seed(seed): \n self.indices[layer_name] = torch.from_numpy(np.random.choice(size,size=self.n_samples,replace=False)).long()\n else:\n self.indices[layer_name] = torch.from_numpy(np.random.choice(size,size=self.n_samples,replace=False)).long()\n output_sizes[layer_name] = self.n_samples\n self.configured = True\n output_sizes.update({'target': sizes['target'], 'dataset': sizes['dataset']})\n return output_sizes\n \n def process(self, tensor, layer_name, **kwargs):\n \"\"\"\n tensor - (batch_size, N)\n \"\"\"\n assert self.configured\n assert tensor.ndim == 2\n layer_indices = self.indices[layer_name]\n if tensor.is_cuda:\n layer_indices.to(tensor.get_device())\n return tensor[:,layer_indices]\n", "from argparse import Namespace\n\nimport torch\nimport torch.nn as nn\nimport torchvision.models as models\n\ndef off_diagonal(x):\n # return a flattened view of the off-diagonal elements of a square matrix\n n, m = x.shape\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\nclass BarlowTwins(nn.Module):\n def __init__(self, args):\n super().__init__()\n self.args = args\n self.backbone = models.resnet50(zero_init_residual=True)\n self.backbone.fc = nn.Identity()\n\n # projector\n sizes = [2048] + list(map(int, args.projector.split('-')))\n layers = []\n for i in range(len(sizes) - 2):\n layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))\n layers.append(nn.BatchNorm1d(sizes[i + 1]))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))\n self.projector = nn.Sequential(*layers)\n self.projector_sizes = sizes\n\n # normalization layer for the representations z1 and z2\n self.bn = nn.BatchNorm1d(sizes[-1], affine=False)\n \n def forward(self, x):\n return self.bn(self.projector(self.backbone(x)))\n\n def loss_forward(self, y1, 
y2):\n z1 = self.projector(self.backbone(y1))\n z2 = self.projector(self.backbone(y2))\n\n # empirical cross-correlation matrix\n c = self.bn(z1).T @ self.bn(z2)\n\n # sum the cross-correlation matrix between all gpus\n c.div_(self.args.batch_size)\n torch.distributed.all_reduce(c)\n\n # use --scale-loss to multiply the loss by a constant factor\n # see the Issues section of the readme\n on_diag = torch.diagonal(c).add_(-1).pow_(2).sum().mul(self.args.scale_loss)\n off_diag = off_diagonal(c).pow_(2).sum().mul(self.args.scale_loss)\n loss = on_diag + self.args.lambd * off_diag\n return loss\n \ndef barlowtwins(projector='8192-8192-8192', batch_size=1024, scale_loss=0.024, lambd=0.0051, **kwargs):\n args = Namespace(projector=projector, batch_size=batch_size, scale_loss=scale_loss, lambd=lambd, **kwargs)\n return BarlowTwins(args)" ]
[ [ "numpy.allclose" ], [ "torch.no_grad", "numpy.random.choice", "torch.utils.data.DataLoader", "torch.arange" ], [ "torch.nn.Linear", "torch.nn.Identity", "torch.diagonal", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.distributed.all_reduce" ] ]
mgarbade/models
[ "6dc1dbe28556375403d0b4b91b561c07387df982" ]
[ "official/resnet/cifar10_main.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Runs a ResNet model on the CIFAR-10 dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app as absl_app\nfrom absl import flags\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom official.utils.flags import core as flags_core\nfrom official.resnet import resnet_model\nfrom official.resnet import resnet_run_loop\n\n_HEIGHT = 32\n_WIDTH = 32\n_NUM_CHANNELS = 3\n_DEFAULT_IMAGE_BYTES = _HEIGHT * _WIDTH * _NUM_CHANNELS\n# The record is the image plus a one-byte label\n_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1\n_NUM_CLASSES = 10\n_NUM_DATA_FILES = 5\n\n_NUM_IMAGES = {\n 'train': 50000,\n 'validation': 10000,\n}\n\nDATASET_NAME = 'CIFAR-10'\n\n\n###############################################################################\n# Data processing\n###############################################################################\ndef get_filenames(is_training, data_dir):\n \"\"\"Returns a list of filenames.\"\"\"\n data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')\n\n assert os.path.exists(data_dir), (\n 'Run cifar10_download_and_extract.py first to download and extract the '\n 'CIFAR-10 data.')\n\n if is_training:\n return [\n os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in range(1, _NUM_DATA_FILES + 1)\n ]\n else:\n return [os.path.join(data_dir, 'test_batch.bin')]\n\n\ndef parse_record(raw_record, is_training):\n \"\"\"Parse CIFAR-10 image and label from a raw record.\"\"\"\n # Convert bytes to a vector of uint8 that is record_bytes long.\n record_vector = tf.decode_raw(raw_record, tf.uint8)\n\n # The first byte represents the label, which we convert from uint8 to int32\n # and then to one-hot.\n label = tf.cast(record_vector[0], tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],\n [_NUM_CHANNELS, _HEIGHT, _WIDTH])\n\n # Convert from [depth, height, width] to [height, width, depth], and cast as\n # float32.\n image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)\n\n image = preprocess_image(image, is_training)\n\n return image, label\n\n\ndef preprocess_image(image, is_training):\n \"\"\"Preprocess a single image of layout [height, width, depth].\"\"\"\n if is_training:\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_image_with_crop_or_pad(\n image, _HEIGHT + 8, _WIDTH + 8)\n\n # Randomly crop a [_HEIGHT, _WIDTH] section of the image.\n image = tf.random_crop(image, [_HEIGHT, _WIDTH, _NUM_CHANNELS])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n # Subtract off the mean and divide by the variance of the 
pixels.\n image = tf.image.per_image_standardization(image)\n return image\n\n\ndef input_fn(is_training, data_dir, batch_size, num_epochs=1):\n \"\"\"Input_fn using the tf.data input pipeline for CIFAR-10 dataset.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: The directory containing the input data.\n batch_size: The number of samples per batch.\n num_epochs: The number of epochs to repeat the dataset.\n\n Returns:\n A dataset that can be used for iteration.\n \"\"\"\n filenames = get_filenames(is_training, data_dir)\n dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)\n\n return resnet_run_loop.process_record_dataset(\n dataset, is_training, batch_size, _NUM_IMAGES['train'],\n parse_record, num_epochs,\n )\n\n\ndef get_synth_input_fn():\n return resnet_run_loop.get_synth_input_fn(\n _HEIGHT, _WIDTH, _NUM_CHANNELS, _NUM_CLASSES)\n\n\n###############################################################################\n# Running the model\n###############################################################################\nclass Cifar10Model(resnet_model.Model):\n \"\"\"Model class with appropriate defaults for CIFAR-10 data.\"\"\"\n\n def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,\n resnet_version=resnet_model.DEFAULT_VERSION,\n dtype=resnet_model.DEFAULT_DTYPE):\n \"\"\"These are the parameters that work for CIFAR-10 data.\n\n Args:\n resnet_size: The number of convolutional layers needed in the model.\n data_format: Either 'channels_first' or 'channels_last', specifying which\n data format to use when setting up the model.\n num_classes: The number of output classes needed from the model. This\n enables users to extend the same model to their own datasets.\n resnet_version: Integer representing which version of the ResNet network\n to use. See README for details. Valid values: [1, 2]\n dtype: The TensorFlow dtype to use for calculations.\n\n Raises:\n ValueError: if invalid resnet_size is chosen\n \"\"\"\n if resnet_size % 6 != 2:\n raise ValueError('resnet_size must be 6n + 2:', resnet_size)\n\n num_blocks = (resnet_size - 2) // 6\n\n super(Cifar10Model, self).__init__(\n resnet_size=resnet_size,\n bottleneck=False,\n num_classes=num_classes,\n num_filters=16,\n kernel_size=3,\n conv_stride=1,\n first_pool_size=None,\n first_pool_stride=None,\n block_sizes=[num_blocks] * 3,\n block_strides=[1, 2, 2],\n final_size=64,\n resnet_version=resnet_version,\n data_format=data_format,\n dtype=dtype\n )\n\n\ndef cifar10_model_fn(features, labels, mode, params):\n \"\"\"Model function for CIFAR-10.\"\"\"\n features = tf.reshape(features, [-1, _HEIGHT, _WIDTH, _NUM_CHANNELS])\n\n learning_rate_fn = resnet_run_loop.learning_rate_with_decay(\n batch_size=params['batch_size'], batch_denom=128,\n num_images=_NUM_IMAGES['train'], boundary_epochs=[100, 150, 200],\n decay_rates=[1, 0.1, 0.01, 0.001])\n\n # We use a weight decay of 0.0002, which performs better\n # than the 0.0001 that was originally suggested.\n weight_decay = 2e-4\n\n # Empirical testing showed that including batch_normalization variables\n # in the calculation of regularized loss helped validation accuracy\n # for the CIFAR-10 dataset, perhaps because the regularization prevents\n # overfitting on the small data set. 
We therefore include all vars when\n # regularizing and computing loss during training.\n def loss_filter_fn(_):\n return True\n\n return resnet_run_loop.resnet_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n model_class=Cifar10Model,\n resnet_size=params['resnet_size'],\n weight_decay=weight_decay,\n learning_rate_fn=learning_rate_fn,\n momentum=0.9,\n data_format=params['data_format'],\n resnet_version=params['resnet_version'],\n loss_scale=params['loss_scale'],\n loss_filter_fn=loss_filter_fn,\n dtype=params['dtype']\n )\n\n\ndef define_cifar_flags():\n resnet_run_loop.define_resnet_flags()\n flags.adopt_module_key_flags(resnet_run_loop)\n flags_core.set_defaults(data_dir='/tmp/cifar10_data',\n model_dir='/tmp/cifar10_model',\n resnet_size='32',\n train_epochs=250,\n epochs_between_evals=10,\n batch_size=128)\n\n\ndef run_cifar(flags_obj):\n \"\"\"Run ResNet CIFAR-10 training and eval loop.\n\n Args:\n flags_obj: An object containing parsed flag values.\n \"\"\"\n input_function = (flags_obj.use_synthetic_data and get_synth_input_fn()\n or input_fn)\n\n resnet_run_loop.resnet_main(\n flags_obj, cifar10_model_fn, input_function, DATASET_NAME,\n shape=[_HEIGHT, _WIDTH, _NUM_CHANNELS])\n\n\ndef main(_):\n run_cifar(flags.FLAGS)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n define_cifar_flags()\n absl_app.run(main)\n" ]
[ [ "tensorflow.data.FixedLengthRecordDataset", "tensorflow.logging.set_verbosity", "tensorflow.decode_raw", "tensorflow.image.random_flip_left_right", "tensorflow.reshape", "tensorflow.transpose", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.image.per_image_standardization", "tensorflow.random_crop", "tensorflow.cast" ] ]
jpwright/foobot-slack
[ "ffc1cf8490d08433d76bb62cbf7440c765089784" ]
[ "foobot_grapher.py" ]
[ "#!/usr/bin/env python\n\nfrom pyfoobot import Foobot\nimport requests\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.dates\nimport matplotlib.pyplot\nimport datetime\nfrom imgurpython import ImgurClient\nimport ConfigParser\n\ndef getSensorReadings(notify):\n\n\tconfig = ConfigParser.ConfigParser()\n\tconfig.read(\"config.txt\")\n\n\tsettings = {\n\t\t'foobot_api_key': '',\n\t\t'foobot_email': '',\n\t\t'foobot_password': '',\n\t\t'imgur_id': '',\n\t\t'imgur_secret': '',\n\t\t'slack_webhook': '',\n\t\t'averaging_period': 15,\n\t\t'periods_to_graph': 12,\n\t\t'threshold_pm': 25.0,\n\t\t'threshold_temperature': 26.5,\n\t\t'threshold_humidity': 60.0,\n\t\t'threshold_co2': 30000.0,\n\t\t'threshold_tvoc': 500.0\n\t}\n\n\tfor settings_key in settings:\n\t\ttry:\n\t\t\tvalue_to_set = config.get('default', settings_key)\n\t\t\tsettings[settings_key] = value_to_set\n\t\texcept:\n\t\t\tpass\n\n\timgur_supported = False\n\n\tif (len(settings['imgur_id']) > 0 and len(settings['imgur_secret']) > 0):\n\t\timgur_supported = True\n\t\timgur = ImgurClient(settings['imgur_id'], settings['imgur_secret'])\n\n\tfb = Foobot(settings['foobot_api_key'], settings['foobot_email'], settings['foobot_password'])\n\tdevices = fb.devices()\n\tdevice = devices[0]\n\n\tmeasurement_interval = 60*(int(settings['averaging_period']) * int(settings['periods_to_graph']))\n\n\tdata = device.data_period(measurement_interval, 0)\n\n\talerts = []\n\tlabels = [\"PM2.5\", \"Temperature\", \"Humidity\", \"CO2\", \"tVOC\"]\n\tunits = [\"ug/m3\", \"C\", \"%\", \"ppm\", \"ppb\"]\n\tmax_vals = [0, 0, 0, 0, 0]\n\tsums = [0, 0, 0, 0, 0]\n\tdatapoints = [[], [], [], [], []]\n\ttimeseries = []\n\tthresholds = [\n\t\tfloat(settings['threshold_pm']),\n\t\tfloat(settings['threshold_temperature']),\n\t\tfloat(settings['threshold_humidity']),\n\t\tfloat(settings['threshold_co2']),\n\t\tfloat(settings['threshold_tvoc'])\n\t]\n\n\tnum_averaging_samples = int(len(data['datapoints']) / int(settings['periods_to_graph']))\n\n\tfor i in range(0, len(data['datapoints'])):\n\t\tdatapoint = data['datapoints'][i]\n\t\ttime = datapoint[0]\n\t\tpm = datapoint[1]\n\t\ttmp = datapoint[2]\n\t\thum = datapoint[3]\n\t\tco2 = datapoint[4]\n\t\tvoc = datapoint[5]\n\t\tallpollu = datapoint[6]\n\n\t\tfor j in range(0, 5):\n\t\t\tdatapoints[j].append(datapoint[j+1])\n\n\t\t\tif (i >= (len(data['datapoints']) - num_averaging_samples)):\n\t\t\t\tsums[j] += datapoint[j+1]\n\t\t\t\tif datapoint[j] > max_vals[j]:\n\t\t\t\t\tmax_vals[j] = datapoint[j+1]\n\n\t\ttimeseries.append(datetime.datetime.fromtimestamp(time))\n\n\thours = matplotlib.dates.HourLocator()\n\tminutes = matplotlib.dates.MinuteLocator(interval = 10)\n\thoursFmt = matplotlib.dates.DateFormatter('%-I:%M')\n\n\tif notify:\n\t\tfor i in range(0, 5):\n\t\t\tsums[i] = sums[i] / num_averaging_samples\n\n\t\t\tif sums[i] > thresholds[i]:\n\t\t\t\tprint(\"Sending alert for \"+labels[i])\n\t\t\t\tfig, ax = matplotlib.pyplot.subplots()\n\t\t\t\tax.plot(timeseries, datapoints[i])\n\n\t\t\t\tax.xaxis.set_major_locator(hours)\n\t\t\t\tax.xaxis.set_major_formatter(hoursFmt)\n\t\t\t\tax.grid(True)\n\n\t\t\t\tmatplotlib.pyplot.xlabel(\"Time\")\n\t\t\t\tmatplotlib.pyplot.ylabel(labels[i] + \" (\"+units[i]+\")\")\n\n\t\t\t\tfig.autofmt_xdate()\n\n\t\t\t\tmatplotlib.pyplot.savefig(\"figure.png\")\n\t\t\t\tif imgur_supported:\n\t\t\t\t\timage = imgur.upload_from_path(\"figure.png\", anon=True)\n\t\t\t\telse:\n\t\t\t\t\timage = {\"link\": \"http://imgur.not.supported.com/alter_your_config.txt\"}\n\n\t\t\t\tpayload 
= '{\"text\": \"Warning: '+labels[i]+' levels at '+\"{0:.2f}\".format(sums[i])+' '+units[i]+'.\", \"attachments\": [{\"fallback\": \"Graph.\", \"image_url\": \"'+image[\"link\"]+'\"}]}'\n\t\t\t\tr = requests.post(\"https://hooks.slack.com/services/\"+settings['slack_webhook'], data={\"payload\": payload})\n\n\telse:\n\t\tfig, axarr = matplotlib.pyplot.subplots(1,5)\n\t\tfor i in range(0, 5):\n\t\t\tax = axarr[i]\n\t\t\tax.plot(timeseries, datapoints[i])\n\n\t\t\tax.xaxis.set_major_locator(hours)\n\t\t\tax.xaxis.set_major_formatter(hoursFmt)\n\t\t\tax.grid(True)\n\n\t\t\tax.set_xlabel(\"Time\")\n\t\t\tax.set_title(labels[i] + \" (\"+units[i]+\")\")\n\n\t\t\tfig.autofmt_xdate()\n\n\n\t\tfig.set_size_inches(18, 4)\n\t\tmatplotlib.pyplot.savefig(\"figure.png\", bbox_inches='tight')\n\n\t\tif (imgur_supported):\n\t\t\timage = imgur.upload_from_path(\"figure.png\", anon=True)\n\t\telse:\n\t\t\timage = {\"link\": \"http://imgur.not.supported.com/alter_your_config.txt\"}\n\n\t\treturn image[\"link\"]\n\nif __name__ == \"__main__\":\n\tgetSensorReadings(True)\n" ]
[ [ "matplotlib.use", "matplotlib.dates.MinuteLocator", "matplotlib.pyplot.savefig", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots", "matplotlib.dates.HourLocator", "matplotlib.pyplot.ylabel" ] ]
MQSdk/optimal-ph
[ "23659b661d7c2a932886f9472ccb233ed132a73f" ]
[ "src/predict.py" ]
[ "import argparse\n\nimport os\nimport pickle\nimport zipfile\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVR\n\nfrom utils import remove_linebreak,remove_mafft_train,fasta_to_numpy,read_embedding_matrix\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input_csv', default='input.csv')\nargs = parser.parse_args()\n\n# Load input.csv\nwith open(args.input_csv) as input_csv:\n df = pd.read_csv(input_csv)\n\n# write as fasta_file\nwith open('src/input.fasta', 'w') as f: \n for ii, s in enumerate(df['sequence']):\n f.write(f\">TEST{ii}\\n\")\n f.write(s+\"\\n\")\n \n \nos.system('src/mafft/mafft.bat --keeplength --add src/input.fasta --auto src/clustalo-4000_ph_new_stripped_ordered_train.fasta > src/input_mafft.fasta')\n\nmfile = remove_linebreak('src/input_mafft.fasta')\nprint(\"remove_linebreak\", mfile)\nmfile = remove_mafft_train(mfile)\nprint(\"remove_mafft_train\", mfile)\ndata = fasta_to_numpy(mfile)\ndata = data.astype(np.int32)\n\nembedding_path = 'src/matrix-40k_L2_30.csv'\nc, embedding_matrix = read_embedding_matrix(embedding_path)\nN_sites = 9900\nsite_embeddings = embedding_matrix[:N_sites]\namino_embeddings = embedding_matrix[N_sites:]\n\ndp_embedding = np.sum((amino_embeddings[data, :] * site_embeddings[None, :]), -1)\n\nzf = zipfile.ZipFile('src/svrmodel_matrix-40k_L2_30.zip', 'r', zipfile.ZIP_BZIP2)\nclf = pickle.load(zf.open('data.pkl'))\nzf.close()\n\npredictions = clf.predict(dp_embedding)\n\noutput_file_path = 'predictions.csv'\nwith open(output_file_path, 'w') as outfile:\n outfile.write('predictions\\n')\n for p in predictions.tolist():\n outfile.write(str(p) + '\\n')\n\n\n" ]
[ [ "numpy.sum", "pandas.read_csv" ] ]
DeepLearnXMU/ABDNMT-Transformer
[ "9477fcf040b59e577a6eb071d173aab243ca0beb" ]
[ "thumt/models/abdtransformer.py" ]
[ "# coding=utf-8\n# Copyright 2017-2019 The THUMT Authors\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nimport tensorflow as tf\n\nimport thumt.interface as interface\nimport thumt.layers as layers\nimport thumt.utils.getter as getter\n\n\ndef _layer_process(x, mode):\n if not mode or mode == \"none\":\n return x\n elif mode == \"layer_norm\":\n return layers.nn.layer_norm(x)\n else:\n raise ValueError(\"Unknown mode %s\" % mode)\n\n\ndef _residual_fn(x, y, keep_prob=None):\n if keep_prob and keep_prob < 1.0:\n y = tf.nn.dropout(y, keep_prob)\n return x + y\n\n\ndef _ffn_layer(inputs, hidden_size, output_size, keep_prob=None,\n dtype=None, scope=None):\n with tf.variable_scope(scope, default_name=\"ffn_layer\", values=[inputs],\n dtype=dtype):\n with tf.variable_scope(\"input_layer\"):\n hidden = layers.nn.linear(inputs, hidden_size, True, True)\n hidden = tf.nn.relu(hidden)\n\n if keep_prob and keep_prob < 1.0:\n hidden = tf.nn.dropout(hidden, keep_prob)\n\n with tf.variable_scope(\"output_layer\"):\n output = layers.nn.linear(hidden, output_size, True, True)\n\n return output\n\n\ndef transformer_encoder(inputs, bias, params, dtype=None, scope=None):\n with tf.variable_scope(scope, default_name=\"encoder\", dtype=dtype,\n values=[inputs, bias]):\n x = inputs\n for layer in range(params.num_encoder_layers):\n with tf.variable_scope(\"layer_%d\" % layer):\n with tf.variable_scope(\"self_attention\"):\n max_relative_dis = params.max_relative_dis \\\n if params.position_info_type == 'relative' else None\n\n y = layers.attention.multihead_attention(\n _layer_process(x, params.layer_preprocess),\n None,\n bias,\n params.num_heads,\n params.attention_key_channels or params.hidden_size,\n params.attention_value_channels or params.hidden_size,\n params.hidden_size,\n 1.0 - params.attention_dropout,\n max_relative_dis=max_relative_dis,\n )\n y = y[\"outputs\"]\n x = _residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n with tf.variable_scope(\"feed_forward\"):\n y = _ffn_layer(\n _layer_process(x, params.layer_preprocess),\n params.filter_size,\n params.hidden_size,\n 1.0 - params.relu_dropout,\n )\n x = _residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n outputs = _layer_process(x, params.layer_preprocess)\n\n return outputs\n\n\ndef transformer_decoder(inputs, memory, bias, mem_bias, params, state=None,\n dtype=None, scope=None, r2l_memory=None, r2l_bias=None,reuse=None):\n with tf.variable_scope(scope, dtype=dtype,\n values=[inputs, memory, bias, mem_bias, r2l_memory, r2l_bias]):\n x = inputs\n next_state = {}\n for layer in range(params.num_decoder_layers):\n layer_name = \"layer_%d\" % layer\n with tf.variable_scope(layer_name):\n layer_state = state[layer_name] if state is not None else None\n max_relative_dis = params.max_relative_dis \\\n if params.position_info_type == 'relative' else None\n\n with tf.variable_scope(\"self_attention\",reuse=reuse):\n y = layers.attention.multihead_attention(\n _layer_process(x, params.layer_preprocess),\n None,\n bias,\n params.num_heads,\n params.attention_key_channels or params.hidden_size,\n params.attention_value_channels or params.hidden_size,\n params.hidden_size,\n 1.0 - params.attention_dropout,\n state=layer_state,\n max_relative_dis=max_relative_dis,\n )\n\n if layer_state is not None:\n next_state[layer_name] = y[\"state\"]\n\n y = y[\"outputs\"]\n x = 
_residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n with tf.variable_scope(\"encdec_attention\",reuse=reuse):\n y = layers.attention.multihead_attention(\n _layer_process(x, params.layer_preprocess),\n memory,\n mem_bias,\n params.num_heads,\n params.attention_key_channels or params.hidden_size,\n params.attention_value_channels or params.hidden_size,\n params.hidden_size,\n 1.0 - params.attention_dropout,\n max_relative_dis=max_relative_dis,\n )\n y = y[\"outputs\"]\n x = _residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n if r2l_memory is not None:\n with tf.variable_scope(\"r2l_attention\"):\n y = layers.attention.multihead_attention(\n _layer_process(x, params.layer_preprocess),\n r2l_memory,\n r2l_bias,\n params.num_heads,\n params.attention_key_channels or params.hidden_size,\n params.attention_value_channels or params.hidden_size,\n params.hidden_size,\n 1.0 - params.attention_dropout,\n max_relative_dis=max_relative_dis,\n )\n y = y[\"outputs\"]\n x = _residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n with tf.variable_scope(\"feed_forward\",reuse=reuse):\n y = _ffn_layer(\n _layer_process(x, params.layer_preprocess),\n params.filter_size,\n params.hidden_size,\n 1.0 - params.relu_dropout,\n )\n x = _residual_fn(x, y, 1.0 - params.residual_dropout)\n x = _layer_process(x, params.layer_postprocess)\n\n outputs = _layer_process(x, params.layer_preprocess)\n\n if state is not None:\n return outputs, next_state\n\n return outputs\n\n\ndef encoding_graph(features, mode, params):\n if mode != \"train\":\n params.residual_dropout = 0.0\n params.attention_dropout = 0.0\n params.relu_dropout = 0.0\n params.label_smoothing = 0.0\n\n dtype = tf.get_variable_scope().dtype\n hidden_size = params.hidden_size\n src_seq = features[\"source\"]\n src_len = features[\"source_length\"]\n src_mask = tf.sequence_mask(src_len,\n maxlen=tf.shape(features[\"source\"])[1],\n dtype=dtype or tf.float32)\n\n svocab = params.vocabulary[\"source\"]\n src_vocab_size = len(svocab)\n initializer = tf.random_normal_initializer(0.0, params.hidden_size ** -0.5)\n\n if params.shared_source_target_embedding:\n src_embedding = tf.get_variable(\"weights\",\n [src_vocab_size, hidden_size],\n initializer=initializer)\n else:\n src_embedding = tf.get_variable(\"source_embedding\",\n [src_vocab_size, hidden_size],\n initializer=initializer)\n\n bias = tf.get_variable(\"bias\", [hidden_size])\n\n inputs = tf.gather(src_embedding, src_seq)\n\n if params.multiply_embedding_mode == \"sqrt_depth\":\n inputs = inputs * (hidden_size ** 0.5)\n\n inputs = inputs * tf.expand_dims(src_mask, -1)\n\n encoder_input = tf.nn.bias_add(inputs, bias)\n enc_attn_bias = layers.attention.attention_bias(src_mask, \"masking\",\n dtype=dtype)\n if params.position_info_type == 'absolute':\n encoder_input = layers.attention.add_timing_signal(encoder_input)\n\n if params.residual_dropout:\n keep_prob = 1.0 - params.residual_dropout\n encoder_input = tf.nn.dropout(encoder_input, keep_prob)\n\n encoder_output = transformer_encoder(encoder_input, enc_attn_bias, params)\n\n return encoder_output\n\n\ndef r2l_decoder(decoder_input, encoder_output, dec_attn_bias, enc_attn_bias, params):\n scope='r2l'\n if params.share_r2l:\n scope='decoder'\n r2l_state = transformer_decoder(decoder_input, encoder_output, dec_attn_bias, enc_attn_bias, params, scope=scope)\n return r2l_state\n\n\ndef l2r_decoder(r2l_mem, 
r2l_attn_bias, decoder_input, encoder_output, dec_attn_bias, enc_attn_bias, params, state=None):\n scope='l2r'\n if params.share_r2l:\n scope='decoder'\n outputs = transformer_decoder(decoder_input, encoder_output, dec_attn_bias, enc_attn_bias, params, state,\n scope=scope, r2l_memory=r2l_mem, r2l_bias=r2l_attn_bias,reuse=params.share_r2l)\n return outputs\n\n\ndef abd_forward(r2l_input, r2l_bias, l2r_input, l2r_bias, encoder_output, enc_attn_bias,r2l_attn_bias, params, state=None):\n if state is None or 'r2l_memory' not in state:\n r2l_mem = r2l_decoder(r2l_input, encoder_output, r2l_bias, enc_attn_bias, params)\n if state is not None:\n state['r2l_memory'] = r2l_mem\n else:\n r2l_mem = state['r2l_memory']\n\n l2r_outputs = l2r_decoder(r2l_mem, r2l_attn_bias, l2r_input, encoder_output, l2r_bias, enc_attn_bias, params, state)\n return r2l_mem, l2r_outputs\n\n\ndef decoding_graph(features, state, mode, params):\n if mode != \"train\":\n params.residual_dropout = 0.0\n params.attention_dropout = 0.0\n params.relu_dropout = 0.0\n params.label_smoothing = 0.0\n\n dtype = tf.get_variable_scope().dtype\n tgt_seq = features[\"target\"]\n r2l_tgt_seq = features[\"r2l_target\"]\n src_len = features[\"source_length\"]\n tgt_len = features[\"target_length\"]\n r2l_tgt_len = features[\"r2l_target_length\"]\n src_mask = tf.sequence_mask(src_len,\n maxlen=tf.shape(features[\"source\"])[1],\n dtype=dtype or tf.float32)\n tgt_mask = tf.sequence_mask(tgt_len,\n maxlen=tf.shape(features[\"target\"])[1],\n dtype=dtype or tf.float32)\n r2l_tgt_mask = tf.sequence_mask(r2l_tgt_len,\n maxlen=tf.shape(features[\"r2l_target\"])[1],\n dtype=dtype or tf.float32)\n\n hidden_size = params.hidden_size\n tvocab = params.vocabulary[\"target\"]\n tgt_vocab_size = len(tvocab)\n initializer = tf.random_normal_initializer(0.0, params.hidden_size ** -0.5)\n\n if params.shared_source_target_embedding:\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n tgt_embedding = tf.get_variable(\"weights\",\n [tgt_vocab_size, hidden_size],\n initializer=initializer)\n else:\n tgt_embedding = tf.get_variable(\"target_embedding\",\n [tgt_vocab_size, hidden_size],\n initializer=initializer)\n\n if params.shared_embedding_and_softmax_weights:\n weights = tgt_embedding\n else:\n weights = tf.get_variable(\"softmax\", [tgt_vocab_size, hidden_size],\n initializer=initializer)\n\n targets = tf.gather(tgt_embedding, tgt_seq)\n r2l_targets = tf.gather(tgt_embedding, r2l_tgt_seq)\n\n if params.multiply_embedding_mode == \"sqrt_depth\":\n targets = targets * (hidden_size ** 0.5)\n r2l_targets = r2l_targets * (hidden_size ** 0.5)\n\n targets = targets * tf.expand_dims(tgt_mask, -1)\n r2l_targets = r2l_targets * tf.expand_dims(r2l_tgt_mask, -1)\n\n enc_attn_bias = layers.attention.attention_bias(src_mask, \"masking\",\n dtype=dtype)\n dec_attn_bias = layers.attention.attention_bias(tf.shape(targets)[1],\n \"causal\", dtype=dtype)\n r2l_bias = layers.attention.attention_bias(tf.shape(r2l_targets)[1],\n \"causal\", dtype=dtype)\n r2l_attn_bias = layers.attention.attention_bias(r2l_tgt_mask, \"masking\",\n dtype=dtype)\n\n # Shift left\n decoder_input = tf.pad(targets, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n r2l_decoder_input = tf.pad(r2l_targets, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n if params.position_info_type == 'absolute':\n decoder_input = layers.attention.add_timing_signal(decoder_input)\n r2l_decoder_input = layers.attention.add_timing_signal(r2l_decoder_input)\n\n if params.residual_dropout:\n keep_prob = 1.0 - 
params.residual_dropout\n decoder_input = tf.nn.dropout(decoder_input, keep_prob)\n r2l_decoder_input = tf.nn.dropout(r2l_decoder_input, keep_prob)\n\n encoder_output = state[\"encoder\"]\n\n if mode == \"train\":\n r2l_output, l2r_output = abd_forward(r2l_decoder_input, r2l_bias, decoder_input, dec_attn_bias,\n encoder_output, enc_attn_bias,r2l_attn_bias, params)\n\n elif mode == 'infer':\n decoder_input = decoder_input[:, -1:, :]\n dec_attn_bias = dec_attn_bias[:, :, -1:, :]\n\n r2l_mem, (l2r_output, decoder_state) = abd_forward(r2l_decoder_input, r2l_bias,\n decoder_input, dec_attn_bias,\n encoder_output, enc_attn_bias,\n r2l_attn_bias,\n params, state['decoder'])\n decoder_state['r2l_memory']=r2l_mem\n l2r_output = l2r_output[:, -1, :]\n logits = tf.matmul(l2r_output, weights, False, True)\n log_prob = tf.nn.log_softmax(logits)\n\n return log_prob, {\"encoder\": encoder_output, \"decoder\": decoder_state}\n else:\n raise NotImplementedError('mode=%s' % mode)\n\n def loss(decoder_output, labels, mask):\n decoder_output = tf.reshape(decoder_output, [-1, hidden_size])\n logits = tf.matmul(decoder_output, weights, False, True)\n # label smoothing\n ce = layers.nn.smoothed_softmax_cross_entropy_with_logits(\n logits=logits,\n labels=labels,\n smoothing=params.label_smoothing,\n normalize=True\n )\n tgt_mask = tf.cast(mask, ce.dtype)\n\n ce = tf.reshape(ce, tf.shape(tgt_mask))\n\n loss = tf.reduce_sum(ce * tgt_mask) / tf.reduce_sum(tgt_mask)\n return loss\n\n r2l_loss = loss(r2l_output, features['r2l_target'], r2l_tgt_mask)\n l2r_loss = loss(l2r_output, features['target'], tgt_mask)\n\n return l2r_loss + r2l_loss\n\n\ndef model_graph(features, mode, params):\n encoder_output = encoding_graph(features, mode, params)\n state = {\n \"encoder\": encoder_output\n }\n output = decoding_graph(features, state, mode, params)\n\n return output\n\n\nclass Transformer(interface.NMTModel):\n\n def __init__(self, params, scope=\"transformer\"):\n super(Transformer, self).__init__(params=params, scope=scope)\n\n def get_training_func(self, initializer, regularizer=None, dtype=None):\n def training_fn(features, params=None, reuse=None):\n if params is None:\n params = copy.copy(self.parameters)\n else:\n params = copy.copy(params)\n\n if dtype != tf.float32:\n custom_getter = getter.fp32_variable_getter\n else:\n custom_getter = None\n\n with tf.variable_scope(self._scope, initializer=initializer,\n regularizer=regularizer, reuse=reuse,\n custom_getter=custom_getter, dtype=dtype):\n loss = model_graph(features, \"train\", params)\n return loss\n\n return training_fn\n\n def get_evaluation_func(self):\n def evaluation_fn(features, params=None):\n if params is None:\n params = copy.copy(self.parameters)\n else:\n params = copy.copy(params)\n\n with tf.variable_scope(self._scope):\n score = model_graph(features, \"eval\", params)\n\n return score\n\n return evaluation_fn\n\n def get_inference_func(self):\n def encoding_fn(features, params=None):\n if params is None:\n params = copy.copy(self.parameters)\n else:\n params = copy.copy(params)\n\n with tf.variable_scope(self._scope):\n encoder_output = encoding_graph(features, \"infer\", params)\n batch = tf.shape(encoder_output)[0]\n\n state = {\n \"encoder\": encoder_output,\n \"decoder\": {\n \"layer_%d\" % i: {\n \"key\": tf.zeros([batch, 0, params.hidden_size]),\n \"value\": tf.zeros([batch, 0, params.hidden_size])\n }\n for i in range(params.num_decoder_layers)\n }\n }\n decoding_graph(features, state, 'infer', params) # get r2l_memory\n r2l_memory = 
state['decoder']['r2l_memory']\n state = {\n \"encoder\": encoder_output,\n \"decoder\": {\n \"layer_%d\" % i: {\n \"key\": tf.zeros([batch, 0, params.hidden_size]),\n \"value\": tf.zeros([batch, 0, params.hidden_size])\n }\n for i in range(params.num_decoder_layers)\n }\n }\n state['decoder']['r2l_memory'] = r2l_memory\n\n return state\n\n def decoding_fn(features, state, params=None):\n if params is None:\n params = copy.copy(self.parameters)\n else:\n params = copy.copy(params)\n\n with tf.variable_scope(self._scope, reuse=True):\n log_prob, new_state = decoding_graph(features, state, \"infer\",\n params)\n\n return log_prob, new_state\n\n return encoding_fn, decoding_fn\n\n @staticmethod\n def get_name():\n return \"transformer\"\n\n @staticmethod\n def get_parameters():\n params = tf.contrib.training.HParams(\n pad=\"<pad>\",\n bos=\"<eos>\",\n eos=\"<eos>\",\n unk=\"<unk>\",\n append_eos=False,\n hidden_size=512,\n filter_size=2048,\n num_heads=8,\n num_encoder_layers=6,\n num_decoder_layers=6,\n attention_dropout=0.0,\n residual_dropout=0.1,\n relu_dropout=0.0,\n label_smoothing=0.1,\n attention_key_channels=0,\n attention_value_channels=0,\n layer_preprocess=\"none\",\n layer_postprocess=\"layer_norm\",\n multiply_embedding_mode=\"sqrt_depth\",\n shared_embedding_and_softmax_weights=True,\n shared_source_target_embedding=False,\n # Override default parameters\n learning_rate_decay=\"linear_warmup_rsqrt_decay\",\n initializer=\"uniform_unit_scaling\",\n initializer_gain=1.0,\n learning_rate=1.0,\n batch_size=4096,\n constant_batch_size=False,\n adam_beta1=0.9,\n adam_beta2=0.98,\n adam_epsilon=1e-9,\n clip_grad_norm=0.0,\n position_info_type='relative', # 'absolute' or 'relative'\n max_relative_dis=16, # 8 for big model, 16 for base model, see (Shaw et al., 2018)\n share_r2l=None,\n )\n\n return params\n" ]
[ [ "tensorflow.contrib.training.HParams", "tensorflow.shape", "tensorflow.zeros", "tensorflow.nn.relu", "tensorflow.expand_dims", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.get_variable", "tensorflow.get_variable_scope", "tensorflow.gather", "tensorflow.reduce_sum", "tensorflow.pad", "tensorflow.nn.bias_add", "tensorflow.nn.log_softmax", "tensorflow.nn.dropout", "tensorflow.cast", "tensorflow.random_normal_initializer" ] ]
ZY-KK/panda
[ "48fcbd65d563ef74aab2554be8de7662560c43da" ]
[ "panda_goal_reaching/controllers/robot_supervisor_manager/agent/ppo.py" ]
[ "import os\nimport numpy as np\nimport torch as T\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions.categorical import Categorical\n\n\nclass PPOMemory:\n def __init__(self, batch_size):\n self.states = []\n self.probs = []\n self.vals = []\n self.actions = []\n self.rewards = []\n self.dones = []\n\n self.batch_size = batch_size\n\n def generate_batches(self):\n n_states = len(self.states)\n batch_start = np.arange(0, n_states, self.batch_size)\n indices = np.arange(n_states, dtype=np.int64)\n np.random.shuffle(indices)\n batches = [indices[i:i+self.batch_size] for i in batch_start]\n\n return np.array(self.states),\\\n np.array(self.actions),\\\n np.array(self.probs),\\\n np.array(self.vals),\\\n np.array(self.rewards),\\\n np.array(self.dones),\\\n batches\n\n def store_memory(self, state, action, probs, vals, reward, done):\n self.states.append(state)\n self.actions.append(action)\n self.probs.append(probs)\n self.vals.append(vals)\n self.rewards.append(reward)\n self.dones.append(done)\n\n def clear_memory(self):\n self.states = []\n self.probs = []\n self.actions = []\n self.rewards = []\n self.dones = []\n self.vals = []\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, n_actions, input_dims, alpha,\n fc1_dims=256, fc2_dims=256, chkpt_dir='./tmp/ppo'):\n super(ActorNetwork, self).__init__()\n\n self.checkpoint_file = os.path.join(chkpt_dir, 'actor_torch_ppo')\n self.actor = nn.Sequential(\n nn.Linear(*input_dims, fc1_dims),\n nn.ReLU(),\n nn.Linear(fc1_dims, fc2_dims),\n nn.ReLU(),\n nn.Linear(fc2_dims, n_actions),\n nn.Softmax(dim=-1)\n )\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n self.to(self.device)\n\n def forward(self, state):\n dist = self.actor(state)\n dist = Categorical(dist)\n\n return dist\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.checkpoint_file)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.checkpoint_file))\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, input_dims, alpha, fc1_dims=256, fc2_dims=256,\n chkpt_dir='./tmp/ppo'):\n super(CriticNetwork, self).__init__()\n\n self.checkpoint_file = os.path.join(chkpt_dir, 'critic_torch_ppo')\n self.critic = nn.Sequential(\n nn.Linear(*input_dims, fc1_dims),\n nn.ReLU(),\n nn.Linear(fc1_dims, fc2_dims),\n nn.ReLU(),\n nn.Linear(fc2_dims, 1)\n )\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n self.to(self.device)\n\n def forward(self, state):\n value = self.critic(state)\n\n return value\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.checkpoint_file)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.checkpoint_file))\n\n\nclass PPO_agent:\n def __init__(self, n_actions, input_dims, gamma=0.99, alpha=0.0003, gae_lambda=0.95,\n policy_clip=0.2, batch_size=64, n_epochs=10):\n self.gamma = gamma\n self.policy_clip = policy_clip\n self.n_epochs = n_epochs\n self.gae_lambda = gae_lambda\n\n self.actor = ActorNetwork(n_actions, input_dims, alpha)\n self.critic = CriticNetwork(input_dims, alpha)\n self.memory = PPOMemory(batch_size)\n \n print(\"Try to load the pretrained models...\")\n try:\n self.load_models()\n except OSError:\n print (\"No pretrained models!\")\n else:\n print (\"Successfully loaded the pretrained models!\")\n def remember(self, state, action, probs, vals, reward, done):\n self.memory.store_memory(state, action, probs, 
vals, reward, done)\n\n def save_models(self):\n print('... saving models ...')\n self.actor.save_checkpoint()\n self.critic.save_checkpoint()\n\n def load_models(self):\n print('... loading models ...')\n self.actor.load_checkpoint()\n self.critic.load_checkpoint()\n\n def choose_action(self, observation):\n state = T.tensor([observation], dtype=T.float).to(self.actor.device)\n\n dist = self.actor(state)\n # print(\"===dist:\", dist)\n value = self.critic(state)\n action = dist.sample()\n # print(\"===action:\", action.size())\n\n probs = T.squeeze(dist.log_prob(action)).item()\n action = T.squeeze(action).item()\n # print(\"===action:\", action)\n value = T.squeeze(value).item()\n\n return action, probs, value\n\n def learn(self):\n for _ in range(self.n_epochs):\n state_arr, action_arr, old_prob_arr, vals_arr,\\\n reward_arr, dones_arr, batches = \\\n self.memory.generate_batches()\n\n values = vals_arr\n advantage = np.zeros(len(reward_arr), dtype=np.float32)\n\n for t in range(len(reward_arr)-1):\n discount = 1\n a_t = 0\n for k in range(t, len(reward_arr)-1):\n a_t += discount*(reward_arr[k] + self.gamma*values[k+1] *\n (1-int(dones_arr[k])) - values[k])\n discount *= self.gamma*self.gae_lambda\n advantage[t] = a_t\n advantage = T.tensor(advantage).to(self.actor.device)\n\n values = T.tensor(values).to(self.actor.device)\n for batch in batches:\n states = T.tensor(state_arr[batch], dtype=T.float).to(\n self.actor.device)\n old_probs = T.tensor(old_prob_arr[batch]).to(self.actor.device)\n actions = T.tensor(action_arr[batch]).to(self.actor.device)\n\n dist = self.actor(states)\n critic_value = self.critic(states)\n\n critic_value = T.squeeze(critic_value)\n\n new_probs = dist.log_prob(actions)\n prob_ratio = new_probs.exp() / old_probs.exp()\n #prob_ratio = (new_probs - old_probs).exp()\n weighted_probs = advantage[batch] * prob_ratio\n weighted_clipped_probs = T.clamp(prob_ratio, 1-self.policy_clip,\n 1+self.policy_clip)*advantage[batch]\n actor_loss = -T.min(weighted_probs,\n weighted_clipped_probs).mean()\n\n returns = advantage[batch] + values[batch]\n critic_loss = (returns-critic_value)**2\n critic_loss = critic_loss.mean()\n\n total_loss = actor_loss + 0.5*critic_loss\n self.actor.optimizer.zero_grad()\n self.critic.optimizer.zero_grad()\n total_loss.backward()\n self.actor.optimizer.step()\n self.critic.optimizer.step()\n\n self.memory.clear_memory()\n" ]
[ [ "torch.nn.Linear", "numpy.array", "torch.min", "torch.nn.Softmax", "numpy.random.shuffle", "torch.clamp", "torch.distributions.categorical.Categorical", "torch.nn.ReLU", "torch.squeeze", "torch.cuda.is_available", "numpy.arange", "torch.tensor", "torch.load" ] ]
jihoonl/ladder-densenet
[ "12626ce339625862d16f91bc692f64799da8bc31", "12626ce339625862d16f91bc692f64799da8bc31" ]
[ "models/voc2012/dense_net.py", "config/voc2012/resnet.py" ]
[ "import os, re\nimport pickle\nimport tensorflow as tf\nimport numpy as np\n#import cv2\nfrom os.path import join\n\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.contrib.framework import arg_scope\n#import skimage as ski\n#import skimage.io\n\nimport libs.cylib as cylib\nimport train_helper\nimport losses\nimport eval_helper\nimport datasets.voc2012.reader as reader\nfrom datasets.voc2012.dataset import Dataset\n\n\nFLAGS = tf.app.flags.FLAGS\nsubset_dir = '/home/kivan/datasets/VOC2012/ImageSets/Segmentation/'\n#subset_dir = '/home/kivan/datasets/voc2012_aug/'\ndataset_dir = '/home/kivan/datasets/voc2012_aug/tensorflow/'\n#tf.app.flags.DEFINE_string('dataset_dir', DATASET_DIR, '')\nprint('Dataset dir: ' + dataset_dir)\n\n# RGB\ndata_mean = [116.49585869, 112.43425923, 103.19996733]\ndata_std = [60.37073962, 59.39268441, 60.74823033]\n\nif FLAGS.no_valid:\n train_dataset = Dataset(dataset_dir, join(subset_dir, 'trainval.txt'), 'trainval')\nelse:\n train_dataset = Dataset(dataset_dir, join(subset_dir, 'train.txt'), 'train')\n valid_dataset = Dataset(dataset_dir, join(subset_dir, 'val.txt'), 'val')\n\n\nprint('Num training examples = ', train_dataset.num_examples())\n\n#model_depth = 121\n#block_sizes = [6,12,24,16]\nmodel_depth = 169\nblock_sizes = [6,12,32,32]\n\nimagenet_init = True\n#imagenet_init = False\ninit_dir = '/home/kivan/datasets/pretrained/dense_net/'\napply_jitter = True\n#apply_jitter = False\njitter_scale = False\n#jitter_scale = True\npool_func = layers.avg_pool2d\n#pool_func = layers.max_pool2d\nknown_shape = True\n\ntrain_step_iter = 0\n\nweight_decay = 1e-4\n#weight_decay = 4e-5\n#weight_decay = 2e-4\n#init_func = layers.variance_scaling_initializer(mode='FAN_OUT')\ninit_func = layers.variance_scaling_initializer()\n\ncontext_size = 512\ngrowth = 32\ncompression = 0.5\ngrowth_up = 32\n\nuse_dropout = False\n#use_dropout = True\nkeep_prob = 0.8\n\n# must be false if BN is frozen\nfused_batch_norm = True\n#fused_batch_norm = False\n\n#data_format = 'NCHW'\n#maps_dim = 1\n#height_dim = 2\n\ndata_format = 'NHWC'\nmaps_dim = 3\nheight_dim = 1\n\n\nbn_params = {\n # Decay for the moving averages.\n 'decay': 0.9,\n 'center': True,\n 'scale': True,\n # epsilon to prevent 0s in variance.\n 'epsilon': 1e-5,\n # None to force the updates\n 'updates_collections': None,\n 'fused': fused_batch_norm,\n 'data_format': data_format,\n 'is_training': True\n}\n\n\ndef evaluate(name, sess, epoch_num, run_ops, data):\n loss_val, accuracy, iou, recall, precision = eval_helper.evaluate_segmentation_voc2012(\n sess, epoch_num, run_ops, valid_dataset)\n is_best = False\n if iou > data['best_iou'][0]:\n is_best = True\n data['best_iou'] = [iou, epoch_num]\n data['iou'] += [iou]\n data['acc'] += [accuracy]\n data['loss'] += [loss_val]\n return is_best\n\n\ndef start_epoch(train_data):\n global train_loss_arr, train_conf_mat\n train_conf_mat = np.ascontiguousarray(\n np.zeros((FLAGS.num_classes, FLAGS.num_classes), dtype=np.uint64))\n train_loss_arr = []\n train_data['lr'].append(lr.eval())\n\n\ndef end_epoch(train_data):\n pixacc, iou, _, _, _ = eval_helper.compute_errors(\n train_conf_mat, 'Train', train_dataset.class_info)\n is_best = False\n if len(train_data['iou']) > 0 and iou > max(train_data['iou']):\n is_best = True\n train_data['iou'].append(iou)\n train_data['acc'].append(pixacc)\n train_loss_val = np.mean(train_loss_arr)\n train_data['loss'].append(train_loss_val)\n return is_best\n\n\ndef update_stats(ret_val):\n global train_loss_arr\n loss_val = ret_val[0]\n yp = 
ret_val[1]\n yt = ret_val[2]\n train_loss_arr.append(loss_val)\n yp = yp.argmax(3).astype(np.int32)\n cylib.collect_confusion_matrix(yp.reshape(-1), yt.reshape(-1), train_conf_mat)\n\n\ndef plot_results(train_data, valid_data):\n eval_helper.plot_training_progress(os.path.join(FLAGS.train_dir, 'stats'),\n train_data, valid_data)\n\n\ndef print_results(train_data, valid_data):\n print('\\nBest train IOU = %.2f' % max(train_data['iou']))\n print('Best validation IOU = %.2f (epoch %d)\\n' % tuple(valid_data['best_iou']))\n\n\ndef init_eval_data():\n train_data = {}\n valid_data = {}\n train_data['lr'] = []\n train_data['loss'] = []\n train_data['iou'] = []\n train_data['acc'] = []\n train_data['best_iou'] = [0, 0]\n valid_data['best_iou'] = [0, 0]\n valid_data['loss'] = []\n valid_data['iou'] = []\n valid_data['acc'] = []\n return train_data, valid_data\n\n\ndef normalize_input(img):\n with tf.name_scope('input'), tf.device('/gpu:0'):\n if data_format == 'NCHW':\n img = tf.transpose(img, perm=[0,3,1,2])\n mean = tf.constant(data_mean, dtype=tf.float32, shape=[1,3,1,1])\n std = tf.constant(data_std, dtype=tf.float32, shape=[1,3,1,1])\n else:\n mean = data_mean\n std = data_std\n img = (img - mean) / std\n return img\n\n\ndef resize_tensor(net, shape, name):\n if data_format == 'NCHW':\n net = tf.transpose(net, perm=[0,2,3,1])\n net = tf.image.resize_bilinear(net, shape, name=name)\n if data_format == 'NCHW':\n net = tf.transpose(net, perm=[0,3,1,2])\n return net\n\n\ndef refine(net, skip_data, is_training):\n skip_net = skip_data[0]\n num_layers = skip_data[1]\n growth = skip_data[2]\n block_name = skip_data[3]\n\n #size_top = top_layer.get_shape()[maps_dim].value\n #skip_width = skip_layer.get_shape()[2].value\n #if top_height != skip_height or top_width != skip_width:\n #print(top_height, skip_height)\n #assert(2*top_height == skip_height)\n \n #TODO try convolution2d_transpose\n #up_shape = tf.shape(skip_net)[height_dim:height_dim+2]\n with tf.variable_scope(block_name):\n if known_shape:\n up_shape = skip_net.get_shape().as_list()[height_dim:height_dim+2]\n else:\n up_shape = tf.shape(skip_net)[height_dim:height_dim+2]\n shape_info = net.get_shape().as_list()\n print(net)\n net = resize_tensor(net, up_shape, name='upsample')\n print(net)\n if not known_shape:\n print(shape_info)\n shape_info[height_dim] = None\n shape_info[height_dim+1] = None\n net.set_shape(shape_info)\n print('\\nup = ', net)\n print('skip = ', skip_net)\n #print(skip_data)\n return upsample(net, skip_net, num_layers, growth, is_training, 'dense_block')\n\n\ndef BNReluConv(net, num_filters, name, k=3, rate=1, first=False, concat=None):\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope(name):\n # TODO check this\n relu = None\n if not first:\n # TODO try Relu -> BN\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n relu = net\n if concat is not None:\n net = tf.concat([net, concat], maps_dim)\n print('c ', net)\n net = layers.conv2d(net, num_filters, kernel_size=k, rate=rate)\n return net\n\n\ndef _pyramid_pooling(net, size, num_pools=3):\n print('Pyramid context pooling')\n with tf.variable_scope('pyramid_context_pooling'):\n if known_shape:\n shape = net.get_shape().as_list()\n else:\n shape = tf.shape(net)\n print('shape = ', shape)\n up_size = 
shape[height_dim:height_dim+2]\n shape_info = net.get_shape().as_list()\n num_maps = net.get_shape().as_list()[maps_dim]\n #grid_size = [6, 3, 2, 1]\n pool_dim = int(round(num_maps / num_pools))\n concat_lst = [net]\n for i in range(num_pools):\n #pool = layers.avg_pool2d(net, kernel_size=[kh, kw], stride=[kh, kw], padding='SAME')\n #pool = layers.avg_pool2d(net, kernel_size=[kh, kh], stride=[kh, kh], padding='SAME')\n print('before pool = ', net)\n net = layers.avg_pool2d(net, 2, 2, padding='SAME', data_format=data_format)\n print(net)\n pool = BNReluConv(net, pool_dim, k=1, name='bottleneck'+str(i))\n #pool = tf.image.resize_bilinear(pool, [height, width], name='resize_score')\n pool = resize_tensor(pool, up_size, name='upsample_level_'+str(i))\n concat_lst.append(pool)\n net = tf.concat(concat_lst, maps_dim)\n print('Pyramid pooling out: ', net)\n #net = BNReluConv(net, 512, k=3, name='bottleneck_out')\n net = BNReluConv(net, size, k=3, name='bottleneck_out')\n return net\n\n\ndef layer(net, num_filters, name, is_training, first):\n with tf.variable_scope(name):\n net = BNReluConv(net, 4*num_filters, 'bottleneck', k=1, first=first)\n net = BNReluConv(net, num_filters, 'conv', k=3)\n if use_dropout and is_training: \n net = tf.nn.dropout(net, keep_prob=keep_prob)\n return net\n\n\ndef dense_block(net, size, growth, name, is_training=False, first=False,\n split=False, rate=1):\n with tf.variable_scope(name):\n for i in range(size):\n x = net\n #net, first_relu = layer(net, k, 'layer'+str(i), is_training, first=first)\n net = layer(net, growth, 'layer'+str(i), is_training, first=first)\n net = tf.concat([x, net], maps_dim)\n if first:\n first = False\n if split and i == (size // 2) - 1:\n split_out = net\n print('Split shape = ', net)\n if rate == 1:\n net = pool_func(net, 2, stride=2, padding='SAME', data_format=data_format)\n else:\n paddings, crops = tf.required_space_to_batch_paddings(image_size(net),\n [rate,rate])\n net = tf.space_to_batch(net, paddings=paddings, block_size=rate)\n if split and rate > 1:\n net = tf.batch_to_space(net, crops=crops, block_size=rate)\n print('Dense block out: ', net)\n if split:\n return net, split_out\n return net\n\ndef dense_block_multigpu(net, size, growth, name, is_training=False, first=False, split=False):\n with tf.variable_scope(name):\n for i in range(size):\n #if i < size//2:\n #if i < 6:\n #if i < 3:\n\n if i < 12:\n gpu = '/gpu:0'\n else:\n gpu = '/gpu:1'\n with tf.device(gpu):\n x = net\n #net, first_relu = layer(net, k, 'layer'+str(i), is_training, first=first)\n net = layer(net, growth, 'layer'+str(i), is_training, first=first)\n net = tf.concat([x, net], maps_dim)\n if first:\n first = False\n if split and i == (size // 2) - 1:\n split_out = net\n print('Split shape = ', net)\n net = pool_func(net, 2, stride=2, padding='SAME', data_format=data_format)\n print('Dense block out: ', net)\n if split == True:\n return net, split_out\n return net\n\n#growth_up = 32\n#up_sizes = [2,2,4,4]\n#def dense_block_upsample(net, skip_net, size, growth, name):\n# with tf.variable_scope(name):\n# net = tf.concat([net, skip_net], maps_dim)\n# num_filters = net.get_shape().as_list()[maps_dim]\n# #num_filters = int(round(num_filters*compression))\n# num_filters = int(round(num_filters*compression))\n# #num_filters = int(round(num_filters*0.3))\n# # TODO try 3 vs 1\n# net = BNReluConv(net, num_filters, 'bottleneck', k=1)\n# #net = BNReluConv(net, num_filters, name+'_bottleneck', k=3)\n# print('after bottleneck = ', net)\n# for i in range(size):\n# x = net\n# 
net = BNReluConv(net, growth, 'layer'+str(i))\n# net = tf.concat([x, net], maps_dim)\n# return net\n# #return dense_block(net, size, growth, name)\n\n\n# old refine\n##up_sizes = [128,128,512,512]\n#up_sizes = [256,256,512,512]\n#up_sizes = [196,256,384,512]\n#up_sizes = [128,128,256,512] # 2gpus\n#up_sizes = [64,128,256,512] # 2gpus\n#up_sizes = [128,128,256,384,512]\n#up_sizes = [64,128,256,384,512]\n#up_sizes = [256,256,384,512]\n#up_sizes = [128,128,256,256]\n\n# best\n#up_sizes = [64,128,128,256]\n\n\ndef dense_block_upsample_worse(net, skip_net, size, growth, name):\n with tf.variable_scope(name):\n net = tf.concat([net, skip_net], maps_dim)\n #new_size = net.get_shape().as_list()[height_dim:height_dim+2]\n #depth = resize_tensor(depth, new_size, 'resize_depth')\n #net = tf.concat([net, skip_net, depth], maps_dim)\n\n num_filters = net.get_shape().as_list()[maps_dim]\n print(net)\n num_filters = int(round(num_filters*compression))\n #num_filters = int(round(num_filters*compression/2))\n #num_filters = int(round(num_filters*0.3))\n net = BNReluConv(net, num_filters, 'bottleneck', k=1)\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n #net = tf.concat([net, depth], maps_dim)\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n print('after bottleneck = ', net)\n net = BNReluConv(net, size, 'layer')\n return net\n #return dense_block(net, size, growth, name)\n\n#up_sizes = [256,256,256,384]\n\n# try stronger upsampling\n#up_sizes = [64,128,256,512] # good\n#up_sizes = [128,256,256,256]\n\n#up_sizes = [128,256,256,512] # good\n#up_sizes = [128,128,256,256]\n#up_sizes = [128,196,256,384]\n#up_sizes = [128,196,256,384,512]\n#up_sizes = [64,128,128,128,256]\n#up_sizes = [64,64,64,64,64]\n\n#up_sizes = [256,256,512,512] # good\n#up_sizes = [128,256,384,512] # 0.5% worse then up\n#up_sizes = [32,64,128,256]\n\nup_sizes = [128,128,128,128,128]\n#up_sizes = [128,128,256,256,256]\n\ndef upsample(net, skip_net, size, growth, is_training, name):\n with tf.variable_scope(name):\n # TODO\n num_filters = net.get_shape().as_list()[maps_dim]\n skip_net = BNReluConv(skip_net, num_filters, 'bottleneck', k=1)\n net = tf.concat([net, skip_net], maps_dim)\n #net = net + skip_net\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n print('after concat = ', net)\n net = BNReluConv(net, size, 'layer')\n return net\n\n# works the same as simple\ndef upsample_dense(net, skip_net, size, growth, is_training, name):\n with tf.variable_scope(name):\n num_filters = net.get_shape().as_list()[maps_dim]\n skip_net = BNReluConv(skip_net, num_filters, 'skip_bottleneck', k=1)\n net = tf.concat([net, skip_net], maps_dim)\n net = dense_block(net, 4, growth, 'dense_block', is_training)\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n print('after dense block = ', net)\n net = BNReluConv(net, size, 'bottleneck', k=1)\n return net\n\ndef transition(net, compression, name, stride=2, pool=True):\n with tf.variable_scope(name):\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n num_filters = net.get_shape().as_list()[maps_dim]\n num_filters = int(round(num_filters*compression))\n net = layers.conv2d(net, num_filters, kernel_size=1)\n skip_layer = net\n # avg works little better on small res\n if pool:\n net = pool_func(net, 2, stride=stride, data_format=data_format, padding='SAME')\n print('Transition: ', net)\n return net, skip_layer\n\n\ndef dense_block_context(net):\n print('Dense context')\n with tf.variable_scope('block_context'):\n outputs = []\n size = 8\n #size = 
4\n #size = 6\n for i in range(size):\n x = net\n net = BNReluConv(net, 64, 'layer'+str(i))\n #net = BNReluConv(net, 128, 'layer'+str(i))\n outputs.append(net)\n if i < size - 1:\n net = tf.concat([x, net], maps_dim)\n net = tf.concat(outputs, maps_dim)\n return net\n\n\ndef _build(image, is_training=False):\n #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)\n bn_params['is_training'] = is_training\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*growth, 7, stride=2)\n #net = layers.conv2d(image, 2*growth, 7, stride=1)\n # TODO\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n\n skip_layers = []\n\n # no diff with double BN from orig densenet, first=True\n net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)\n #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,\n # first=True, split=True)\n #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_mid_refine'])\n skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine'])\n net, _ = transition(net, compression, 'block0/transition')\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_refine'])\n\n #net, skip = dense_block(net, block_sizes[1], growth, 'block1', is_training, split=True)\n #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_mid_refine'])\n net = dense_block(net, block_sizes[1], growth, 'block1', is_training)\n\n skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine'])\n net, _ = transition(net, compression, 'block1/transition')\n\n #context_pool_num = 3\n #net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)\n #skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine'])\n #skip_layers.append([net, up_sizes[3], growth_up, 'block2_refine'])\n\n net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n skip_layers.append([net, up_sizes[2], growth_up, 'block2_refine'])\n net, _ = transition(net, compression, 'block2/transition')\n\n is_dilated = False\n #is_dilated = True\n #bsz = 2\n #net, _ = transition(net, compression, 'block1/transition', stride=1)\n #paddings, crops = tf.required_space_to_batch_paddings(image_size(net), [bsz, bsz])\n #net = tf.space_to_batch(net, paddings=paddings, block_size=bsz)\n #net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n #net, _ = transition(net, compression, 'block2/transition', stride=1)\n #net = tf.batch_to_space(net, crops=crops, block_size=bsz)\n\n #bsz = 2\n #bsz = 4\n #paddings, crops = tf.required_space_to_batch_paddings(image_size(net), [bsz, bsz])\n #net = tf.space_to_batch(net, paddings=paddings, block_size=bsz)\n #net = dense_block(net, block_sizes[3], growth, 'block3', is_training)\n #net = tf.batch_to_space(net, crops=crops, block_size=bsz)\n net, skip = dense_block(net, block_sizes[3], growth, 'block3', is_training, split=True)\n skip_layers.append([skip, up_sizes[-1], growth_up, 'block3_refine'])\n #skip = tf.batch_to_space(skip, crops=crops, block_size=bsz)\n\n #net, skip = dense_block(net, 
block_sizes[3], growth, 'block3', is_training,\n # split=True, rate=2)\n\n with tf.variable_scope('head'):\n print('out = ', net)\n #skip_layers.append([net, up_sizes[-1], growth_up, 'block3_refine'])\n #net, _ = transition(net, compression, 'block3/transition')\n #net = dense_block(net, block_sizes[3], growth, 'block4', is_training)\n net = BNReluConv(net, 512, 'bottleneck', k=1)\n ## 0.4 better with rate=2\n net = BNReluConv(net, 128, 'context', k=3, rate=2)\n ##net = BNReluConv(net, 128, 'context', k=3)\n\n #net = BNReluConv(net, 256, 'bottleneck', k=1)\n ##net = _pyramid_pooling(net, 256, num_pools=4)\n #net = _pyramid_pooling(net, 256, num_pools=3)\n ## SPP has dropout here\n #if is_training:\n # net = tf.nn.dropout(net, keep_prob=0.9)\n\n #skip_layers.append([net, up_sizes[-1], growth_up, 'block3_refine'])\n #net, _ = transition(net, compression, 'block3/transition')\n #net, _ = transition(net, compression, 'block3/transition', stride)\n ####bsz = 4\n #bsz = 2\n #paddings, crops = tf.required_space_to_batch_paddings(image_size(net), [bsz, bsz])\n #net = tf.space_to_batch(net, paddings=paddings, block_size=bsz)\n #net = dense_block(net, block_sizes[3], growth, 'block4', is_training)\n ##print(net)\n ##net = BNReluConv(net, 256, 'bottleneck', k=1)\n ###net = BNReluConv(net, 128, 'bottleneck', k=1)\n #net = BNReluConv(net, 256, 'context', k=3, rate=2)\n #net = BNReluConv(net, 256, 'context', k=3, rate=2)\n #net = BNReluConv(net, 128, 'context', k=3)\n #net = tf.batch_to_space(net, crops=crops, block_size=bsz)\n\n #net = dense_block_context(net)\n #net = BNReluConv(net, context_size, 'context_conv', k=3)\n #context_pool_num = 4\n\n #print('dense context')\n #print('7x7')\n #net = BNReluConv(net, context_size, 'context_conv', k=7)\n #net = BNReluConv(net, context_size, 'context_conv', k=7, rate=2)\n #net = BNReluConv(net, context_size, 'context_conv', k=3, rate=2)\n #in_shape = net.get_shape().as_list()\n #in_shape[maps_dim] = context_size\n #net.set_shape(in_shape)\n #net = BNReluConv(net, context_size, 'context_conv', k=5)\n #final_h = net.get_shape().as_list()[height_dim]\n print('Before upsampling: ', net)\n\n all_logits = [net]\n for skip_layer in reversed(skip_layers):\n net = refine(net, skip_layer, is_training)\n all_logits.append(net)\n print('after upsampling = ', net)\n if not is_dilated:\n all_logits = [all_logits[0], all_logits[-1]]\n else:\n all_logits = [all_logits[-1]]\n\n with tf.variable_scope('head'):\n for i, logits in enumerate(all_logits):\n with tf.variable_scope('logits_'+str(i)):\n # FIX\n #net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n #logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n # data_format=data_format)\n logits = layers.conv2d(tf.nn.relu(logits), FLAGS.num_classes, 1,\n activation_fn=None, data_format=data_format)\n\n if data_format == 'NCHW':\n logits = tf.transpose(logits, perm=[0,2,3,1])\n input_shape = tf.shape(image)[height_dim:height_dim+2]\n logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n all_logits[i] = logits\n logits = all_logits.pop()\n return logits, all_logits\n\n #with tf.variable_scope('logits'):\n # #net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n # net = tf.nn.relu(net)\n # logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n # data_format=data_format)\n\n #with tf.variable_scope('mid_logits'):\n # #mid_logits = tf.nn.relu(layers.batch_norm(mid_logits, **bn_params))\n # mid_logits = tf.nn.relu(mid_logits)\n # mid_logits = layers.conv2d(mid_logits, 
FLAGS.num_classes, 1, activation_fn=None,\n # data_format=data_format)\n\n #if data_format == 'NCHW':\n # logits = tf.transpose(logits, perm=[0,2,3,1])\n # mid_logits = tf.transpose(mid_logits, perm=[0,2,3,1])\n #input_shape = tf.shape(image)[height_dim:height_dim+2]\n #logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n #mid_logits = tf.image.resize_bilinear(mid_logits, input_shape, name='resize_mid_logits')\n ##if data_format == 'NCHW':\n ## top_layer = tf.transpose(top_layer, perm=[0,3,1,2])\n #return logits, mid_logits\n\n\ndef _build2gpu(image, is_training=False):\n #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)\n bn_params['is_training'] = is_training\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n gpu1 = '/gpu:0'\n gpu2 = '/gpu:1'\n with tf.device(gpu1):\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*growth, 7, stride=2)\n #net = layers.conv2d(image, 2*growth, 7, stride=1)\n # TODO\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n\n skip_layers = []\n\n # no diff with double BN from orig densenet, first=True\n net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)\n #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,\n # first=True, split=True)\n #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_mid_refine'])\n skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine'])\n net, _ = transition(net, compression, 'block0/transition')\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_refine'])\n\n #net, skip = dense_block(net, block_sizes[1], growth, 'block1', is_training, split=True)\n #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_mid_refine'])\n #with tf.device(gpu2):\n net = dense_block(net, block_sizes[1], growth, 'block1', is_training)\n skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine'])\n net, _ = transition(net, compression, 'block1/transition')\n #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_refine'])\n\n # works the same with split, not 100%\n #context_pool_num = 3\n net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)\n skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine'])\n context_pool_num = 4\n with tf.device(gpu2):\n #net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n skip_layers.append([net, up_sizes[3], growth_up, 'block2_refine'])\n #skip_layers.append([net, up_sizes[2], growth_up, 'block2_refine'])\n net, _ = transition(net, compression, 'block2/transition')\n\n #net = dense_block(net, block_sizes[3], growth, 'block3', is_training)\n net, skip = dense_block(net, block_sizes[3], growth, 'block3', is_training, split=True)\n context_pool_num = 3\n skip_layers.append([skip, up_sizes[-1], growth_up, 'block3_refine'])\n\n with tf.variable_scope('head'):\n #net = BNReluConv(net, 512, 'bottleneck', k=1)\n net = BNReluConv(net, 512, 'bottleneck', k=1)\n net = BNReluConv(net, 128, 'context', k=3, rate=2)\n #net = BNReluConv(net, 128, 'bottleneck', k=1)\n #net = 
dense_block_context(net)\n #net = _pyramid_pooling(net, size=context_pool_num)\n #net = BNReluConv(net, context_size, 'context_conv', k=3)\n # SPP has dropout here\n #if is_training:\n # net = tf.nn.dropout(net, keep_prob=0.9)\n\n #print('dense context')\n #print('7x7')\n #net = BNReluConv(net, context_size, 'context_conv', k=7)\n #net = BNReluConv(net, context_size, 'context_conv', k=7, rate=2)\n #net = BNReluConv(net, context_size, 'context_conv', k=3, rate=2)\n #in_shape = net.get_shape().as_list()\n #in_shape[maps_dim] = context_size\n #net.set_shape(in_shape)\n #net = BNReluConv(net, context_size, 'context_conv', k=5)\n #final_h = net.get_shape().as_list()[height_dim]\n print('Before upsampling: ', net)\n\n all_logits = [net]\n for skip_layer in reversed(skip_layers):\n net = refine(net, skip_layer)\n all_logits.append(net)\n print('after upsampling = ', net)\n\n all_logits = [all_logits[0], all_logits[-1]]\n #all_logits = [all_logits[1], all_logits[-1]]\n #all_logits = [all_logits[2], all_logits[-1]]\n\n with tf.device(gpu2), tf.variable_scope('head'):\n for i, logits in enumerate(all_logits):\n with tf.variable_scope('logits_'+str(i)):\n # FIX\n #net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n #logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n # data_format=data_format)\n logits = layers.conv2d(tf.nn.relu(logits), FLAGS.num_classes, 1,\n activation_fn=None, data_format=data_format)\n\n if data_format == 'NCHW':\n logits = tf.transpose(logits, perm=[0,2,3,1])\n input_shape = tf.shape(image)[height_dim:height_dim+2]\n logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n all_logits[i] = logits\n logits = all_logits.pop()\n return logits, all_logits\n\ndef create_init_op(params):\n variables = tf.contrib.framework.get_variables()\n init_map = {}\n # clear head vars from imagenet\n remove_keys = []\n for key in params.keys():\n if 'head/' in key:\n print('delete ', key)\n remove_keys.append(key)\n for key in remove_keys:\n del params[key]\n\n for var in variables:\n name = var.name\n if name in params:\n #print(name, ' --> found init')\n #print(var)\n #print(params[name].shape)\n init_map[var.name] = params[name]\n del params[name]\n #else:\n # print(name, ' --> init not found!')\n print('Unused: ', list(params.keys()))\n init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)\n return init_op, init_feed\n\n\n#def jitter(image, labels, weights):\ndef jitter(image, labels):\n with tf.name_scope('jitter'), tf.device('/cpu:0'):\n print('\\nJittering enabled')\n global random_flip_tf, resize_width, resize_height\n #random_flip_tf = tf.placeholder(tf.bool, shape=(), name='random_flip')\n random_flip_tf = tf.placeholder(tf.bool, shape=(FLAGS.batch_size), name='random_flip')\n resize_width = tf.placeholder(tf.int32, shape=(), name='resize_width')\n resize_height = tf.placeholder(tf.int32, shape=(), name='resize_height')\n \n #image_split = tf.unstack(image, axis=0)\n #labels_split = tf.unstack(labels, axis=0)\n #weights_split = tf.unstack(weights, axis=0)\n out_img = []\n #out_weights = []\n out_labels = []\n for i in range(FLAGS.batch_size):\n out_img.append(tf.cond(random_flip_tf[i],\n lambda: tf.image.flip_left_right(image[i]),\n lambda: image[i]))\n out_labels.append(tf.cond(random_flip_tf[i],\n lambda: tf.image.flip_left_right(labels[i]),\n lambda: labels[i]))\n #out_weights.append(tf.cond(random_flip_tf[i],\n # lambda: tf.image.flip_left_right(weights[i]),\n # lambda: weights[i]))\n image = tf.stack(out_img, axis=0)\n 
labels = tf.stack(out_labels, axis=0)\n #weights = tf.stack(out_weights, axis=0)\n\n if jitter_scale:\n global known_shape\n known_shape = False\n image = tf.image.resize_bicubic(image, [resize_height, resize_width])\n #image = tf.image.resize_bilinear(image, [resize_height, resize_width])\n image = tf.round(image)\n image = tf.minimum(255.0, image)\n image = tf.maximum(0.0, image)\n labels = tf.image.resize_nearest_neighbor(labels, [resize_height, resize_width])\n # TODO is this safe for zero wgts?\n #weights = tf.image.resize_nearest_neighbor(weights, [resize_height, resize_width])\n #return image, labels, weights\n return image, labels\n\n\ndef _get_train_feed():\n global random_flip_tf, resize_width, resize_height\n #random_flip = int(np.random.choice(2, 1))\n random_flip = np.random.choice(2, FLAGS.batch_size).astype(np.bool)\n #resize_scale = np.random.uniform(0.5, 2)\n #resize_scale = np.random.uniform(0.4, 1.5)\n #resize_scale = np.random.uniform(0.5, 1.2)\n #min_resize = 0.7\n #max_resize = 1.3\n min_resize = 0.8\n max_resize = 1.2\n #min_resize = 0.9\n #max_resize = 1.1\n #max_resize = 1\n if train_step_iter == 0:\n resize_scale = max_resize\n else:\n resize_scale = np.random.uniform(min_resize, max_resize)\n width = np.int32(int(round(FLAGS.img_width * resize_scale)))\n height = np.int32(int(round(FLAGS.img_height * resize_scale)))\n feed_dict = {random_flip_tf:random_flip, resize_width:width, resize_height:height}\n return feed_dict\n\n\ndef build(mode):\n if mode == 'train':\n is_training = True\n reuse = False\n dataset = train_dataset\n elif mode == 'validation':\n is_training = False\n reuse = True\n dataset = valid_dataset\n\n with tf.variable_scope('', reuse=reuse):\n x, labels, num_labels, class_hist, img_names = \\\n reader.inputs(dataset, is_training=is_training)\n #reader.inputs(dataset, is_training=is_training, num_epochs=FLAGS.max_epochs)\n\n if is_training and apply_jitter:\n x, labels = jitter(x, labels)\n image = x\n x = normalize_input(x)\n\n #logits = _build(x, depth, is_training)\n #total_loss = _loss(logits, labels, weights, is_training)\n #logits, mid_logits = _build(x, is_training)\n logits, aux_logits = _build(x, is_training)\n total_loss = _multiloss(logits, aux_logits, labels, class_hist, num_labels, is_training)\n\n if is_training and imagenet_init:\n init_path = init_dir + 'dense_net_' + str(model_depth) + '.pickle'\n with open(init_path, 'rb') as f:\n init_map = pickle.load(f)\n init_op, init_feed = create_init_op(init_map)\n else:\n init_op, init_feed = None, None\n train_run_ops = [total_loss, logits, labels, img_names]\n #train_run_ops = [total_loss, logits, labels, img_names, image]\n val_run_ops = [total_loss, logits, labels, img_names]\n if is_training:\n return train_run_ops, init_op, init_feed\n else:\n return val_run_ops\n\n\ndef inference(image, labels=None, constant_shape=True, is_training=False):\n global known_shape\n known_shape = constant_shape\n x = normalize_input(image)\n logits, aux_logits = _build(x, is_training=is_training)\n if labels:\n main_wgt = 0.7\n xent_loss = main_wgt * losses.weighted_cross_entropy_loss(logits, labels)\n xent_loss = (1-main_wgt) * losses.weighted_cross_entropy_loss(aux_logits, labels)\n return logits, aux_logits, xent_loss\n return logits, aux_logits\n\n\ndef _multiloss(logits, aux_logits, labels, num_labels, class_hist, is_training):\n max_weight = FLAGS.max_weight\n xent_loss = 0\n #main_wgt = 0.6\n if len(aux_logits) > 0:\n main_wgt = 0.7\n aux_wgt = (1 - main_wgt) / len(aux_logits)\n else:\n main_wgt 
= 1.0\n aux_wgt = 0\n xent_loss = main_wgt * losses.weighted_cross_entropy_loss(\n logits, labels, class_hist, max_weight=max_weight)\n for i, l in enumerate(aux_logits):\n print('loss' + str(i), ' --> ' , l)\n xent_loss += aux_wgt * losses.weighted_cross_entropy_loss(\n l, labels, class_hist, max_weight=max_weight)\n\n all_losses = [xent_loss]\n # get losses + regularization\n total_loss = losses.total_loss_sum(all_losses)\n if is_training:\n loss_averages_op = losses.add_loss_summaries(total_loss)\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n return total_loss\n\n\ndef _dualloss(logits, mid_logits, labels, class_hist, num_labels, is_training=True):\n #loss1 = losses.cross_entropy_loss(logits, labels, weights, num_labels)\n #loss2 = losses.cross_entropy_loss(mid_logits, labels, weights, num_labels)\n #max_weight = 10\n max_weight = 1\n loss1 = losses.weighted_cross_entropy_loss(logits, labels, class_hist,\n max_weight=max_weight)\n loss2 = losses.weighted_cross_entropy_loss(mid_logits, labels, class_hist,\n max_weight=max_weight)\n #loss1 = losses.weighted_cross_entropy_loss_dense(logits, labels, weights, num_labels,\n # max_weight=max_weight)\n #loss2 = losses.weighted_cross_entropy_loss_dense(mid_logits, labels, weights, num_labels,\n # max_weight=max_weight)\n #wgt = 0.4\n #xent_loss = loss1 + wgt * loss2\n wgt = 0.3 # best\n #wgt = 0.2\n #wgt = 0.4\n xent_loss = (1-wgt)*loss1 + wgt*loss2\n all_losses = [xent_loss]\n\n # get losses + regularization\n total_loss = losses.total_loss_sum(all_losses)\n\n if is_training:\n loss_averages_op = losses.add_loss_summaries(total_loss)\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n\n return total_loss\n\n\ndef minimize(loss, global_step, num_batches):\n # Calculate the learning rate schedule.\n #decay_steps = int(num_batches * FLAGS.num_epochs_per_decay)\n # Decay the learning rate exponentially based on the number of steps.\n global lr\n #base_lr = 1e-2 # for sgd\n base_lr = FLAGS.initial_learning_rate\n #TODO\n fine_lr_div = FLAGS.fine_tune_lr_factor\n #fine_lr_div = 10\n #fine_lr_div = 7\n print('LR = ', base_lr)\n print('fine_lr = LR / ', fine_lr_div)\n #lr_fine = tf.train.exponential_decay(base_lr / 10, global_step, decay_steps,\n #lr_fine = tf.train.exponential_decay(base_lr / 20, global_step, decay_steps,\n\n #decay_steps = int(num_batches * 30)\n #decay_steps = num_batches * FLAGS.max_epochs\n decay_steps = FLAGS.num_iters\n lr_fine = tf.train.polynomial_decay(base_lr / fine_lr_div, global_step, decay_steps,\n end_learning_rate=0, power=FLAGS.decay_power)\n lr = tf.train.polynomial_decay(base_lr, global_step, decay_steps,\n end_learning_rate=0, power=FLAGS.decay_power)\n #lr = tf.Print(lr, [lr], message='lr = ', summarize=10)\n\n #stairs = True\n #lr_fine = tf.train.exponential_decay(base_lr / fine_lr_div, global_step, decay_steps,\n # FLAGS.learning_rate_decay_factor, staircase=stairs)\n #lr = tf.train.exponential_decay(base_lr, global_step, decay_steps,\n # FLAGS.learning_rate_decay_factor, staircase=stairs)\n tf.summary.scalar('learning_rate', lr)\n # adam works much better here!\n if imagenet_init:\n if FLAGS.optimizer == 'adam':\n print('\\nOptimizer = ADAM\\n')\n opts = [tf.train.AdamOptimizer(lr_fine), tf.train.AdamOptimizer(lr)]\n elif FLAGS.optimizer == 'momentum':\n print('\\nOptimizer = SGD + momentum\\n')\n opts = [tf.train.MomentumOptimizer(lr_fine, 0.9), tf.train.MomentumOptimizer(lr, 0.9)]\n else:\n raise ValueError('unknown 
optimizer')\n return train_helper.minimize_fine_tune(opts, loss, global_step, 'head')\n else:\n opt = tf.train.AdamOptimizer(lr)\n #opt = tf.train.MomentumOptimizer(lr, 0.9)\n return train_helper.minimize(opt, loss, global_step)\n #opts = [tf.train.RMSPropOptimizer(lr_fine, momentum=0.9, centered=True),\n # tf.train.RMSPropOptimizer(lr, momentum=0.9, centered=True)]\n #opts = [tf.train.MomentumOptimizer(lr_fine, 0.9), tf.train.MomentumOptimizer(lr, 0.9)]\n\n\n\ndef train_step(sess, run_ops):\n global train_step_iter\n if apply_jitter:\n feed_dict = _get_train_feed()\n vals = sess.run(run_ops, feed_dict=feed_dict)\n else:\n vals = sess.run(run_ops)\n train_step_iter += 1\n #img = vals[-3]\n #print(img.shape)\n ##print(img.mean())\n #for i in range(img.shape[0]):\n # rgb = img[i]\n # print(rgb.min())\n # print(rgb.max())\n # ski.io.imsave(join('/home/kivan/datasets/results/tmp/debug', str(i)+'.png'),\n # rgb.astype(np.uint8))\n return vals\n\n\ndef num_batches():\n return train_dataset.num_examples() // FLAGS.batch_size\n\n\ndef image_size(net):\n return net.get_shape().as_list()[height_dim:height_dim+2]\n\ndef _build_dilated(image, is_training=False):\n #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)\n bn_params['is_training'] = is_training\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*growth, 7, stride=2)\n #net = layers.conv2d(image, 2*growth, 7, stride=1)\n # TODO\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n\n skip_layers = []\n\n # no diff with double BN from orig densenet, first=True\n net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)\n #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,\n # first=True, split=True)\n #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_mid_refine'])\n skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine'])\n net, _ = transition(net, compression, 'block0/transition')\n #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_refine'])\n\n #net, skip = dense_block(net, block_sizes[1], growth, 'block1', is_training, split=True)\n #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_mid_refine'])\n net = dense_block(net, block_sizes[1], growth, 'block1', is_training)\n skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine'])\n net, _ = transition(net, compression, 'block1/transition')\n #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_refine'])\n\n # works the same with split, not 100%\n #context_pool_num = 3\n #context_pool_num = 4\n context_pool_num = 5\n #net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)\n #skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine'])\n net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n #skip_layers.append([net, up_sizes[3], growth_up, 'block2_refine'])\n #skip_layers.append([net, up_sizes[2], growth_up, 'block2_refine'])\n net, _ = transition(net, compression, 'block2/transition', stride=1)\n\n bsz = 2\n paddings, crops = 
tf.required_space_to_batch_paddings(image_size(net), [bsz, bsz])\n net = tf.space_to_batch(net, paddings=paddings, block_size=bsz)\n net = dense_block(net, block_sizes[3], growth, 'block3', is_training)\n net = tf.batch_to_space(net, crops=crops, block_size=bsz)\n print('before context = ', net)\n\n with tf.variable_scope('head'):\n net = BNReluConv(net, 512, 'bottleneck', k=1)\n net = _pyramid_pooling(net, size=context_pool_num)\n #net = BNReluConv(net, context_size, 'context_conv', k=3)\n\n print('Before upsampling: ', net)\n\n all_logits = [net]\n for skip_layer in reversed(skip_layers):\n net = refine(net, skip_layer)\n all_logits.append(net)\n print('after upsampling = ', net)\n\n all_logits = [all_logits[0], all_logits[-1]]\n #all_logits = [all_logits[1], all_logits[-1]]\n #all_logits = [all_logits[2], all_logits[-1]]\n\n with tf.variable_scope('head'):\n for i, logits in enumerate(all_logits):\n with tf.variable_scope('logits_'+str(i)):\n # FIX\n #net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n #logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n # data_format=data_format)\n logits = layers.conv2d(tf.nn.relu(logits), FLAGS.num_classes, 1,\n activation_fn=None, data_format=data_format)\n\n if data_format == 'NCHW':\n logits = tf.transpose(logits, perm=[0,2,3,1])\n input_shape = tf.shape(image)[height_dim:height_dim+2]\n logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n all_logits[i] = logits\n logits = all_logits.pop()\n return logits, all_logits\n\n#def _loss(logits, labels, weights, is_training=True):\n# #TODO\n# #xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=1)\n# xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=10)\n# #xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=20)\n# #xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=50)\n# #xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=100)\n# all_losses = [xent_loss]\n#\n# # get losses + regularization\n# total_loss = losses.total_loss_sum(all_losses)\n#\n# if is_training:\n# loss_averages_op = losses.add_loss_summaries(total_loss)\n# with tf.control_dependencies([loss_averages_op]):\n# total_loss = tf.identity(total_loss)\n#\n# return total_loss\n\n", "import os\nimport tensorflow as tf\nimport train_helper\n\nMODEL_PATH = './models/voc2012/resnet.py'\nSAVE_DIR = os.path.join('/home/kivan/datasets/results/tmp/voc2012/',\n train_helper.get_time_string())\n\nDATASET_DIR = '/home/kivan/datasets/VOC2012/tensorflow/'\n#DATASET_DIR = '/home/kivan/datasets/voc2012_aug/tensorflow/'\nIMG_HEIGHT, IMG_WIDTH = 500, 500\n\n# SGD\n# 2e-2 to large\n#tf.app.flags.DEFINE_float('initial_learning_rate', 1e-2, '')\n# 7 to big\ntf.app.flags.DEFINE_integer('num_epochs_per_decay', 5, '')\n#tf.app.flags.DEFINE_integer('num_epochs_per_decay', 6, '')\n# ADAM\n#tf.app.flags.DEFINE_float('initial_learning_rate', 4e-4, '')\ntf.app.flags.DEFINE_float('initial_learning_rate', 1e-3, '')\n#tf.app.flags.DEFINE_integer('num_epochs_per_decay', 5, '')\n\ntf.app.flags.DEFINE_integer('batch_size', 5, '')\ntf.app.flags.DEFINE_integer('batch_size_valid', 3, '')\ntf.app.flags.DEFINE_integer('num_validations_per_epoch', 1, '')\ntf.app.flags.DEFINE_integer('max_epochs', 30, 'Number of epochs to run.')\n\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.5,\n \"\"\"Learning rate decay factor.\"\"\")\n\n#povecaj_lr za w=1\nFLAGS = 
tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('train_dir', SAVE_DIR, \\\n \"\"\"Directory where to write event logs and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('resume_path', '', '')\ntf.app.flags.DEFINE_integer('img_width', IMG_WIDTH, '')\ntf.app.flags.DEFINE_integer('img_height', IMG_HEIGHT, '')\ntf.app.flags.DEFINE_integer('img_channels', 3, '')\ntf.app.flags.DEFINE_integer('max_weight', 10, '')\n\ntf.app.flags.DEFINE_string('model_path', MODEL_PATH, '')\ntf.app.flags.DEFINE_string('dataset_dir', DATASET_DIR, '')\ntf.app.flags.DEFINE_string('debug_dir', os.path.join(SAVE_DIR, 'debug'), '')\ntf.app.flags.DEFINE_integer('num_classes', 21, '')\ntf.app.flags.DEFINE_boolean('log_device_placement', False, 'Whether to log device placement.')\ntf.app.flags.DEFINE_boolean('draw_predictions', False, 'Whether to draw.')\ntf.app.flags.DEFINE_boolean('save_net', True, 'Whether to save.')\ntf.app.flags.DEFINE_boolean('no_valid', False, 'Whether to save.')\n\ntf.app.flags.DEFINE_integer('seed', 66478, '')\n\n" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.contrib.layers.batch_norm", "tensorflow.space_to_batch", "numpy.random.choice", "tensorflow.contrib.framework.assign_from_values", "numpy.mean", "tensorflow.contrib.layers.conv2d", "tensorflow.stack", "tensorflow.control_dependencies", "tensorflow.identity", "tensorflow.shape", "tensorflow.concat", "tensorflow.contrib.framework.get_variables", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.transpose", "tensorflow.contrib.layers.max_pool2d", "tensorflow.image.resize_nearest_neighbor", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.nn.dropout", "tensorflow.train.AdamOptimizer", "tensorflow.nn.relu", "numpy.zeros", "tensorflow.summary.scalar", "tensorflow.minimum", "tensorflow.round", "tensorflow.image.resize_bicubic", "tensorflow.contrib.layers.avg_pool2d", "tensorflow.train.polynomial_decay", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.image.flip_left_right", "tensorflow.train.MomentumOptimizer", "tensorflow.batch_to_space", "numpy.random.uniform", "tensorflow.device", "tensorflow.maximum" ], [ "tensorflow.app.flags.DEFINE_float", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.app.flags.DEFINE_string" ] ]
qqsun8819/oneflow
[ "b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40", "b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40", "b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40" ]
[ "oneflow/python/test/ops/test_gpt_data_loader.py", "oneflow/python/test/modules/test_sign.py", "oneflow/python/test/ops/test_quantize_op.py" ]
[ "\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport unittest\nimport oneflow as flow\nimport os\n\n\ndef _make_gpt_data_loader_func(\n data_file_prefix,\n seq_length,\n num_samples,\n batch_size,\n dtype,\n shuffle=None,\n random_seed=None,\n split_sizes=None,\n split_index=None,\n machine_num=1,\n device_num=1,\n parallel_distribution=None,\n start_from_saved_progress=False,\n):\n assert machine_num > 0\n assert device_num > 0 and device_num <= 4\n\n parallel_hierachy = None\n if machine_num == 1:\n device_strs = \"0:0-{}\".format(device_num - 1)\n elif machine_num > 1:\n device_strs = [\n \"{}:0-{}\".format(machine_id, device_num - 1)\n for machine_id in range(machine_num)\n ]\n parallel_hierachy = (machine_num, device_num)\n else:\n raise ValueError(\"invalid machine_num\", machine_num)\n\n flow.clear_default_session()\n flow.config.cpu_device_num(4)\n flow.config.enable_legacy_model_io(True)\n\n func_cfg = flow.FunctionConfig()\n func_cfg.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(\"predict\", function_config=func_cfg)\n def gpt_loader_fn() -> flow.typing.Numpy:\n with flow.scope.placement(\"cpu\", device_strs, parallel_hierachy):\n tokens = flow.data.megatron_gpt_mmap_data_loader(\n data_file_prefix=data_file_prefix,\n seq_length=seq_length,\n num_samples=num_samples,\n batch_size=batch_size,\n dtype=dtype,\n shuffle=shuffle,\n random_seed=random_seed,\n split_sizes=split_sizes,\n split_index=split_index,\n parallel_distribution=parallel_distribution,\n start_from_saved_progress=start_from_saved_progress,\n name=\"GPTDataLoader\",\n )\n\n if (\n isinstance(parallel_distribution, list)\n and len(parallel_distribution) > 1\n ):\n tokens = flow.hierarchical_parallel_cast(\n tokens, parallel_distribution=[\"B\", \"B\"]\n )\n\n tokens = flow.hierarchical_parallel_cast(tokens, parallel_distribution=[\"B\"])\n\n return tokens\n\n check_point = flow.train.CheckPoint()\n check_point.init()\n return gpt_loader_fn\n\n\n@unittest.skipIf(\n os.getenv(\"ONEFLOW_TEST_GITHUB_HOSTED\"),\n \"/dataset not available on GitHub hosted servers\",\n)\nclass TestGPTDataLoader(flow.unittest.TestCase):\n DATA_FILE_PREFIX = \"/dataset/Megatron-LM/dummy/gpt_sample_dataset_text_document\"\n SEQ_LENGTH = 1024\n RANDOM_SEED = 12345\n\n @flow.unittest.skip_unless_1n1d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"2-D SBP doesn't work in eager mode\",\n )\n def test_simple(self):\n of_gpt_data_loader_fn = _make_gpt_data_loader_func(\n data_file_prefix=self.DATA_FILE_PREFIX,\n seq_length=10,\n num_samples=10,\n batch_size=2,\n dtype=flow.int64,\n shuffle=False,\n start_from_saved_progress=True,\n )\n tokens = of_gpt_data_loader_fn()\n # this comparison tokens is from megatron-lm gpt data loader\n cmp_tokens = np.array(\n [\n [40, 1101, 845, 845, 3772, 13, 428, 318, 257, 1492, 13],\n [13, 612, 318, 257, 18739, 550, 257, 3290, 13, 50256, 464],\n ],\n dtype=np.int64,\n )\n 
self.assertTrue(np.array_equal(tokens, cmp_tokens))\n\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"2-D SBP doesn't work in eager mode\",\n )\n def test_1n1d(self):\n of_gpt_data_loader_fn = _make_gpt_data_loader_func(\n data_file_prefix=self.DATA_FILE_PREFIX,\n seq_length=self.SEQ_LENGTH,\n num_samples=648,\n batch_size=8,\n split_sizes=[949, 50, 1],\n split_index=0,\n dtype=flow.int64,\n shuffle=True,\n random_seed=self.RANDOM_SEED,\n )\n tokens_list = []\n for _ in range(5):\n tokens = of_gpt_data_loader_fn()\n tokens_list.append(tokens)\n\n return np.stack(tokens_list, axis=0)\n\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"2-D SBP doesn't work in eager mode\",\n )\n def test_1n4d(self):\n of_gpt_data_loader_fn = _make_gpt_data_loader_func(\n data_file_prefix=self.DATA_FILE_PREFIX,\n seq_length=self.SEQ_LENGTH,\n num_samples=648,\n batch_size=8,\n split_sizes=[949, 50, 1],\n split_index=0,\n dtype=flow.int64,\n shuffle=True,\n random_seed=self.RANDOM_SEED,\n device_num=4,\n parallel_distribution=[\"S(0)\"],\n )\n\n tokens_list = []\n for _ in range(5):\n tokens = of_gpt_data_loader_fn()\n tokens_list.append(tokens)\n\n result_1n4d = np.stack(tokens_list, axis=0)\n result_1n1d = self.test_1n1d()\n self.assertTrue(np.array_equal(result_1n4d, result_1n1d))\n return result_1n4d\n\n @flow.unittest.skip_unless_2n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"2-D SBP doesn't work in eager mode\",\n )\n def test_2n4d(self):\n of_gpt_data_loader_fn = _make_gpt_data_loader_func(\n data_file_prefix=self.DATA_FILE_PREFIX,\n seq_length=self.SEQ_LENGTH,\n num_samples=648,\n batch_size=8,\n split_sizes=[949, 50, 1],\n split_index=0,\n dtype=flow.int64,\n shuffle=True,\n random_seed=self.RANDOM_SEED,\n machine_num=2,\n device_num=4,\n parallel_distribution=[\"S(0)\", \"B\"],\n )\n\n tokens_list = []\n for _ in range(5):\n tokens = of_gpt_data_loader_fn()\n tokens_list.append(tokens)\n\n result_2n4d = np.stack(tokens_list, axis=0)\n result_1n1d = self.test_1n1d()\n self.assertTrue(np.array_equal(result_2n4d, result_1n1d))\n return result_2n4d\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\r\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\nimport unittest\r\nfrom collections import OrderedDict\r\n\r\nimport numpy as np\r\n\r\nimport oneflow.experimental as flow\r\nfrom test_util import GenArgList\r\n\r\n\r\ndef _test_sign_impl(test_case, shape, device):\r\n np_input = np.random.randn(*shape)\r\n of_input = flow.Tensor(\r\n np_input, dtype=flow.float32, device=flow.device(device), requires_grad=True\r\n )\r\n\r\n of_out = flow.sign(of_input)\r\n np_out = np.sign(np_input)\r\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-4, 1e-4))\r\n\r\n of_out = of_out.sum()\r\n of_out.backward()\r\n np_grad = np.zeros_like(np_input)\r\n test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-4, 1e-4))\r\n\r\n\r\n@unittest.skipIf(\r\n not flow.unittest.env.eager_execution_enabled(),\r\n \".numpy() doesn't work in lazy mode\",\r\n)\r\nclass TestSign(flow.unittest.TestCase):\r\n def test_sign(test_case):\r\n arg_dict = OrderedDict()\r\n arg_dict[\"shape\"] = [(2, 3), (2, 4, 5, 6)]\r\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\r\n for arg in GenArgList(arg_dict):\r\n _test_sign_impl(test_case, *arg)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom collections import OrderedDict\nimport math\nimport numpy as np\nimport unittest\n\nimport oneflow as flow\nimport oneflow.typing as oft\nimport test_global_storage\nfrom test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n\n\ndef gen_quant_scale_for_min_max_symmetric(weight, quantization_bit):\n weight_max = np.max(np.abs(weight))\n denominator = 2.0 ** (quantization_bit - 1) - 1\n return weight_max / denominator, 0\n\n\ndef gen_quant_scale_for_min_max_affine(weight, quantization_bit):\n weight_max = np.max(weight)\n weight_min = np.min(weight)\n denominator = 2.0 ** (quantization_bit) - 1\n scale = (weight_max - weight_min) / denominator\n zero_point = -np.round(weight_min / scale)\n return scale, zero_point\n\n\ndef gen_quant_scale_for_min_max_cambricon(weight, quantization_bit):\n weight_max = np.max(np.abs(weight))\n scale = math.floor(math.log2(weight_max)) - (quantization_bit - 2)\n return scale, 0\n\n\ndef product(tu):\n return np.prod(tu).astype(np.int).item()\n\n\ndef _check_min_max_observer(\n test_case,\n weight,\n scale_of,\n zero_point_of,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n):\n if per_layer_quantization or quantization_formula == \"cambricon\":\n outer_num = 1\n inner_num = product(weight.shape[0:])\n else:\n outer_num = weight.shape[0]\n inner_num = product(weight.shape[1:])\n\n scale_np = np.zeros((outer_num,))\n zero_point_np = np.zeros((outer_num,))\n weight_flatten = weight.flatten()\n\n if quantization_formula == \"google\":\n if quantization_scheme == \"symmetric\":\n for c in range(outer_num):\n (\n scale_np[c],\n zero_point_np[c],\n ) = gen_quant_scale_for_min_max_symmetric(\n weight_flatten[c * inner_num : (c + 1) * inner_num],\n quantization_bit,\n )\n else: # \"affine\"\n for c in range(outer_num):\n scale_np[c], zero_point_np[c] = gen_quant_scale_for_min_max_affine(\n weight_flatten[c * inner_num : (c + 1) * inner_num],\n quantization_bit,\n )\n else: # quantization_formula == \"cambricon\"\n scale_np[0], zero_point_np[0] = gen_quant_scale_for_min_max_cambricon(\n weight_flatten, quantization_bit\n )\n test_case.assertTrue(np.allclose(scale_of, scale_np, rtol=1e-3))\n test_case.assertTrue(\n np.allclose(\n zero_point_of.astype(np.int), zero_point_np.astype(np.int), rtol=1e-3\n )\n )\n\n\ndef _run_test_min_max_observer(\n test_case,\n device_type,\n device_num,\n dtype,\n weight_shape,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_num)\n else:\n flow.config.gpu_device_num(device_num)\n\n @flow.global_function(type=\"predict\", function_config=flow.FunctionConfig())\n def QuantizeJob(\n weight: oft.Numpy.Placeholder(weight_shape, dtype=type_name_to_flow_type[dtype])\n ):\n with flow.scope.placement(device_type, \"0:0-%d\" % (device_num 
- 1)):\n scale, zero_point = flow.quantization.min_max_observer(\n weight,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n )\n return scale, zero_point\n\n weight = (np.random.random(weight_shape) - 0.5).astype(type_name_to_np_type[dtype])\n scale, zero_point = QuantizeJob(weight).get()\n _check_min_max_observer(\n test_case,\n weight,\n scale.numpy(),\n zero_point.numpy(),\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n )\n\n\ndef gen_quant_scale_for_moving_average_min_max_symmetric(\n activation, quantization_bit, momentum, moving_max, moving_min\n):\n activation_max = np.max(np.abs(activation))\n\n denominator = 2.0 ** (quantization_bit - 1) - 1\n\n if moving_max[0] == 0:\n moving_max[0] = activation_max\n else:\n moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)\n\n moving_min[0] = moving_max[0]\n\n return moving_max[0] / denominator, 0\n\n\ndef gen_quant_scale_for_moving_average_min_max_affine(\n activation, quantization_bit, momentum, moving_max, moving_min\n):\n activation_max = np.max(activation)\n activation_min = np.min(activation)\n\n denominator = 2.0 ** (quantization_bit) - 1\n\n if moving_max[0] == 0:\n moving_max[0] = activation_max\n else:\n moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)\n\n if moving_min[0] == 0:\n moving_min[0] = activation_min\n else:\n moving_min[0] = moving_min[0] * momentum + activation_min * (1 - momentum)\n\n scale = (moving_max[0] - moving_min[0]) / denominator\n zero_point = -np.round(moving_min[0] / scale)\n\n return scale, zero_point\n\n\ndef gen_quant_scale_for_moving_average_min_max_cambricon(\n activation, quantization_bit, momentum, moving_max, moving_min\n):\n activation_max = np.max(np.abs(activation))\n\n if moving_max[0] == 0:\n moving_max[0] = activation_max\n else:\n moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)\n\n moving_min[0] = moving_max[0]\n\n return math.floor(math.log2(moving_max[0])) - (quantization_bit - 2), 0\n\n\ndef _check_moving_average_min_max_observer(\n test_case,\n activation,\n scale_of,\n zero_point_of,\n moving_max_np,\n moving_min_np,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n momentum,\n):\n if quantization_formula == \"google\":\n if quantization_scheme == \"symmetric\":\n (\n scale_np,\n zero_point_np,\n ) = gen_quant_scale_for_moving_average_min_max_symmetric(\n activation.flatten(),\n quantization_bit,\n momentum,\n moving_max_np,\n moving_min_np,\n )\n else: # \"affine\"\n scale_np, zero_point_np = gen_quant_scale_for_moving_average_min_max_affine(\n activation.flatten(),\n quantization_bit,\n momentum,\n moving_max_np,\n moving_min_np,\n )\n else: # quantization_formula == \"cambricon\":\n scale_np, zero_point_np = gen_quant_scale_for_moving_average_min_max_cambricon(\n activation.flatten(),\n quantization_bit,\n momentum,\n moving_max_np,\n moving_min_np,\n )\n test_case.assertTrue(np.allclose(scale_of[0], scale_np, rtol=1e-3))\n test_case.assertTrue(np.allclose(zero_point_of[0], zero_point_np, rtol=1e-3))\n\n\ndef _run_test_moving_average_min_max_observer(\n test_case,\n device_type,\n device_num,\n dtype,\n activation_shape,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n momentum,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_num)\n else:\n flow.config.gpu_device_num(device_num)\n\n 
@flow.global_function(type=\"train\", function_config=flow.FunctionConfig())\n def QuantizeJob(\n activation: oft.Numpy.Placeholder(\n activation_shape, dtype=type_name_to_flow_type[dtype]\n )\n ):\n with flow.scope.placement(device_type, \"0:0-%d\" % (device_num - 1)):\n x = flow.get_variable(\n \"x\",\n shape=activation_shape,\n dtype=activation.dtype,\n initializer=flow.zeros_initializer(activation.dtype),\n trainable=True,\n )\n scale, zero_point = flow.quantization.moving_average_min_max_observer(\n activation,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n momentum,\n )\n fake = x + activation\n loss = flow.math.reduce_mean(fake)\n flow.optimizer.Adam(\n flow.optimizer.PiecewiseConstantScheduler([], [0.001]),\n ).minimize(loss)\n return scale, zero_point\n\n moving_max_np = np.zeros((1,))\n moving_min_np = np.zeros((1,))\n\n for i in range(10):\n activation = (np.random.random(activation_shape) - 0.5).astype(\n type_name_to_np_type[dtype]\n )\n scale, zero_point = QuantizeJob(activation).get()\n _check_moving_average_min_max_observer(\n test_case,\n activation,\n scale.numpy(),\n zero_point.numpy(),\n moving_max_np,\n moving_min_np,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n momentum,\n )\n\n\ndef fake_quant_per_layer_symmetric(input, quantization_bit, scale):\n upper_bound = 2.0 ** (quantization_bit - 1) - 1\n lower_bound = -upper_bound\n return np.clip(np.rint(input / scale), lower_bound, upper_bound) * scale\n\n\ndef fake_quant_per_layer_affine(input, quantization_bit, scale, zero_point):\n upper_bound = 2.0 ** (quantization_bit) - 1\n lower_bound = 0\n return (\n np.clip(np.rint(input / scale + zero_point), lower_bound, upper_bound)\n - zero_point\n ) * scale\n\n\ndef fake_quant_per_layer_cambricon(input, quantization_bit, shift):\n upper_bound = 2.0 ** (quantization_bit - 1) - 1\n lower_bound = -upper_bound\n scale = 2 ** shift\n return np.clip(np.rint(input / scale), lower_bound, upper_bound) * scale\n\n\ndef _check_fake_quantize(\n test_case,\n input,\n input_diff_of,\n out_of,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n):\n if per_layer_quantization or quantization_formula == \"cambricon\":\n outer_num = 1\n inner_num = product(input.shape[0:])\n else:\n outer_num = input.shape[0]\n inner_num = product(input.shape[1:])\n\n scale_np = np.zeros((outer_num,))\n zero_point_np = np.zeros((outer_num,))\n out_np = np.zeros((inner_num * outer_num,))\n\n input_flatten = input.flatten()\n input_diff_np = np.full((inner_num * outer_num,), 1.0 / (inner_num * outer_num))\n\n if quantization_formula == \"google\":\n if quantization_scheme == \"symmetric\":\n for c in range(outer_num):\n (\n scale_np[c],\n zero_point_np[c],\n ) = gen_quant_scale_for_min_max_symmetric(\n input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit\n )\n out = fake_quant_per_layer_symmetric(\n input_flatten[c * inner_num : (c + 1) * inner_num],\n quantization_bit,\n scale_np[c],\n )\n out_np[c * inner_num : (c + 1) * inner_num] = out\n\n else: # \"affine\"\n for c in range(outer_num):\n scale_np[c], zero_point_np[c] = gen_quant_scale_for_min_max_affine(\n input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit\n )\n out = fake_quant_per_layer_affine(\n input_flatten[c * inner_num : (c + 1) * inner_num],\n quantization_bit,\n scale_np[c],\n zero_point_np[c],\n )\n out_np[c * inner_num : (c + 1) * inner_num] = out\n else: # quantization_formula == \"cambricon\"\n scale_np[0], zero_point_np[0] 
= gen_quant_scale_for_min_max_cambricon(\n input_flatten, quantization_bit\n )\n out_np = fake_quant_per_layer_cambricon(\n input_flatten, quantization_bit, scale_np[0]\n )\n\n test_case.assertTrue(np.allclose(out_of, out_np, rtol=1e-3))\n test_case.assertTrue(np.allclose(input_diff_of, input_diff_np, rtol=1e-3))\n\n\ndef _run_test_fake_quantize(\n test_case,\n device_type,\n device_num,\n dtype,\n in_shape,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_num)\n else:\n flow.config.gpu_device_num(device_num)\n\n @flow.global_function(type=\"train\", function_config=flow.FunctionConfig())\n def QuantizeJob(\n input: oft.Numpy.Placeholder(in_shape, dtype=type_name_to_flow_type[dtype])\n ):\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=in_shape,\n dtype=input.dtype,\n initializer=flow.zeros_initializer(input.dtype),\n trainable=True,\n )\n input_x = input + x\n\n flow.watch_diff(input_x, test_global_storage.Setter(\"input_diff\"))\n\n with flow.scope.placement(device_type, \"0:0-%d\" % (device_num - 1)):\n scale, zero_point = flow.quantization.min_max_observer(\n input_x,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n )\n out = flow.quantization.fake_quantization(\n input_x,\n scale,\n zero_point,\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n )\n loss = flow.math.reduce_mean(out)\n\n flow.optimizer.Adam(\n flow.optimizer.PiecewiseConstantScheduler([], [0.001]),\n ).minimize(loss)\n\n return out\n\n input = (np.random.random(in_shape) - 0.5).astype(type_name_to_np_type[dtype])\n out = QuantizeJob(input).get()\n\n input_diff = test_global_storage.Get(\"input_diff\")\n\n _check_fake_quantize(\n test_case,\n input,\n input_diff.flatten(),\n out.numpy().flatten(),\n quantization_bit,\n quantization_scheme,\n quantization_formula,\n per_layer_quantization,\n )\n\n\n@unittest.skip(\"This test possibly fails\")\n@flow.unittest.skip_unless_1n4d()\nclass TestMinMaxObserver(flow.unittest.TestCase):\n def test_min_max_observer(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"device_num\"] = [1, 4]\n arg_dict[\"dtype\"] = [\"float32\", \"double\"]\n arg_dict[\"weight_shape\"] = [(9, 40, 20, 10)]\n arg_dict[\"quantization_bit\"] = [8, 2]\n arg_dict[\"quantization_scheme\"] = [\"symmetric\", \"affine\"]\n # TODO(Liang Depeng): Fix cambricon test\n arg_dict[\"quantization_formula\"] = [\"google\"]\n arg_dict[\"per_layer_quantization\"] = [True, False]\n\n for arg in GenArgList(arg_dict):\n if arg[-2] == \"cambricon\" and arg[-1] == False:\n continue\n _run_test_min_max_observer(*arg)\n\n\n@unittest.skip(\"This test possibly fails\")\nclass TestMovingAverageMinMaxObserver(flow.unittest.TestCase):\n def test_moving_average_min_max_observer(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"device_num\"] = [1, 4]\n arg_dict[\"dtype\"] = [\"float32\", \"double\"]\n arg_dict[\"activation_shape\"] = [(9, 40, 20, 10)]\n arg_dict[\"quantization_bit\"] = [8, 2]\n arg_dict[\"quantization_scheme\"] = [\"symmetric\", \"affine\"]\n # TODO(Liang Depeng): Fix cambricon test\n arg_dict[\"quantization_formula\"] = [\"google\"]\n 
arg_dict[\"momentum\"] = [0.95]\n\n for arg in GenArgList(arg_dict):\n _run_test_moving_average_min_max_observer(*arg)\n\n\n@unittest.skip(\"This test possibly fails\")\n@flow.unittest.skip_unless_1n4d()\nclass TestFakeQuantize(flow.unittest.TestCase):\n def test_fake_quantize(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"device_num\"] = [1, 4]\n arg_dict[\"dtype\"] = [\"float32\", \"double\"]\n arg_dict[\"in_shape\"] = [(9, 40, 20, 10)]\n arg_dict[\"quantization_bit\"] = [8, 2]\n arg_dict[\"quantization_scheme\"] = [\"symmetric\", \"affine\"]\n # TODO(Liang Depeng): Fix cambricon test\n arg_dict[\"quantization_formula\"] = [\"google\"]\n arg_dict[\"per_layer_quantization\"] = [True, False]\n\n for arg in GenArgList(arg_dict):\n if arg[-2] == \"cambricon\" and arg[-1] == False:\n continue\n _run_test_fake_quantize(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.stack", "numpy.array_equal" ], [ "numpy.sign", "numpy.zeros_like", "numpy.random.randn" ], [ "numpy.max", "numpy.full", "numpy.zeros", "numpy.round", "numpy.rint", "numpy.min", "numpy.allclose", "numpy.prod", "numpy.abs", "numpy.random.random" ] ]
AmisiJospin/Covid-19-Visualization
[ "84fd6e830455785d3ae76fe4f4a8183f24575b1e" ]
[ "COVID-9 Visualization.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nCreated on Wed Oct 27 09:44:29 2021\n\n@author: Jospin Amisi\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n#Gathering data and plotting Total World Cases\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.style.use('fivethirtyeight')\n\n#Loading the CSV file\ndf = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/main/data/countries-aggregated.csv', parse_dates = ['Date'])\n\n#Sum of the total cases (Confirmed, Recovered, Deaths)\ndf['Total Cases'] = df[['Confirmed', 'Recovered', 'Deaths']].sum(axis=1)\n\n#Worldwide Cases\nworldwide_df = df.groupby(['Date']).sum()\nw = worldwide_df.plot(figsize=(16,10))\nw.set_xlabel('Date')\nw.set_ylabel('# of Cases worldwide')\nw.title.set_text('Worldwide COVID Insights')\n\nplt.show()\n\n\n# Malawi vs. Worldwide Total Cases\nmw_df = df[df['Country']=='Malawi'].groupby(['Date']).sum()\n\nfig = plt.figure(figsize=(12,5))\nax = fig.add_subplot(111)\n\nax.plot(worldwide_df[['Total Cases']], label='Worldwide')\nax.plot(mw_df[['Total Cases']], label='Malawi')\nax.set_xlabel('Date')\nax.set_ylabel('# of Total Cases')\nax.title.set_text('Worldwide vs. Malawi Total Cases')\n\nplt.legend(loc='upper left')\nplt.show()\n\n\n# Malawi Daily Cases and Deaths\nw_df = mw_df.reset_index()\nmw_df['Daily Confirmed'] = mw_df['Confirmed'].sub(mw_df['Confirmed'].shift())\nmw_df['Daily Deaths'] = mw_df['Deaths'].sub(mw_df['Deaths'].shift())\n\nfig = plt.figure(figsize=(20,8))\nax = fig.add_subplot(111)\n\nax.bar(mw_df['Date'], mw_df['Daily Confirmed'], color='b', label='Malawi Daily Confirmed Cases')\nax.bar(mw_df['Date'], mw_df['Daily Deaths'], color='r', label='Malawi Daily Deaths')\nax.set_xlabel('Date')\nax.set_ylabel('# of People Affected')\nax.title.set_text('Malawi Daily Cases and Deaths')\n\nplt.legend(loc='upper left')\nplt.show()\n\n\n# Worst Hit Countries by COVID-19\n\nfrom datetime import date, timedelta\n\nyesterday = date.today() - timedelta(days=1)\nyesterday.strftime('%Y-%m-%d')\n\ntoday_df = df[df['Date'] == yesterday]\ntop_10 = today_df.sort_values(['Confirmed'], ascending=False)[:10]\ntop_10.loc['rest-of-world'] = today_df.sort_values(['Confirmed'], ascending=False)[10:].sum()\ntop_10.loc['rest-of-world', 'Country'] = 'Rest of World'\n\nfig = plt.figure(figsize=(10,10))\nax = fig.add_subplot(111)\n\nax.pie(top_10['Confirmed'], labels=top_10['Country'], autopct = '%1.1f%%')\nax.title.set_text('Hardest Hit Countries Worldwide')\n\n# This code shows the legend of the graph and plot everything\n\nplt.legend(loc='upper left')\nplt.show()\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "pandas.read_csv" ] ]