Columns: repo_name (string, lengths 6–130), hexsha (list), file_path (list), code (list), apis (list)
HoSyTuyen/Federated_object_detection
[ "78cdb88b1b185db0545e9ed32efe73fe08042989" ]
[ "model/model_wrapper.py" ]
[ "import json\nimport numpy\nimport logging\nimport sys\nimport os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom model.yolo import Darknet\nfrom utils.utils import *\nfrom utils.datasets import *\nfrom data.dataset import Dataset, TestDataset\nfrom utils import array_tool as at\nfrom utils.config import opt\nfrom model import FasterRCNNVGG16\nfrom model.faster_rcnn_trainer import FasterRCNNTrainer\nfrom utils.eval_tool import eval_detection_voc\n\n\nsys.path.append(\"\")\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\ntorch.set_num_threads(4)\ntorch.manual_seed(1234)\ntorch.cuda.manual_seed(1234)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnumpy.random.seed(1234)\n\n\ndef load_json(filename):\n with open(filename) as f:\n return json.load(f)\n\n\nclass Yolo(object):\n def __init__(self, task_config):\n self.task_config = task_config\n self.model_config = load_json(task_config['model_config'])\n print(self.model_config)\n if 'train' in self.task_config:\n self.dataset = ListDataset(self.task_config['train'],\n augment=True,\n multiscale=self.model_config['multiscale_training'])\n logging.info('load data')\n self.dataloader = DataLoader(self.dataset,\n batch_size=self.task_config['batch_size'],\n shuffle=True,\n num_workers=self.task_config['n_cpu'],\n collate_fn=self.dataset.collate_fn)\n # TODO: add a valset for validate\n self.testset = ListDataset(self.task_config['test'],\n augment=False,\n multiscale=False)\n self.test_dataloader = DataLoader(\n self.testset,\n batch_size=self.task_config['batch_size'],\n num_workers=1,\n shuffle=False,\n collate_fn=self.testset.collate_fn\n )\n self.train_size = self.dataset.__len__()\n print(\"train_size:\", self.train_size)\n self.valid_size = self.testset.__len__()\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.yolo = Darknet(self.model_config['model_def']).to(self.device)\n assert os.path.exists(self.model_config['pretrained_weights'])\n self.yolo.load_darknet_weights(self.model_config['pretrained_weights'])\n logging.info('model construct completed')\n self.best_map = 0\n self.optimizer = torch.optim.Adam(self.yolo.parameters())\n\n def get_weights(self):\n params = [param.data.cpu().numpy()\n for param in self.yolo.parameters()]\n return params\n\n def set_weights(self, parameters):\n for i, param in enumerate(self.yolo.parameters()):\n param_ = torch.from_numpy(parameters[i]).cuda()\n param.data.copy_(param_)\n\n def train_one_epoch(self):\n \"\"\"\n Return:\n total_loss: the total loss during training\n accuracy: the mAP\n \"\"\"\n self.yolo.train()\n for batch_i, (_, imgs, targets) in enumerate(self.dataloader):\n batches_done = len(self.dataloader) * 1 + batch_i\n imgs = Variable(imgs.to(self.device))\n targets = Variable(targets.to(self.device), requires_grad=False)\n loss, outputs = self.yolo(imgs, targets)\n loss.backward()\n if batch_i % 10 == 0:\n print(\"step: {} | loss: {:.4f}\".format(batch_i, loss.item()))\n if batches_done % self.model_config[\"gradient_accumulations\"]:\n # Accumulates gradient before each step\n self.optimizer.step()\n self.optimizer.zero_grad()\n return loss.item()\n\n def eval(self, dataloader, yolo, test_num=10000):\n labels = []\n sample_metrics = [] # List of tuples (TP, confs, pred)\n total_losses = list()\n Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc=\"Detecting 
objects\")):\n # Extract labels\n labels += targets[:, 1].tolist()\n # Rescale target\n targets = Variable(targets.to(self.device), requires_grad=False)\n\n imgs = Variable(imgs.type(Tensor), requires_grad=False)\n with torch.no_grad():\n loss, outputs = yolo(imgs, targets)\n outputs = non_max_suppression(outputs, conf_thres=0.5, nms_thres=0.5)\n total_losses.append(loss.item())\n targets = targets.to(\"cpu\")\n targets[:, 2:] = xywh2xyxy(targets[:, 2:])\n targets[:, 2:] *= int(self.model_config['img_size'])\n sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=0.5)\n if len(sample_metrics) > 0:\n true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]\n precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)\n else:\n return 0.0, 0.0, 0.0\n total_loss = sum(total_losses) / len(total_losses)\n return total_loss, AP.mean(), recall.mean()\n\n def validate(self):\n \"\"\"\n In the current version, the validate dataset hasn't been set, \n so we use the first 500 samples of testing set instead.\n \"\"\"\n print(\"run validation\")\n return self.evaluate(500)\n\n def evaluate(self, test_num=10000):\n \"\"\"\n Return:\n total_loss: the average loss\n accuracy: the evaluation map\n \"\"\"\n total_loss, mAP, recall = self.eval(\n self.test_dataloader, self.yolo, test_num)\n return total_loss, mAP, recall\n\n\nclass FasterRCNN(object):\n \"\"\"\n In fasterRCNN model, we only return the total loss, calculated from:\n rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss,\n and mAP@0.5\n \"\"\"\n\n def __init__(self, task_config):\n self.model_config = load_json(task_config['model_config_file'])\n self.model_config['voc_data_dir'] = task_config['data_path']\n self.opt = opt\n self.opt.log_filename = task_config['log_filename']\n self.opt._parse(self.model_config)\n self.dataset = Dataset(self.opt)\n logging.info('load data')\n self.dataloader = DataLoader(self.dataset,\n batch_size=self.model_config['batch_size'],\n shuffle=True,\n num_workers=self.opt.num_workers)\n # TODO: add a valset for validate\n self.testset = TestDataset(self.opt)\n self.test_dataloader = DataLoader(\n self.testset,\n batch_size=self.model_config['batch_size'],\n num_workers=self.opt.test_num_workers,\n shuffle=False,\n pin_memory=True\n )\n self.train_size = self.dataset.__len__()\n self.valid_size = self.testset.__len__()\n self.faster_rcnn = FasterRCNNVGG16()\n logging.info('model construct completed')\n self.trainer = FasterRCNNTrainer(\n self.faster_rcnn, self.opt.log_filename\n ).cuda()\n if self.opt.load_path:\n self.trainer.load(self.opt.load_path)\n logging.info('load pretrained model from %s' % self.opt.load_path)\n self.best_map = 0\n self.lr_ = self.opt.lr\n\n def get_weights(self):\n params = [param.data.cpu().numpy()\n for param in self.faster_rcnn.parameters()]\n return params\n\n def set_weights(self, parameters):\n for i, param in enumerate(self.faster_rcnn.parameters()):\n param_ = torch.from_numpy(parameters[i]).cuda()\n param.data.copy_(param_)\n\n def train_one_epoch(self):\n \"\"\"\n Return:\n total_loss: the total loss during training\n accuracy: the mAP\n \"\"\"\n pred_bboxes, pred_labels, pred_scores = list(), list(), list()\n gt_bboxes, gt_labels, gt_difficults = list(), list(), list()\n self.trainer.reset_meters()\n for ii, (img, sizes, bbox_, label_, scale, gt_difficults_) in \\\n tqdm.tqdm(enumerate(self.dataloader)):\n scale = at.scalar(scale)\n img, bbox, label = img.cuda().float(), 
bbox_.cuda(), label_.cuda()\n self.trainer.train_step(img, bbox, label, scale)\n if (ii + 1) % self.opt.plot_every == 0:\n sizes = [sizes[0][0].item(), sizes[1][0].item()]\n pred_bboxes_, pred_labels_, pred_scores_ = \\\n self.faster_rcnn.predict(img, [sizes])\n pred_bboxes += pred_bboxes_\n pred_labels += pred_labels_\n pred_scores += pred_scores_\n gt_bboxes += list(bbox_.numpy())\n gt_labels += list(label_.numpy())\n gt_difficults += list(gt_difficults_.numpy())\n\n return self.trainer.get_meter_data()['total_loss']\n\n def eval(self, dataloader, faster_rcnn, test_num=10000):\n pred_bboxes, pred_labels, pred_scores = list(), list(), list()\n gt_bboxes, gt_labels, gt_difficults = list(), list(), list()\n total_losses = list()\n for ii, (imgs, sizes, gt_bboxes_, gt_labels_, scale, gt_difficults_) \\\n in tqdm.tqdm(enumerate(dataloader)):\n img = imgs.cuda().float()\n bbox = gt_bboxes_.cuda()\n label = gt_labels_.cuda()\n sizes = [sizes[0][0].item(), sizes[1][0].item()]\n pred_bboxes_, pred_labels_, pred_scores_ = \\\n faster_rcnn.predict(imgs, [sizes])\n losses = self.trainer.forward(img, bbox, label, float(scale))\n total_losses.append(losses.total_loss.item())\n gt_bboxes += list(gt_bboxes_.numpy())\n gt_labels += list(gt_labels_.numpy())\n gt_difficults += list(gt_difficults_.numpy())\n pred_bboxes += pred_bboxes_\n pred_labels += pred_labels_\n pred_scores += pred_scores_\n if ii == test_num: break\n\n result = eval_detection_voc(\n pred_bboxes, pred_labels, pred_scores,\n gt_bboxes, gt_labels, gt_difficults,\n use_07_metric=False)\n total_loss = sum(total_losses) / len(total_losses)\n return total_loss, result\n\n def validate(self):\n \"\"\"\n In the current version, the validate dataset hasn't been set,\n so we use the first 500 samples of testing set instead.\n \"\"\"\n print(\"run validation\")\n return self.evaluate(500)\n\n def evaluate(self, test_num=10000):\n \"\"\"\n Return:\n total_loss: the average loss\n accuracy: the evaluation map\n \"\"\"\n total_loss, eval_result = self.eval(\n self.test_dataloader, self.trainer.faster_rcnn, test_num)\n return total_loss, eval_result['map'], eval_result['mrec']\n\n\nclass Models:\n Yolo = Yolo\n FasterRCNN = FasterRCNN\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "torch.no_grad", "torch.from_numpy", "torch.manual_seed", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.set_num_threads" ] ]
hellohawaii/BasicSR
[ "2ef0c0e8f6844aed07655c340f3f6adc333c18a2" ]
[ "codes/scripts/transfer_params.py" ]
[ "import torch\r\n\r\npretrained_net = torch.load('../../experiments/pretrained_models/SRResNet_bicx2_in3nf64nb16.pth')\r\n# should run train debug mode first to get an initial model\r\ncrt_net = torch.load('../../experiments/debug_SRResNet_bicx4_in3nf64nb16/models/8_G.pth')\r\n\r\nfor k, v in crt_net.items():\r\n print(k)\r\nfor k, v in crt_net.items():\r\n if k in pretrained_net:\r\n crt_net[k] = pretrained_net[k]\r\n print('replace ... ', k)\r\n\r\n# x2 -> x4\r\ncrt_net['model.5.weight'] = pretrained_net['model.2.weight']\r\ncrt_net['model.5.bias'] = pretrained_net['model.2.bias']\r\ncrt_net['model.8.weight'] = pretrained_net['model.5.weight']\r\ncrt_net['model.8.bias'] = pretrained_net['model.5.bias']\r\ncrt_net['model.10.weight'] = pretrained_net['model.7.weight']\r\ncrt_net['model.10.bias'] = pretrained_net['model.7.bias']\r\ntorch.save(crt_net, '../pretrained_tmp.pth')\r\n\r\n# x2 -> x3\r\n'''\r\nin_filter = pretrained_net['model.2.weight'] # 256, 64, 3, 3\r\nnew_filter = torch.Tensor(576, 64, 3, 3)\r\nnew_filter[0:256, :, :, :] = in_filter\r\nnew_filter[256:512, :, :, :] = in_filter\r\nnew_filter[512:, :, :, :] = in_filter[0:576-512, :, :, :]\r\ncrt_net['model.2.weight'] = new_filter\r\n\r\nin_bias = pretrained_net['model.2.bias'] # 256, 64, 3, 3\r\nnew_bias = torch.Tensor(576)\r\nnew_bias[0:256] = in_bias\r\nnew_bias[256:512] = in_bias\r\nnew_bias[512:] = in_bias[0:576 - 512]\r\ncrt_net['model.2.bias'] = new_bias\r\n\r\ntorch.save(crt_net, '../pretrained_tmp.pth')\r\n'''\r\n\r\n# x2 -> x8\r\n'''\r\ncrt_net['model.5.weight'] = pretrained_net['model.2.weight']\r\ncrt_net['model.5.bias'] = pretrained_net['model.2.bias']\r\ncrt_net['model.8.weight'] = pretrained_net['model.2.weight']\r\ncrt_net['model.8.bias'] = pretrained_net['model.2.bias']\r\ncrt_net['model.11.weight'] = pretrained_net['model.5.weight']\r\ncrt_net['model.11.bias'] = pretrained_net['model.5.bias']\r\ncrt_net['model.13.weight'] = pretrained_net['model.7.weight']\r\ncrt_net['model.13.bias'] = pretrained_net['model.7.bias']\r\ntorch.save(crt_net, '../pretrained_tmp.pth')\r\n'''\r\n\r\n# x3/4/8 RGB -> Y\r\n'''\r\n\r\nin_filter = pretrained_net['model.0.weight']\r\nin_new_filter = in_filter[:,0,:,:]*0.2989 + in_filter[:,1,:,:]*0.587 + in_filter[:,2,:,:]*0.114\r\nin_new_filter.unsqueeze_(1)\r\ncrt_net['model.0.weight'] = in_new_filter\r\n\r\nout_filter = pretrained_net['model.13.weight']\r\nout_new_filter = out_filter[0, :, :, :] * 0.2989 + out_filter[1, :, :, :] * 0.587 + \\\r\n out_filter[2, :, :, :] * 0.114\r\nout_new_filter.unsqueeze_(0)\r\ncrt_net['model.13.weight'] = out_new_filter\r\nout_bias = pretrained_net['model.13.bias']\r\nout_new_bias = out_bias[0] * 0.2989 + out_bias[1] * 0.587 + out_bias[2] * 0.114\r\nout_new_bias = torch.Tensor(1).fill_(out_new_bias)\r\ncrt_net['model.13.bias'] = out_new_bias\r\ntorch.save(crt_net, '../pretrained_tmp.pth')\r\n'''\r\n" ]
[ [ "torch.save", "torch.load" ] ]
ioni-garfunkel/great_expectations
[ "cf221fb6eb7c3242cbda1b6fb1c02150a9cec65b" ]
[ "great_expectations/validator/validator.py" ]
[ "import copy\nimport datetime\nimport inspect\nimport json\nimport logging\nimport traceback\nimport warnings\nfrom collections import defaultdict, namedtuple\nfrom collections.abc import Hashable\nfrom typing import Any, Dict, Iterable, List, Optional, Set\n\nimport pandas as pd\nfrom dateutil.parser import parse\nfrom tqdm.auto import tqdm\n\nfrom great_expectations import __version__ as ge_version\nfrom great_expectations.core.batch import Batch\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.core.expectation_suite import (\n ExpectationSuite,\n expectationSuiteSchema,\n)\nfrom great_expectations.core.expectation_validation_result import (\n ExpectationSuiteValidationResult,\n ExpectationValidationResult,\n)\nfrom great_expectations.core.id_dict import BatchSpec\nfrom great_expectations.core.run_identifier import RunIdentifier\nfrom great_expectations.data_asset.util import recursively_convert_to_json_serializable\nfrom great_expectations.dataset import PandasDataset, SparkDFDataset, SqlAlchemyDataset\nfrom great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference\nfrom great_expectations.exceptions import (\n GreatExpectationsError,\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.execution_engine.pandas_batch_data import PandasBatchData\nfrom great_expectations.expectations.registry import (\n get_expectation_impl,\n get_metric_provider,\n list_registered_expectation_implementations,\n)\nfrom great_expectations.marshmallow__shade import ValidationError\nfrom great_expectations.types import ClassConfig\nfrom great_expectations.util import load_class, verify_dynamic_loading_support\nfrom great_expectations.validator.validation_graph import (\n MetricConfiguration,\n MetricEdge,\n ValidationGraph,\n)\n\nlogger = logging.getLogger(__name__)\nlogging.captureWarnings(True)\n\n\nclass Validator:\n def __init__(\n self,\n execution_engine,\n interactive_evaluation=True,\n expectation_suite=None,\n expectation_suite_name=None,\n data_context=None,\n batches=None,\n **kwargs,\n ):\n \"\"\"\n Initialize the DataAsset.\n\n :param profiler (profiler class) = None: The profiler that should be run on the data_asset to\n build a baseline expectation suite.\n\n Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a\n Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments\n so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of\n *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the\n support for the profiler parameter not obvious from the signature.\n\n \"\"\"\n\n self._data_context = data_context\n self._execution_engine = execution_engine\n self._expose_dataframe_methods = False\n self._validator_config = {}\n\n if batches is None:\n batches = tuple()\n\n self._batches = dict()\n\n for batch in batches:\n assert isinstance(\n batch, Batch\n ), \"batches provided to Validator must be Great Expectations Batch objects\"\n self._execution_engine.load_batch_data(batch.id, batch.data)\n self._batches[batch.id] = batch\n\n if len(batches) > 1:\n logger.warning(\n f\"{len(batches)} batches will be added to this Validator. 
The batch_identifiers for the active \"\n f\"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}\"\n )\n\n self.interactive_evaluation = interactive_evaluation\n self._initialize_expectations(\n expectation_suite=expectation_suite,\n expectation_suite_name=expectation_suite_name,\n )\n self._default_expectation_args = {\n \"include_config\": True,\n \"catch_exceptions\": False,\n \"result_format\": \"BASIC\",\n }\n self._validator_config = {}\n\n # This special state variable tracks whether a validation run is going on, which will disable\n # saving expectation config objects\n self._active_validation = False\n if self._data_context and hasattr(\n self._data_context, \"_expectation_explorer_manager\"\n ):\n # TODO: verify flow of default expectation arguments\n self.set_default_expectation_argument(\"include_config\", True)\n\n def __dir__(self):\n \"\"\"\n This custom magic method is used to enable expectation tab completion on Validator objects.\n It also allows users to call Pandas.DataFrame methods on Validator objects\n \"\"\"\n validator_attrs = set(super().__dir__())\n class_expectation_impls = set(list_registered_expectation_implementations())\n # execution_engine_expectation_impls = (\n # {\n # attr_name\n # for attr_name in self.execution_engine.__dir__()\n # if attr_name.startswith(\"expect_\")\n # }\n # if self.execution_engine\n # else set()\n # )\n\n combined_dir = (\n validator_attrs\n | class_expectation_impls\n # | execution_engine_expectation_impls\n )\n\n if self._expose_dataframe_methods:\n combined_dir | set(dir(pd.DataFrame))\n\n return list(combined_dir)\n\n @property\n def expose_dataframe_methods(self):\n return self._expose_dataframe_methods\n\n @expose_dataframe_methods.setter\n def expose_dataframe_methods(self, value: bool):\n self._expose_dataframe_methods = value\n\n def __getattr__(self, name):\n if name.startswith(\"expect_\") and get_expectation_impl(name):\n return self.validate_expectation(name)\n elif (\n self._expose_dataframe_methods\n and isinstance(self.active_batch.data, PandasBatchData)\n and hasattr(pd.DataFrame, name)\n ):\n return getattr(self.active_batch.data.dataframe, name)\n else:\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{name}'\"\n )\n\n def validate_expectation(self, name):\n \"\"\"\n Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the expectation's\n validate method to obtain a validation result. 
Also adds in the runtime configuration\n\n Args:\n name (str): The name of the Expectation being validated\n\n Returns:\n The Expectation's validation result\n \"\"\"\n\n def inst_expectation(*args, **kwargs):\n try:\n expectation_impl = get_expectation_impl(name)\n allowed_config_keys = expectation_impl.get_allowed_config_keys()\n expectation_kwargs = recursively_convert_to_json_serializable(kwargs)\n meta = None\n # This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing\n # positional arguments to expectation methods\n legacy_arg_names = expectation_impl.legacy_method_parameters.get(\n name, tuple()\n )\n for idx, arg in enumerate(args):\n try:\n arg_name = legacy_arg_names[idx]\n if arg_name in allowed_config_keys:\n expectation_kwargs[arg_name] = arg\n if arg_name == \"meta\":\n meta = arg\n except IndexError:\n raise InvalidExpectationConfigurationError(\n f\"Invalid positional argument: {arg}\"\n )\n\n # this is used so that exceptions are caught appropriately when they occur in expectation config\n basic_runtime_configuration = {\n k: v\n for k, v in kwargs.items()\n if k in (\"result_format\", \"include_config\", \"catch_exceptions\")\n }\n\n configuration = ExpectationConfiguration(\n expectation_type=name, kwargs=expectation_kwargs, meta=meta\n )\n\n # runtime_configuration = configuration.get_runtime_kwargs()\n expectation = expectation_impl(configuration)\n \"\"\"Given an implementation and a configuration for any Expectation, returns its validation result\"\"\"\n\n if not self.interactive_evaluation and not self._active_validation:\n validation_result = ExpectationValidationResult(\n expectation_config=copy.deepcopy(expectation.configuration)\n )\n else:\n validation_result = expectation.validate(\n validator=self,\n evaluation_parameters=self._expectation_suite.evaluation_parameters,\n data_context=self._data_context,\n runtime_configuration=basic_runtime_configuration,\n )\n\n # If validate has set active_validation to true, then we do not save the config to avoid\n # saving updating expectation configs to the same suite during validation runs\n if self._active_validation is True:\n stored_config = configuration.get_raw_configuration()\n else:\n # Append the expectation to the config.\n stored_config = self._expectation_suite.add_expectation(\n configuration.get_raw_configuration()\n )\n\n # If there was no interactive evaluation, success will not have been computed.\n if validation_result.success is not None:\n # Add a \"success\" object to the config\n stored_config.success_on_last_run = validation_result.success\n\n if self._data_context is not None:\n validation_result = self._data_context.update_return_obj(\n self, validation_result\n )\n\n except Exception as err:\n if basic_runtime_configuration.get(\"catch_exceptions\"):\n raised_exception = True\n exception_traceback = traceback.format_exc()\n exception_message = \"{}: {}\".format(type(err).__name__, str(err))\n\n validation_result = ExpectationValidationResult(success=False)\n\n validation_result.exception_info = {\n \"raised_exception\": raised_exception,\n \"exception_message\": exception_message,\n \"exception_traceback\": exception_traceback,\n }\n\n else:\n raise err\n return validation_result\n\n inst_expectation.__name__ = name\n return inst_expectation\n\n @property\n def execution_engine(self):\n \"\"\"Returns the execution engine being used by the validator at the given time\"\"\"\n return self._execution_engine\n\n def 
list_available_expectation_types(self):\n \"\"\" Returns a list of all expectations available to the validator\"\"\"\n keys = dir(self)\n return [\n expectation for expectation in keys if expectation.startswith(\"expect_\")\n ]\n\n def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]:\n \"\"\"Return a dictionary with the requested metrics\"\"\"\n graph = ValidationGraph()\n resolved_metrics = {}\n for metric_name, metric_configuration in metrics.items():\n provider_cls, _ = get_metric_provider(\n metric_configuration.metric_name, self.execution_engine\n )\n for key in provider_cls.domain_keys:\n if (\n key not in metric_configuration.metric_domain_kwargs\n and key in provider_cls.default_kwarg_values\n ):\n metric_configuration.metric_domain_kwargs[\n key\n ] = provider_cls.default_kwarg_values[key]\n for key in provider_cls.value_keys:\n if (\n key not in metric_configuration.metric_value_kwargs\n and key in provider_cls.default_kwarg_values\n ):\n metric_configuration.metric_value_kwargs[\n key\n ] = provider_cls.default_kwarg_values[key]\n self.build_metric_dependency_graph(\n graph,\n child_node=metric_configuration,\n configuration=None,\n execution_engine=self._execution_engine,\n runtime_configuration=None,\n )\n self.resolve_validation_graph(graph, resolved_metrics)\n return {\n metric_name: resolved_metrics[metric_configuration.id]\n for (metric_name, metric_configuration) in metrics.items()\n }\n\n def get_metric(self, metric: MetricConfiguration) -> Any:\n \"\"\"return the value of the requested metric.\"\"\"\n return self.get_metrics({\"_\": metric})[\"_\"]\n\n def build_metric_dependency_graph(\n self,\n graph: ValidationGraph,\n child_node: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration],\n execution_engine: \"ExecutionEngine\",\n parent_node: Optional[MetricConfiguration] = None,\n runtime_configuration: Optional[dict] = None,\n ) -> None:\n \"\"\"Obtain domain and value keys for metrics and proceeds to add these metrics to the validation graph\n until all metrics have been added.\"\"\"\n\n # metric_kwargs = get_metric_kwargs(metric_name)\n metric_impl = get_metric_provider(\n child_node.metric_name, execution_engine=execution_engine\n )[0]\n metric_dependencies = metric_impl.get_evaluation_dependencies(\n metric=child_node,\n configuration=configuration,\n execution_engine=execution_engine,\n runtime_configuration=runtime_configuration,\n )\n child_node.metric_dependencies = metric_dependencies\n\n if parent_node:\n graph.add(\n MetricEdge(\n parent_node,\n child_node,\n )\n )\n\n if len(metric_dependencies) == 0:\n graph.add(\n MetricEdge(\n child_node,\n None,\n )\n )\n\n else:\n for metric_dependency in metric_dependencies.values():\n if metric_dependency.id == child_node.id:\n logger.warning(\n f\"Metric {str(child_node.id)} has created a circular dependency\"\n )\n continue\n self.build_metric_dependency_graph(\n graph,\n metric_dependency,\n configuration,\n execution_engine,\n child_node,\n runtime_configuration=runtime_configuration,\n )\n\n def graph_validate(\n self,\n configurations: List[ExpectationConfiguration],\n metrics: dict = None,\n runtime_configuration: dict = None,\n ) -> List[ExpectationValidationResult]:\n \"\"\"Obtains validation dependencies for each metric using the implementation of their associated expectation,\n then proceeds to add these dependencies to the validation graph, supply readily available metric implementations\n to fulfill current metric requirements, and validate these 
metrics.\n\n Args:\n batches (Dict[str, Batch]): A Dictionary of batches and their corresponding names that will be used\n for Expectation Validation.\n configurations(List[ExpectationConfiguration]): A list of needed Expectation Configurations that will\n be used to supply domain and values for metrics.\n execution_engine (ExecutionEngine): An Execution Engine that will be used for extraction of metrics\n from the registry.\n metrics (dict): A list of currently registered metrics in the registry\n runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics\n such as the result_format.\n\n Returns:\n A list of Validations, validating that all necessary metrics are available.\n \"\"\"\n graph = ValidationGraph()\n if runtime_configuration is None:\n runtime_configuration = dict()\n\n if runtime_configuration.get(\"catch_exceptions\", True):\n catch_exceptions = True\n else:\n catch_exceptions = False\n\n processed_configurations = []\n evrs = []\n for configuration in configurations:\n # Validating\n try:\n assert (\n configuration.expectation_type is not None\n ), \"Given configuration should include expectation type\"\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n\n expectation_impl = get_expectation_impl(configuration.expectation_type)\n validation_dependencies = expectation_impl().get_validation_dependencies(\n configuration, self._execution_engine, runtime_configuration\n )[\"metrics\"]\n\n try:\n for metric in validation_dependencies.values():\n self.build_metric_dependency_graph(\n graph,\n metric,\n configuration,\n self._execution_engine,\n runtime_configuration=runtime_configuration,\n )\n processed_configurations.append(configuration)\n except Exception as err:\n if catch_exceptions:\n raised_exception = True\n exception_traceback = traceback.format_exc()\n result = ExpectationValidationResult(\n success=False,\n exception_info={\n \"raised_exception\": raised_exception,\n \"exception_traceback\": exception_traceback,\n \"exception_message\": str(err),\n },\n )\n evrs.append(result)\n else:\n raise err\n\n if metrics is None:\n metrics = dict()\n\n metrics = self.resolve_validation_graph(graph, metrics, runtime_configuration)\n for configuration in processed_configurations:\n try:\n result = configuration.metrics_validate(\n metrics,\n execution_engine=self._execution_engine,\n runtime_configuration=runtime_configuration,\n )\n evrs.append(result)\n except Exception as err:\n if catch_exceptions:\n raised_exception = True\n exception_traceback = traceback.format_exc()\n\n result = ExpectationValidationResult(\n success=False,\n exception_info={\n \"raised_exception\": raised_exception,\n \"exception_traceback\": exception_traceback,\n \"exception_message\": str(err),\n },\n )\n evrs.append(result)\n else:\n raise err\n return evrs\n\n def resolve_validation_graph(self, graph, metrics, runtime_configuration=None):\n done: bool = False\n pbar = None\n while not done:\n ready_metrics, needed_metrics = self._parse_validation_graph(graph, metrics)\n if pbar is None:\n pbar = tqdm(\n total=len(ready_metrics) + len(needed_metrics),\n desc=\"Calculating Metrics\",\n disable=len(graph._edges) < 3,\n )\n pbar.update(0)\n metrics.update(\n self._resolve_metrics(\n execution_engine=self._execution_engine,\n metrics_to_resolve=ready_metrics,\n metrics=metrics,\n runtime_configuration=runtime_configuration,\n )\n )\n pbar.update(len(ready_metrics))\n if len(ready_metrics) + len(needed_metrics) == 0:\n done = True\n 
pbar.close()\n\n return metrics\n\n def _parse_validation_graph(self, validation_graph, metrics):\n \"\"\"Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of\n validation graph (a graph structure of metric ids) edges\"\"\"\n unmet_dependency_ids = set()\n unmet_dependency = set()\n maybe_ready_ids = set()\n maybe_ready = set()\n\n for edge in validation_graph.edges:\n if edge.left.id not in metrics:\n if edge.right is None or edge.right.id in metrics:\n if edge.left.id not in maybe_ready_ids:\n maybe_ready_ids.add(edge.left.id)\n maybe_ready.add(edge.left)\n else:\n if edge.left.id not in unmet_dependency_ids:\n unmet_dependency_ids.add(edge.left.id)\n unmet_dependency.add(edge.left)\n\n return maybe_ready - unmet_dependency, unmet_dependency\n\n def _resolve_metrics(\n self,\n execution_engine: \"ExecutionEngine\",\n metrics_to_resolve: Iterable[MetricConfiguration],\n metrics: Dict,\n runtime_configuration: dict = None,\n ):\n \"\"\"A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are\n resolved\"\"\"\n return execution_engine.resolve_metrics(\n metrics_to_resolve, metrics, runtime_configuration\n )\n\n def _initialize_expectations(\n self,\n expectation_suite: ExpectationSuite = None,\n expectation_suite_name: str = None,\n ):\n \"\"\"Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.\n In addition, this always sets the `default_expectation_args` to:\n `include_config`: False,\n `catch_exceptions`: False,\n `output_format`: 'BASIC'\n\n By default, initializes data_asset_type to the name of the implementing class, but subclasses\n that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their\n interoperability.\n\n Args:\n expectation_suite (json): \\\n A json-serializable expectation config. 
\\\n If None, creates default `_expectation_suite` with an empty list of expectations and \\\n key value `data_asset_name` as `data_asset_name`.\n\n expectation_suite_name (string): \\\n The name to assign to the `expectation_suite.expectation_suite_name`\n\n Returns:\n None\n \"\"\"\n # Checking type of expectation_suite.\n # Check for expectation_suite_name is already done by ExpectationSuiteIdentifier\n if expectation_suite and not isinstance(expectation_suite, ExpectationSuite):\n raise TypeError(\n \"expectation_suite must be of type ExpectationSuite, not {}\".format(\n type(expectation_suite)\n )\n )\n if expectation_suite is not None:\n if isinstance(expectation_suite, dict):\n expectation_suite = expectationSuiteSchema.load(expectation_suite)\n else:\n expectation_suite = copy.deepcopy(expectation_suite)\n self._expectation_suite = expectation_suite\n\n if expectation_suite_name is not None:\n if (\n self._expectation_suite.expectation_suite_name\n != expectation_suite_name\n ):\n logger.warning(\n \"Overriding existing expectation_suite_name {n1} with new name {n2}\".format(\n n1=self._expectation_suite.expectation_suite_name,\n n2=expectation_suite_name,\n )\n )\n self._expectation_suite.expectation_suite_name = expectation_suite_name\n\n else:\n if expectation_suite_name is None:\n expectation_suite_name = \"default\"\n self._expectation_suite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n self._expectation_suite.execution_engine_type = type(\n self.execution_engine\n ).__name__\n\n def append_expectation(self, expectation_config):\n \"\"\"This method is a thin wrapper for ExpectationSuite.append_expectation\"\"\"\n warnings.warn(\n \"append_expectation is deprecated, and will be removed in a future release. \"\n + \"Please use ExpectationSuite.add_expectation instead.\",\n DeprecationWarning,\n )\n self._expectation_suite.append_expectation(expectation_config)\n\n def find_expectation_indexes(\n self,\n expectation_configuration: ExpectationConfiguration,\n match_type: str = \"domain\",\n ) -> List[int]:\n \"\"\"This method is a thin wrapper for ExpectationSuite.find_expectation_indexes\"\"\"\n warnings.warn(\n \"find_expectation_indexes is deprecated, and will be removed in a future release. \"\n + \"Please use ExpectationSuite.find_expectation_indexes instead.\",\n DeprecationWarning,\n )\n return self._expectation_suite.find_expectation_indexes(\n expectation_configuration=expectation_configuration, match_type=match_type\n )\n\n def find_expectations(\n self,\n expectation_configuration: ExpectationConfiguration,\n match_type: str = \"domain\",\n ) -> List[ExpectationConfiguration]:\n \"\"\"This method is a thin wrapper for ExpectationSuite.find_expectations()\"\"\"\n warnings.warn(\n \"find_expectations is deprecated, and will be removed in a future release. \"\n + \"Please use ExpectationSuite.find_expectation_indexes instead.\",\n DeprecationWarning,\n )\n return self._expectation_suite.find_expectations(\n expectation_configuration=expectation_configuration, match_type=match_type\n )\n\n def remove_expectation(\n self,\n expectation_configuration: ExpectationConfiguration,\n match_type: str = \"domain\",\n remove_multiple_matches: bool = False,\n ) -> List[ExpectationConfiguration]:\n \"\"\"This method is a thin wrapper for ExpectationSuite.remove()\"\"\"\n warnings.warn(\n \"DataAsset.remove_expectations is deprecated, and will be removed in a future release. 
\"\n + \"Please use ExpectationSuite.remove_expectation instead.\",\n DeprecationWarning,\n )\n return self._expectation_suite.remove_expectation(\n expectation_configuration=expectation_configuration,\n match_type=match_type,\n remove_multiple_matches=remove_multiple_matches,\n )\n\n def set_config_value(self, key, value):\n \"\"\"Setter for config value\"\"\"\n self._validator_config[key] = value\n\n def get_config_value(self, key):\n \"\"\"Getter for config value\"\"\"\n return self._validator_config.get(key)\n\n def load_batch(self, batch_list: List[Batch]):\n for batch in batch_list:\n self._execution_engine.load_batch_data(batch.id, batch.data)\n self._batches[batch.id] = batch\n # We set the active_batch_id in each iteration of the loop to keep in sync with the active_batch_id for the\n # execution_engine. The final active_batch_id will be that of the final batch loaded.\n self.active_batch_id = batch.id\n\n return batch_list\n\n @property\n def batches(self) -> Dict[str, Batch]:\n \"\"\"Getter for batches\"\"\"\n return self._batches\n\n @property\n def loaded_batch_ids(self) -> List[str]:\n return self.execution_engine.loaded_batch_data_ids\n\n @property\n def active_batch(self) -> Batch:\n \"\"\"Getter for active batch\"\"\"\n active_batch_id: str = self.execution_engine.active_batch_data_id\n batch: Batch = self.batches.get(active_batch_id) if active_batch_id else None\n return batch\n\n @property\n def active_batch_spec(self) -> Optional[BatchSpec]:\n \"\"\"Getter for active batch's batch_spec\"\"\"\n if not self.active_batch:\n return None\n else:\n return self.active_batch.batch_spec\n\n @property\n def active_batch_id(self) -> str:\n \"\"\"Getter for active batch id\"\"\"\n return self.execution_engine.active_batch_data_id\n\n @active_batch_id.setter\n def active_batch_id(self, batch_id: str):\n assert set(self.batches.keys()).issubset(set(self.loaded_batch_ids))\n available_batch_ids: Set[str] = set(self.batches.keys()).union(\n set(self.loaded_batch_ids)\n )\n if batch_id not in available_batch_ids:\n raise ValueError(\n f\"\"\"batch_id {batch_id} not found in loaded batches. 
Batches must first be loaded before they can be \\\nset as active.\n\"\"\"\n )\n else:\n self.execution_engine._active_batch_data_id = batch_id\n\n @property\n def active_batch_markers(self):\n \"\"\"Getter for active batch's batch markers\"\"\"\n if not self.active_batch:\n return None\n else:\n return self.active_batch.batch_markers\n\n @property\n def active_batch_definition(self):\n \"\"\"Getter for the active batch's batch definition\"\"\"\n if not self.active_batch:\n return None\n else:\n return self.active_batch.batch_definition\n\n def discard_failing_expectations(self):\n \"\"\"Removes any expectations from the validator where the validation has failed\"\"\"\n res = self.validate(only_return_failures=True).results\n if any(res):\n for item in res:\n self.remove_expectation(\n expectation_configuration=item.expectation_config,\n match_type=\"runtime\",\n )\n warnings.warn(\"Removed %s expectations that were 'False'\" % len(res))\n\n def get_default_expectation_arguments(self):\n \"\"\"Fetch default expectation arguments for this data_asset\n\n Returns:\n A dictionary containing all the current default expectation arguments for a data_asset\n\n Ex::\n\n {\n \"include_config\" : True,\n \"catch_exceptions\" : False,\n \"result_format\" : 'BASIC'\n }\n\n See also:\n set_default_expectation_arguments\n \"\"\"\n return self._default_expectation_args\n\n @property\n def default_expectation_args(self):\n \"\"\"A getter for default Expectation arguments\"\"\"\n return self._default_expectation_args\n\n def set_default_expectation_argument(self, argument, value):\n \"\"\"\n Set a default expectation argument for this data_asset\n\n Args:\n argument (string): The argument to be replaced\n value : The New argument to use for replacement\n\n Returns:\n None\n\n See also:\n get_default_expectation_arguments\n \"\"\"\n\n self._default_expectation_args[argument] = value\n\n def get_expectations_config(\n self,\n discard_failed_expectations=True,\n discard_result_format_kwargs=True,\n discard_include_config_kwargs=True,\n discard_catch_exceptions_kwargs=True,\n suppress_warnings=False,\n ):\n \"\"\"\n Returns an expectation configuration, providing an option to discard failed expectation and discard/ include'\n different result aspects, such as exceptions and result format.\n \"\"\"\n warnings.warn(\n \"get_expectations_config is deprecated, and will be removed in a future release. \"\n + \"Please use get_expectation_suite instead.\",\n DeprecationWarning,\n )\n return self.get_expectation_suite(\n discard_failed_expectations,\n discard_result_format_kwargs,\n discard_include_config_kwargs,\n discard_catch_exceptions_kwargs,\n suppress_warnings,\n )\n\n def get_expectation_suite(\n self,\n discard_failed_expectations=True,\n discard_result_format_kwargs=True,\n discard_include_config_kwargs=True,\n discard_catch_exceptions_kwargs=True,\n suppress_warnings=False,\n suppress_logging=False,\n ):\n \"\"\"Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\n Args:\n discard_failed_expectations (boolean): \\\n Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.\n discard_result_format_kwargs (boolean): \\\n In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.\n discard_include_config_kwargs (boolean): \\\n In returned expectation objects, suppress the `include_config` parameter. 
Defaults to `True`.\n discard_catch_exceptions_kwargs (boolean): \\\n In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.\n suppress_warnings (boolean): \\\n If true, do not include warnings in logging information about the operation.\n suppress_logging (boolean): \\\n If true, do not create a log entry (useful when using get_expectation_suite programmatically)\n\n Returns:\n An expectation suite.\n\n Note:\n get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \\\n copy of _expectation_suite, not the original object.\n \"\"\"\n\n expectation_suite = copy.deepcopy(self._expectation_suite)\n expectations = expectation_suite.expectations\n\n discards = defaultdict(int)\n\n if discard_failed_expectations:\n new_expectations = []\n\n for expectation in expectations:\n # Note: This is conservative logic.\n # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.\n # In cases where expectation.success is missing or None, expectations are *retained*.\n # Such a case could occur if expectations were loaded from a config file and never run.\n if expectation.success_on_last_run is False:\n discards[\"failed_expectations\"] += 1\n else:\n new_expectations.append(expectation)\n\n expectations = new_expectations\n\n message = \"\\t%d expectation(s) included in expectation_suite.\" % len(\n expectations\n )\n\n if discards[\"failed_expectations\"] > 0 and not suppress_warnings:\n message += (\n \" Omitting %d expectation(s) that failed when last run; set \"\n \"discard_failed_expectations=False to include them.\"\n % discards[\"failed_expectations\"]\n )\n\n for expectation in expectations:\n # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,\n # which calls _copy_and_clean_up_expectation\n expectation.success_on_last_run = None\n\n if discard_result_format_kwargs:\n if \"result_format\" in expectation.kwargs:\n del expectation.kwargs[\"result_format\"]\n discards[\"result_format\"] += 1\n\n if discard_include_config_kwargs:\n if \"include_config\" in expectation.kwargs:\n del expectation.kwargs[\"include_config\"]\n discards[\"include_config\"] += 1\n\n if discard_catch_exceptions_kwargs:\n if \"catch_exceptions\" in expectation.kwargs:\n del expectation.kwargs[\"catch_exceptions\"]\n discards[\"catch_exceptions\"] += 1\n\n settings_message = \"\"\n\n if discards[\"result_format\"] > 0 and not suppress_warnings:\n settings_message += \" result_format\"\n\n if discards[\"include_config\"] > 0 and not suppress_warnings:\n settings_message += \" include_config\"\n\n if discards[\"catch_exceptions\"] > 0 and not suppress_warnings:\n settings_message += \" catch_exceptions\"\n\n if (\n len(settings_message) > 1\n ): # Only add this if we added one of the settings above.\n settings_message += \" settings filtered.\"\n\n expectation_suite.expectations = expectations\n if not suppress_logging:\n logger.info(message + settings_message)\n return expectation_suite\n\n def save_expectation_suite(\n self,\n filepath=None,\n discard_failed_expectations=True,\n discard_result_format_kwargs=True,\n discard_include_config_kwargs=True,\n discard_catch_exceptions_kwargs=True,\n suppress_warnings=False,\n ):\n \"\"\"Writes ``_expectation_config`` to a JSON file.\n\n Writes the DataAsset's expectation config to the specified JSON ``filepath``. 
Failing expectations \\\n can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \\\n pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \\\n the JSON expectations config.\n\n Args:\n filepath (string): \\\n The location and name to write the JSON config file to.\n discard_failed_expectations (boolean): \\\n If True, excludes expectations that do not return ``success = True``. \\\n If False, all expectations are written to the JSON config file.\n discard_result_format_kwargs (boolean): \\\n If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \\\n file.\n discard_include_config_kwargs (boolean): \\\n If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \\\n file.\n discard_catch_exceptions_kwargs (boolean): \\\n If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \\\n config file.\n suppress_warnings (boolean): \\\n It True, all warnings raised by Great Expectations, as a result of dropped expectations, are \\\n suppressed.\n\n \"\"\"\n expectation_suite = self.get_expectation_suite(\n discard_failed_expectations,\n discard_result_format_kwargs,\n discard_include_config_kwargs,\n discard_catch_exceptions_kwargs,\n suppress_warnings,\n )\n if filepath is None and self._data_context is not None:\n self._data_context.save_expectation_suite(expectation_suite)\n elif filepath is not None:\n with open(filepath, \"w\") as outfile:\n json.dump(\n expectationSuiteSchema.dump(expectation_suite),\n outfile,\n indent=2,\n sort_keys=True,\n )\n else:\n raise ValueError(\n \"Unable to save config: filepath or data_context must be available.\"\n )\n\n def validate(\n self,\n expectation_suite=None,\n run_id=None,\n data_context=None,\n evaluation_parameters=None,\n catch_exceptions=True,\n result_format=None,\n only_return_failures=False,\n run_name=None,\n run_time=None,\n ):\n \"\"\"Generates a JSON-formatted report describing the outcome of all expectations.\n\n Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.\n\n Args:\n expectation_suite (json or None): \\\n If None, uses the expectations config generated with the DataAsset during the current session. \\\n If a JSON file, validates those expectations.\n run_name (str): \\\n Used to identify this validation result as part of a collection of validations. \\\n See DataContext for more information.\n data_context (DataContext): \\\n A datacontext object to use as part of validation for binding evaluation parameters and \\\n registering validation results.\n evaluation_parameters (dict or None): \\\n If None, uses the evaluation_paramters from the expectation_suite provided or as part of the \\\n data_asset. If a dict, uses the evaluation parameters in the dictionary.\n catch_exceptions (boolean): \\\n If True, exceptions raised by tests will not end validation and will be described in the returned \\\n report.\n result_format (string or None): \\\n If None, uses the default value ('BASIC' or as specified). \\\n If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \\\n etc.).\n only_return_failures (boolean): \\\n If True, expectation results are only returned when ``success = False`` \\\n\n Returns:\n A JSON-formatted dictionary containing a list of the validation results. 
\\\n An example of the returned format::\n\n {\n \"results\": [\n {\n \"unexpected_list\": [unexpected_value_1, unexpected_value_2],\n \"expectation_type\": \"expect_*\",\n \"kwargs\": {\n \"column\": \"Column_Name\",\n \"output_format\": \"SUMMARY\"\n },\n \"success\": true,\n \"raised_exception: false.\n \"exception_traceback\": null\n },\n {\n ... (Second expectation results)\n },\n ... (More expectations results)\n ],\n \"success\": true,\n \"statistics\": {\n \"evaluated_expectations\": n,\n \"successful_expectations\": m,\n \"unsuccessful_expectations\": n - m,\n \"success_percent\": m / n\n }\n }\n\n Notes:\n If the configuration object was built with a different version of great expectations then the \\\n current environment. If no version was found in the configuration file.\n\n Raises:\n AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError\n \"\"\"\n try:\n validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(\n \"%Y%m%dT%H%M%S.%fZ\"\n )\n assert not (run_id and run_name) and not (\n run_id and run_time\n ), \"Please provide either a run_id or run_name and/or run_time.\"\n if isinstance(run_id, str) and not run_name:\n warnings.warn(\n \"String run_ids will be deprecated in the future. Please provide a run_id of type \"\n \"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name \"\n \"and run_time (both optional). Instead of providing a run_id, you may also provide\"\n \"run_name and run_time separately.\",\n DeprecationWarning,\n )\n try:\n run_time = parse(run_id)\n except (ValueError, TypeError):\n pass\n run_id = RunIdentifier(run_name=run_id, run_time=run_time)\n elif isinstance(run_id, dict):\n run_id = RunIdentifier(**run_id)\n elif not isinstance(run_id, RunIdentifier):\n run_id = RunIdentifier(run_name=run_name, run_time=run_time)\n\n self._active_validation = True\n if result_format is None:\n result_format = {\"result_format\": \"BASIC\"}\n # If a different validation data context was provided, override\n validate__data_context = self._data_context\n if data_context is None and self._data_context is not None:\n data_context = self._data_context\n elif data_context is not None:\n # temporarily set self._data_context so it is used inside the expectation decorator\n self._data_context = data_context\n\n if expectation_suite is None:\n expectation_suite = self.get_expectation_suite(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_include_config_kwargs=False,\n discard_catch_exceptions_kwargs=False,\n )\n elif isinstance(expectation_suite, str):\n try:\n with open(expectation_suite) as infile:\n expectation_suite = expectationSuiteSchema.loads(infile.read())\n except ValidationError:\n raise\n except OSError:\n raise GreatExpectationsError(\n \"Unable to load expectation suite: IO error while reading %s\"\n % expectation_suite\n )\n elif not isinstance(expectation_suite, ExpectationSuite):\n logger.error(\n \"Unable to validate using the provided value for expectation suite; does it need to be \"\n \"loaded from a dictionary?\"\n )\n if getattr(data_context, \"_usage_statistics_handler\", None):\n handler = data_context._usage_statistics_handler\n handler.send_usage_message(\n event=\"data_asset.validate\",\n event_payload=handler._batch_anonymizer.anonymize_batch_info(\n self\n ),\n success=False,\n )\n return ExpectationValidationResult(success=False)\n # Evaluation parameter priority is\n # 1. from provided parameters\n # 2. 
from expectation configuration\n # 3. from data context\n # So, we load them in reverse order\n\n if data_context is not None:\n runtime_evaluation_parameters = (\n data_context.evaluation_parameter_store.get_bind_params(run_id)\n )\n else:\n runtime_evaluation_parameters = {}\n\n if expectation_suite.evaluation_parameters:\n runtime_evaluation_parameters.update(\n expectation_suite.evaluation_parameters\n )\n\n if evaluation_parameters is not None:\n runtime_evaluation_parameters.update(evaluation_parameters)\n\n # Convert evaluation parameters to be json-serializable\n runtime_evaluation_parameters = recursively_convert_to_json_serializable(\n runtime_evaluation_parameters\n )\n\n # Warn if our version is different from the version in the configuration\n # TODO: Deprecate \"great_expectations.__version__\"\n suite_ge_version = expectation_suite.meta.get(\n \"great_expectations_version\"\n ) or expectation_suite.meta.get(\"great_expectations.__version__\")\n\n # Group expectations by column\n columns = {}\n\n for expectation in expectation_suite.expectations:\n expectation.process_evaluation_parameters(\n evaluation_parameters=runtime_evaluation_parameters,\n interactive_evaluation=self.interactive_evaluation,\n data_context=self._data_context,\n )\n if \"column\" in expectation.kwargs and isinstance(\n expectation.kwargs[\"column\"], Hashable\n ):\n column = expectation.kwargs[\"column\"]\n else:\n column = \"_nocolumn\"\n if column not in columns:\n columns[column] = []\n columns[column].append(expectation)\n\n expectations_to_evaluate = []\n for col in columns:\n expectations_to_evaluate.extend(columns[col])\n\n results = self.graph_validate(\n expectations_to_evaluate,\n runtime_configuration={\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n },\n )\n statistics = _calc_validation_statistics(results)\n\n if only_return_failures:\n abbrev_results = []\n for exp in results:\n if not exp.success:\n abbrev_results.append(exp)\n results = abbrev_results\n\n expectation_suite_name = expectation_suite.expectation_suite_name\n\n result = ExpectationSuiteValidationResult(\n results=results,\n success=statistics.success,\n statistics={\n \"evaluated_expectations\": statistics.evaluated_expectations,\n \"successful_expectations\": statistics.successful_expectations,\n \"unsuccessful_expectations\": statistics.unsuccessful_expectations,\n \"success_percent\": statistics.success_percent,\n },\n evaluation_parameters=runtime_evaluation_parameters,\n meta={\n \"great_expectations_version\": ge_version,\n \"expectation_suite_name\": expectation_suite_name,\n \"run_id\": run_id,\n \"batch_spec\": self.active_batch_spec,\n \"batch_markers\": self.active_batch_markers,\n \"active_batch_definition\": self.active_batch_definition,\n \"validation_time\": validation_time,\n },\n )\n\n self._data_context = validate__data_context\n except Exception as e:\n if getattr(data_context, \"_usage_statistics_handler\", None):\n handler = data_context._usage_statistics_handler\n handler.send_usage_message(\n event=\"data_asset.validate\",\n event_payload=handler._batch_anonymizer.anonymize_batch_info(self),\n success=False,\n )\n raise\n finally:\n self._active_validation = False\n\n if getattr(data_context, \"_usage_statistics_handler\", None):\n handler = data_context._usage_statistics_handler\n handler.send_usage_message(\n event=\"data_asset.validate\",\n event_payload=handler._batch_anonymizer.anonymize_batch_info(self),\n success=True,\n )\n return result\n\n def 
get_evaluation_parameter(self, parameter_name, default_value=None):\n \"\"\"\n Get an evaluation parameter value that has been stored in meta.\n\n Args:\n parameter_name (string): The name of the parameter to store.\n default_value (any): The default value to be returned if the parameter is not found.\n\n Returns:\n The current value of the evaluation parameter.\n \"\"\"\n if parameter_name in self._expectation_suite.evaluation_parameters:\n return self._expectation_suite.evaluation_parameters[parameter_name]\n else:\n return default_value\n\n def set_evaluation_parameter(self, parameter_name, parameter_value):\n \"\"\"\n Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\n parameterized expectations.\n\n Args:\n parameter_name (string): The name of the kwarg to be replaced at evaluation time\n parameter_value (any): The value to be used\n \"\"\"\n self._expectation_suite.evaluation_parameters.update(\n {parameter_name: parameter_value}\n )\n\n def add_citation(\n self,\n comment,\n batch_spec=None,\n batch_markers=None,\n batch_definition=None,\n citation_date=None,\n ):\n \"\"\"Adds a citation to an existing Expectation Suite within the validator\"\"\"\n if batch_spec is None:\n batch_spec = self.batch_spec\n if batch_markers is None:\n batch_markers = self.active_batch_markers\n if batch_definition is None:\n batch_definition = self.active_batch_definition\n self._expectation_suite.add_citation(\n comment,\n batch_spec=batch_spec,\n batch_markers=batch_markers,\n batch_definition=batch_definition,\n citation_date=citation_date,\n )\n\n @property\n def expectation_suite_name(self):\n \"\"\"Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.\"\"\"\n return self._expectation_suite.expectation_suite_name\n\n @expectation_suite_name.setter\n def expectation_suite_name(self, expectation_suite_name):\n \"\"\"Sets the expectation_suite name of this data_asset as stored in the expectations configuration.\"\"\"\n self._expectation_suite.expectation_suite_name = expectation_suite_name\n\n def test_expectation_function(self, function, *args, **kwargs):\n \"\"\"Test a generic expectation function\n\n Args:\n function (func): The function to be tested. (Must be a valid expectation function.)\n *args : Positional arguments to be passed the the function\n **kwargs : Keyword arguments to be passed the the function\n\n Returns:\n A JSON-serializable expectation result object.\n\n Notes:\n This function is a thin layer to allow quick testing of new expectation functions, without having to \\\n define custom classes, etc. 
To use developed expectations from the command-line tool, you will still need \\\n to define custom classes, etc.\n\n Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.\n \"\"\"\n\n argspec = inspect.getfullargspec(function)[0][1:]\n\n new_function = self.expectation(argspec)(function)\n return new_function(self, *args, **kwargs)\n\n def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]:\n if domain_kwargs is None:\n domain_kwargs = {\n \"batch_id\": self.execution_engine.active_batch_data_id,\n }\n\n columns: List[str] = self.get_metric(\n metric=MetricConfiguration(\n metric_name=\"table.columns\",\n metric_domain_kwargs=domain_kwargs,\n )\n )\n\n return columns\n\n def head(\n self,\n n_rows: Optional[int] = 5,\n domain_kwargs: Optional[Dict[str, Any]] = None,\n fetch_all: Optional[bool] = False,\n ) -> pd.DataFrame:\n if domain_kwargs is None:\n domain_kwargs = {\n \"batch_id\": self.execution_engine.active_batch_data_id,\n }\n\n data: Any = self.get_metric(\n metric=MetricConfiguration(\n metric_name=\"table.head\",\n metric_domain_kwargs=domain_kwargs,\n metric_value_kwargs={\n \"n_rows\": n_rows,\n \"fetch_all\": fetch_all,\n },\n )\n )\n\n df: pd.DataFrame\n if isinstance(\n self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)\n ):\n df = pd.DataFrame(data=data)\n elif isinstance(self.execution_engine, SparkDFExecutionEngine):\n rows: List[Dict[str, Any]] = [datum.asDict() for datum in data]\n df = pd.DataFrame(data=rows)\n else:\n raise GreatExpectationsError(\n \"Unsupported or unknown ExecutionEngine type encountered in Validator class.\"\n )\n\n return df.reset_index(drop=True, inplace=False)\n\n\nValidationStatistics = namedtuple(\n \"ValidationStatistics\",\n [\n \"evaluated_expectations\",\n \"successful_expectations\",\n \"unsuccessful_expectations\",\n \"success_percent\",\n \"success\",\n ],\n)\n\n\ndef _calc_validation_statistics(validation_results):\n \"\"\"\n Calculate summary statistics for the validation results and\n return ``ExpectationStatistics``.\n \"\"\"\n # calc stats\n successful_expectations = sum(exp.success for exp in validation_results)\n evaluated_expectations = len(validation_results)\n unsuccessful_expectations = evaluated_expectations - successful_expectations\n success = successful_expectations == evaluated_expectations\n try:\n success_percent = successful_expectations / evaluated_expectations * 100\n except ZeroDivisionError:\n # success_percent = float(\"nan\")\n success_percent = None\n\n return ValidationStatistics(\n successful_expectations=successful_expectations,\n evaluated_expectations=evaluated_expectations,\n unsuccessful_expectations=unsuccessful_expectations,\n success=success,\n success_percent=success_percent,\n )\n\n\nclass BridgeValidator:\n \"\"\"This is currently helping bridge APIs\"\"\"\n\n def __init__(self, batch, expectation_suite, expectation_engine=None, **kwargs):\n \"\"\"Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being\n determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.)\n\n Args:\n batch (Batch): A Batch in Pandas, Spark, or SQL format\n expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the current Data\n Context\n expectation_engine (ExecutionEngine): The current Execution Engine being utilized. 
If this is not set, it is\n determined by the type of data within the given batch\n \"\"\"\n self.batch = batch\n self.expectation_suite = expectation_suite\n\n if isinstance(expectation_engine, dict):\n expectation_engine = ClassConfig(**expectation_engine)\n\n if isinstance(expectation_engine, ClassConfig):\n module_name = expectation_engine.module_name or \"great_expectations.dataset\"\n verify_dynamic_loading_support(module_name=module_name)\n expectation_engine = load_class(\n class_name=expectation_engine.class_name, module_name=module_name\n )\n\n self.expectation_engine = expectation_engine\n if self.expectation_engine is None:\n # Guess the engine\n try:\n import pandas as pd\n\n if isinstance(batch.data, pd.DataFrame):\n self.expectation_engine = PandasDataset\n except ImportError:\n pass\n if self.expectation_engine is None:\n if isinstance(batch.data, SqlAlchemyBatchReference):\n self.expectation_engine = SqlAlchemyDataset\n\n if self.expectation_engine is None:\n try:\n import pyspark\n\n if isinstance(batch.data, pyspark.sql.DataFrame):\n self.expectation_engine = SparkDFDataset\n except ImportError:\n pass\n\n if self.expectation_engine is None:\n raise ValueError(\n \"Unable to identify expectation_engine. It must be a subclass of DataAsset.\"\n )\n\n self.init_kwargs = kwargs\n\n def get_dataset(self):\n \"\"\"\n Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes\n contain proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data)\n \"\"\"\n if issubclass(self.expectation_engine, PandasDataset):\n import pandas as pd\n\n if not isinstance(self.batch[\"data\"], pd.DataFrame):\n raise ValueError(\n \"PandasDataset expectation_engine requires a Pandas Dataframe for its batch\"\n )\n\n return self.expectation_engine(\n self.batch.data,\n expectation_suite=self.expectation_suite,\n batch_kwargs=self.batch.batch_kwargs,\n batch_parameters=self.batch.batch_parameters,\n batch_markers=self.batch.batch_markers,\n data_context=self.batch.data_context,\n **self.init_kwargs,\n **self.batch.batch_kwargs.get(\"dataset_options\", {}),\n )\n\n elif issubclass(self.expectation_engine, SqlAlchemyDataset):\n if not isinstance(self.batch.data, SqlAlchemyBatchReference):\n raise ValueError(\n \"SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch\"\n )\n\n init_kwargs = self.batch.data.get_init_kwargs()\n init_kwargs.update(self.init_kwargs)\n return self.expectation_engine(\n batch_kwargs=self.batch.batch_kwargs,\n batch_parameters=self.batch.batch_parameters,\n batch_markers=self.batch.batch_markers,\n data_context=self.batch.data_context,\n expectation_suite=self.expectation_suite,\n **init_kwargs,\n **self.batch.batch_kwargs.get(\"dataset_options\", {}),\n )\n\n elif issubclass(self.expectation_engine, SparkDFDataset):\n import pyspark\n\n if not isinstance(self.batch.data, pyspark.sql.DataFrame):\n raise ValueError(\n \"SparkDFDataset expectation_engine requires a spark DataFrame for its batch\"\n )\n\n return self.expectation_engine(\n spark_df=self.batch.data,\n expectation_suite=self.expectation_suite,\n batch_kwargs=self.batch.batch_kwargs,\n batch_parameters=self.batch.batch_parameters,\n batch_markers=self.batch.batch_markers,\n data_context=self.batch.data_context,\n **self.init_kwargs,\n **self.batch.batch_kwargs.get(\"dataset_options\", {}),\n )\n" ]
[ [ "pandas.DataFrame" ] ]
lucassm/cigre-montecarlo
[ "fd354b9c3ade460b46687ba312f51212dad17151" ]
[ "pandapower/build_bus.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy\r\n# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a\r\n# BSD-style license that can be found in the LICENSE file.\r\n\r\n\r\nimport numpy as np\r\nfrom itertools import chain\r\nfrom collections import defaultdict\r\n\r\nfrom pypower.idx_bus import BUS_I, BASE_KV, PD, QD, GS, BS, VMAX, VMIN\r\n\r\nfrom pandapower.auxiliary import get_indices, _sum_by_group\r\n\r\n\r\nclass DisjointSet(dict):\r\n\r\n def add(self, item):\r\n self[item] = item\r\n\r\n def find(self, item):\r\n parent = self[item]\r\n if self[parent] != parent:\r\n parent = self.find(parent)\r\n self[item] = parent\r\n return parent\r\n\r\n def union(self, item1, item2):\r\n p1 = self.find(item1)\r\n p2 = self.find(item2)\r\n self[p1] = p2\r\n\r\n\r\ndef _build_bus_ppc(net, ppc, is_elems, init_results=False, set_opf_constraints=False):\r\n \"\"\"\r\n \"\"\"\r\n if len(net[\"trafo3w\"]) > 0:\r\n # TODO: include directly in pd2ppc so that buses are only in ppc, not in pandapower. LT\r\n _create_trafo3w_buses(net, init_results)\r\n if len(net[\"xward\"]) > 0:\r\n # TODO: include directly in pd2ppc so that buses are only in ppc, not in pandapower. LT\r\n _create_xward_buses(net, init_results)\r\n\r\n # get buses as set\r\n bus_list = set(net[\"bus\"].index.values)\r\n # get in service elements\r\n eg_is = is_elems['eg']\r\n gen_is = is_elems['gen']\r\n bus_is = is_elems['bus']\r\n\r\n # create a mapping from arbitrary pp-index to a consecutive index starting at zero (ppc-index)\r\n # To sort the array first, so that PV comes first, three steps are necessary:\r\n\r\n # 1. Find PV / Slack nodes and place them first (necessary for fast generation of Jacobi-Matrix)\r\n # get indices of PV (and ref) buses\r\n if len(net[\"xward\"]) > 0:\r\n # add xwards if available\r\n pv_ref = set((np.r_[eg_is[\"bus\"].values, gen_is[\"bus\"].values, net[\"xward\"][\r\n net[\"xward\"].in_service == 1][\"ad_bus\"].values]).flatten())\r\n else:\r\n pv_ref = set(np.r_[eg_is[\"bus\"].values, gen_is[\"bus\"].values].flatten())\r\n\r\n # 2. Add PQ buses without switches\r\n slidx = (net[\"switch\"][\"closed\"].values == 1) & (net[\"switch\"][\"et\"].values == \"b\") &\\\r\n (net[\"switch\"][\"bus\"].isin(bus_is.index).values) & (\r\n net[\"switch\"][\"element\"].isin(bus_is.index).values)\r\n\r\n # get buses with switches\r\n switch_buses = set((np.r_[net[\"switch\"][\"bus\"].values[slidx], net[\r\n \"switch\"][\"element\"].values[slidx]]).flatten())\r\n pq_buses_without_switches = (bus_list - switch_buses) - pv_ref\r\n\r\n # consecutive values for pv, ref, and non switch pq buses\r\n npArange = np.arange(len(pq_buses_without_switches) + len(pv_ref))\r\n # buses in PandaPower\r\n PandaBusses = sorted(pv_ref) + sorted(pq_buses_without_switches)\r\n # generate bus_lookup PandaPower -> [PV, PQ(without switches)]\r\n bus_lookup = dict(zip(PandaBusses, npArange))\r\n\r\n # 3. 
Add PQ buses with switches and fuse them\r\n v = defaultdict(set)\r\n\r\n # get the pp-indices of the buses for those switches\r\n fbus = net[\"switch\"][\"bus\"].values[slidx]\r\n tbus = net[\"switch\"][\"element\"].values[slidx]\r\n\r\n # create a mapping to map each bus to itself at frist ...\r\n ds = DisjointSet({e: e for e in chain(fbus, tbus)})\r\n for f, t in zip(fbus, tbus):\r\n ds.union(f, t)\r\n\r\n for a in ds:\r\n v[ds.find(a)].add(a)\r\n disjoint_sets = [e for e in v.values() if len(e) > 1]\r\n\r\n i = npArange[-1]\r\n\r\n # check if PV buses need to be fused\r\n # if yes: the sets with PV buses must be found (which is slow)\r\n # if no: the check can be omitted\r\n if any(i in fbus or i in tbus for i in pv_ref):\r\n for dj in disjoint_sets:\r\n pv_buses_in_set = pv_ref & dj\r\n nr_pv_bus = len(pv_buses_in_set)\r\n if nr_pv_bus == 0:\r\n i += 1\r\n map_to = i\r\n bus = dj.pop()\r\n PandaBusses.append(bus)\r\n bus_lookup[bus] = map_to\r\n elif nr_pv_bus == 1:\r\n map_to = bus_lookup[pv_buses_in_set.pop()]\r\n else:\r\n raise UserWarning(\"Can't fuse two PV buses\")\r\n for bus in dj:\r\n bus_lookup[bus] = map_to\r\n else:\r\n for dj in disjoint_sets:\r\n # new bus to map to\r\n i += 1\r\n map_to = i\r\n # get bus ID and append to Panda bus list\r\n bus = dj.pop()\r\n PandaBusses.append(bus)\r\n bus_lookup[bus] = map_to\r\n for bus in dj:\r\n bus_lookup[bus] = map_to\r\n\r\n # init ppc with zeros\r\n ppc[\"bus\"] = np.zeros(shape=(i + 1, 13), dtype=float)\r\n # fill ppc with init values\r\n ppc[\"bus\"][:] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9])\r\n ppc[\"bus\"][:, BUS_I] = np.arange(i + 1)\r\n\r\n # change the voltages of the buses to the values in net\r\n ppc[\"bus\"][:, BASE_KV] = net[\"bus\"].vn_kv.ix[PandaBusses]\r\n\r\n if init_results is True and len(net[\"res_bus\"]) > 0:\r\n int_index = get_indices(net[\"bus\"].index.values, bus_lookup)\r\n ppc[\"bus\"][int_index, 7] = net[\"res_bus\"][\"vm_pu\"].values\r\n ppc[\"bus\"][int_index, 8] = net[\"res_bus\"].va_degree.values\r\n\r\n if set_opf_constraints:\r\n if \"max_vm_pu\" in net.bus:\r\n ppc[\"bus\"][:, VMAX] = net[\"bus\"].max_vm_pu.loc[PandaBusses]\r\n else:\r\n ppc[\"bus\"][:, VMAX] = 10\r\n if \"min_vm_pu\" in net.bus:\r\n ppc[\"bus\"][:, VMIN] = net[\"bus\"].min_vm_pu.loc[PandaBusses]\r\n else:\r\n ppc[\"bus\"][:, VMIN] = 0\r\n\r\n return bus_lookup\r\n\r\n\r\ndef _calc_loads_and_add_on_ppc(net, ppc, is_elems, bus_lookup):\r\n # get in service elements\r\n bus_is = is_elems['bus']\r\n\r\n l = net[\"load\"]\r\n # element_is = check if element is at a bus in service & element is in service\r\n load_is = np.in1d(l.bus.values, bus_is.index) \\\r\n & l.in_service.values.astype(bool)\r\n vl = load_is * l[\"scaling\"].values.T / np.float64(1000.)\r\n lp = l[\"p_kw\"].values * vl\r\n lq = l[\"q_kvar\"].values * vl\r\n\r\n s = net[\"sgen\"]\r\n sgen_is = np.in1d(s.bus.values, bus_is.index) \\\r\n & s.in_service.values.astype(bool)\r\n vl = sgen_is * s[\"scaling\"].values.T / np.float64(1000.)\r\n sp = s[\"p_kw\"].values * vl\r\n sq = s[\"q_kvar\"].values * vl\r\n\r\n w = net[\"ward\"]\r\n ward_is = np.in1d(w.bus.values, bus_is.index) \\\r\n & w.in_service.values.astype(bool)\r\n vl = ward_is / np.float64(1000.)\r\n wp = w[\"ps_kw\"].values * vl\r\n wq = w[\"qs_kvar\"].values * vl\r\n\r\n xw = net[\"xward\"]\r\n xward_is = np.in1d(xw.bus.values, bus_is.index) \\\r\n & xw.in_service.values.astype(bool)\r\n vl = xward_is / np.float64(1000.)\r\n xwp = xw[\"ps_kw\"].values * vl\r\n xwq = xw[\"qs_kvar\"].values * 
vl\r\n\r\n b = get_indices(np.hstack([l[\"bus\"].values, s[\"bus\"].values, w[\"bus\"].values, xw[\"bus\"].values]\r\n ), bus_lookup)\r\n b, vp, vq = _sum_by_group(b, np.hstack([lp, sp, wp, xwp]), np.hstack([lq, sq, wq, xwq]))\r\n\r\n ppc[\"bus\"][b, PD] = vp\r\n ppc[\"bus\"][b, QD] = vq\r\n\r\n\r\ndef _calc_shunts_and_add_on_ppc(net, ppc, is_elems, bus_lookup):\r\n # get in service elements\r\n bus_is = is_elems['bus']\r\n\r\n s = net[\"shunt\"]\r\n shunt_is = np.in1d(s.bus.values, bus_is.index) \\\r\n & s.in_service.values.astype(bool)\r\n vl = shunt_is / np.float64(1000.)\r\n sp = s[\"p_kw\"].values * vl\r\n sq = s[\"q_kvar\"].values * vl\r\n\r\n w = net[\"ward\"]\r\n ward_is = np.in1d(w.bus.values, bus_is.index) \\\r\n & w.in_service.values.astype(bool)\r\n vl = ward_is / np.float64(1000.)\r\n wp = w[\"pz_kw\"].values * vl\r\n wq = w[\"qz_kvar\"].values * vl\r\n\r\n xw = net[\"xward\"]\r\n xward_is = np.in1d(xw.bus.values, bus_is.index) \\\r\n & xw.in_service.values.astype(bool)\r\n vl = xward_is / np.float64(1000.)\r\n xwp = xw[\"pz_kw\"].values * vl\r\n xwq = xw[\"qz_kvar\"].values * vl\r\n\r\n b = get_indices(np.hstack([s[\"bus\"].values, w[\"bus\"].values, xw[\"bus\"].values]), bus_lookup)\r\n b, vp, vq = _sum_by_group(b, np.hstack([sp, wp, xwp]), np.hstack([sq, wq, xwq]))\r\n\r\n ppc[\"bus\"][b, GS] = vp\r\n ppc[\"bus\"][b, BS] = -vq\r\n\r\n\r\ndef _create_xward_buses(net, init_results):\r\n from pandapower.create import create_buses\r\n main_buses = net.bus.loc[net.xward.bus.values]\r\n bid = create_buses(net, nr_buses=len(main_buses),\r\n vn_kv=main_buses.vn_kv.values,\r\n in_service=net[\"xward\"][\"in_service\"].values)\r\n net.xward[\"ad_bus\"] = bid\r\n if init_results:\r\n # TODO: this is probably slow, but the whole auxiliary bus creation should be included in\r\n # pd2ppc anyways. LT\r\n for hv_bus, aux_bus in zip(main_buses.index, bid):\r\n net.res_bus.loc[aux_bus] = net.res_bus.loc[hv_bus].values\r\n\r\n\r\ndef _create_trafo3w_buses(net, init_results):\r\n from pandapower.create import create_buses\r\n hv_buses = net.bus.loc[net.trafo3w.hv_bus.values]\r\n bid = create_buses(net, nr_buses=len(net[\"trafo3w\"]),\r\n vn_kv=hv_buses.vn_kv.values,\r\n in_service=net.trafo3w.in_service.values)\r\n net.trafo3w[\"ad_bus\"] = bid\r\n if init_results:\r\n # TODO: this is probably slow, but the whole auxiliary bus creation should be included in\r\n # pd2ppc anyways. LT\r\n for hv_bus, aux_bus in zip(hv_buses.index, bid):\r\n net.res_bus.loc[aux_bus] = net.res_bus.loc[hv_bus].values\r\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.hstack", "numpy.float64", "numpy.arange", "numpy.in1d" ] ]
DevHyung/bert_score
[ "c3213b5a83471d3fde830e46f4761f021b759508" ]
[ "tests/test_scorer.py" ]
[ "import unittest\nimport torch\nfrom transformers import __version__ as ht_version\nimport bert_score\n\nEPS = 1e-5\n\ncands = [\n \"28-year-old chef found dead in San Francisco mall\",\n \"A 28-year-old chef who recently moved to San Francisco was found dead in the staircase of a local shopping center.\",\n 'The victim\\'s brother said he cannot imagine anyone who would want to harm him,\"Finally, it went uphill again at him.\"',\n]\nrefs = [\n \"28-Year-Old Chef Found Dead at San Francisco Mall\",\n \"A 28-year-old chef who had recently moved to San Francisco was found dead in the stairwell of a local mall this week.\",\n \"But the victim's brother says he can't think of anyone who would want to hurt him, saying, \\\"Things were finally going well for him.\\\"\",\n]\n\n\nclass TestScore(unittest.TestCase):\n def test_scorer(self):\n scorer = bert_score.BERTScorer(lang=\"en\", batch_size=3)\n\n (P, R, F), hash_code = scorer.score(cands, refs, return_hash=True)\n self.assertTrue(torch.is_tensor(P))\n self.assertTrue(torch.is_tensor(R))\n self.assertTrue(torch.is_tensor(F))\n self.assertEqual(\n hash_code, f\"roberta-large_L17_no-idf_version={bert_score.__version__}(hug_trans={ht_version})\"\n )\n self.assertTrue(\n (P - torch.tensor([0.9843302369117737, 0.9832239747047424, 0.9120386242866516])).abs_().max() < EPS\n )\n self.assertTrue(\n (R - torch.tensor([0.9823839068412781, 0.9732863903045654, 0.920428991317749])).abs_().max() < EPS\n )\n self.assertTrue(\n (F - torch.tensor([0.9833561182022095, 0.9782299995422363, 0.916214644908905])).abs_().max() < EPS\n )\n\n def test_idf_scorer(self):\n scorer = bert_score.BERTScorer(lang=\"en\", idf=True, idf_sents=refs, batch_size=3)\n\n (P, R, F), hash_code = scorer.score(cands, refs, return_hash=True)\n self.assertTrue(torch.is_tensor(P))\n self.assertTrue(torch.is_tensor(R))\n self.assertTrue(torch.is_tensor(F))\n self.assertEqual(hash_code, f\"roberta-large_L17_idf_version={bert_score.__version__}(hug_trans={ht_version})\")\n self.assertTrue(\n (P - torch.tensor([0.9837872385978699, 0.9754738807678223, 0.8947395086288452])).abs_().max() < EPS\n )\n self.assertTrue(\n (R - torch.tensor([0.9827190637588501, 0.9697767496109009, 0.9172918796539307])).abs_().max() < EPS\n )\n self.assertTrue(\n (F - torch.tensor([0.9832529425621033, 0.972616970539093, 0.9058753848075867])).abs_().max() < EPS\n )\n\n def test_scorer_rescale(self):\n scorer = bert_score.BERTScorer(lang=\"en\", rescale_with_baseline=True, batch_size=3)\n\n (P, R, F), hash_code = scorer.score(cands, refs, return_hash=True)\n self.assertTrue(torch.is_tensor(P))\n self.assertTrue(torch.is_tensor(R))\n self.assertTrue(torch.is_tensor(F))\n self.assertEqual(\n hash_code, f\"roberta-large_L17_no-idf_version={bert_score.__version__}(hug_trans={ht_version})-rescaled\"\n )\n self.assertTrue(\n (P - torch.tensor([0.907000780105591, 0.900435566902161, 0.477955609560013])).abs_().max() < EPS\n )\n self.assertTrue(\n (R - torch.tensor([0.895456790924072, 0.841467440128326, 0.527785062789917])).abs_().max() < EPS\n )\n self.assertTrue(\n (F - torch.tensor([0.901383399963379, 0.871010780334473, 0.503565192222595])).abs_().max() < EPS\n )\n\n def test_idf_scorer_rescale(self):\n scorer = bert_score.BERTScorer(lang=\"en\", rescale_with_baseline=True, idf=True, idf_sents=refs, batch_size=3)\n\n (P, R, F), hash_code = scorer.score(cands, refs, return_hash=True)\n self.assertTrue(torch.is_tensor(P))\n self.assertTrue(torch.is_tensor(R))\n self.assertTrue(torch.is_tensor(F))\n self.assertEqual(\n 
hash_code, f\"roberta-large_L17_idf_version={bert_score.__version__}(hug_trans={ht_version})-rescaled\"\n )\n self.assertTrue(\n (P - torch.tensor([0.903778135776520, 0.854439020156860, 0.375287383794785])).abs_().max() < EPS\n )\n self.assertTrue(\n (R - torch.tensor([0.897446095943451, 0.820639789104462, 0.509167850017548])).abs_().max() < EPS\n )\n self.assertTrue(\n (F - torch.tensor([0.900772094726562, 0.837753534317017, 0.442304641008377])).abs_().max() < EPS\n )\n\n def test_multi_refs(self):\n scorer = bert_score.BERTScorer(lang=\"en\", batch_size=3, rescale_with_baseline=True)\n\n cands = [\"I like lemons.\"]\n refs = [[\"I am proud of you.\", \"I love lemons.\", \"Go go go.\"]]\n P_mul, R_mul, F_mul = scorer.score(cands, refs,)\n P_best, R_best, F_best = scorer.score(cands, [refs[0][1]],)\n self.assertTrue((P_mul - P_best).abs_().max() < EPS)\n self.assertTrue((R_mul - R_best).abs_().max() < EPS)\n self.assertTrue((F_mul - F_best).abs_().max() < EPS)\n\n def test_multi_refs_working(self):\n scorer = bert_score.BERTScorer(lang=\"en\", batch_size=3, rescale_with_baseline=True)\n\n cands = [\"I like lemons.\", \"Hi\", \"Hey\", \"Hello\", \"Go\", \"\"]\n refs = [\n [\"I am proud of you.\", \"I love lemons.\", \"Go go go.\"],\n [\"I am proud of you.\", \"Go go go.\"],\n [\"Hi\", \"\"],\n [\"I am proud of you.\", \"I love lemons.\", \"Go go go.\", \"hello\"],\n [\"I am proud of you.\", \"Go go go.\", \"Go\", \"Go to school\"],\n [\"test\"],\n ]\n P_mul, R_mul, F_mul = scorer.score(cands, refs,)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.is_tensor", "torch.tensor" ] ]
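The test file above doubles as usage documentation for the BERTScorer API; as a quick illustration, a minimal sketch (assuming bert_score is installed and the default roberta-large weights can be downloaded — the sentences are taken from the test fixtures and the printed scores are illustrative, not asserted) might look like:

import bert_score

cands = ["28-year-old chef found dead in San Francisco mall"]
refs = ["28-Year-Old Chef Found Dead at San Francisco Mall"]

# lang="en" selects the roberta-large model; rescale_with_baseline maps raw scores to a more readable range
scorer = bert_score.BERTScorer(lang="en", rescale_with_baseline=True)
P, R, F = scorer.score(cands, refs)   # each is a tensor with one entry per candidate
print(P.item(), R.item(), F.item())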
Qi-max/amlearn
[ "88189519bc1079ab5085d5871169c223e0d03057" ]
[ "scripts/featurize_bp_symmfunc.py" ]
[ "import numpy as np\nfrom amlearn.utils.data import read_lammps_dump\nfrom amlearn.featurize.symmetry_function import \\\n BPRadialFunction, BPAngularFunction\n\n__author__ = \"Qi Wang\"\n__email__ = \"qiwang.mse@gmail.com\"\n\n\"\"\"\nThis is an example script of deriving B-P symmetry functinos for each atom, \nbased on the Fortran source codes in amlearn/featurize/src/bp_symmfunc.f90. \nPlease make sure to compile the Fortran code using f2py before running this \nscript. \n\"\"\"\n\nsystem = [\"Cu65Zr35\", \"qr_5plus10^10\"]\n\nlammps_file = \"xxx/dump.lmp\"\nstructure, bds = read_lammps_dump(lammps_file)\n\noutput_path = \"xxx/xxx\"\n\n# Calculating B-P radial symmetry function\nref_atom_number = \"29\" # Cu\natom_type_symbols = np.array([1, 2])\ndelta_r = 0.1\nn_r = 50\n\nbp_radial_function = BPRadialFunction.default_from_system(\n bds=bds, atom_type_symbols=atom_type_symbols,\n ref_atom_number=ref_atom_number,\n delta_r=delta_r, n_r=n_r, output_path=output_path)\n\nradial_funcs_df = bp_radial_function.fit_transform(structure)\n\n# Calculating B-P angular symmetry function\nksaais = np.array([14.633, 14.633, 14.638, 14.638, 2.554, 2.554, 2.554, 2.554,\n 1.648, 1.648, 1.204, 1.204, 1.204, 1.204, 0.933, 0.933,\n 0.933, 0.933, 0.695, 0.695, 0.695, 0.695])\nlambdas = np.array([-1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1])\nzetas = np.array([1, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 2,\n 4, 16, 1, 2, 4, 16, 1, 2, 4, 16])\n\nbp_angular_function = \\\n BPAngularFunction.default_from_system(\n bds=bds, atom_type_symbols=atom_type_symbols,\n ref_atom_number=ref_atom_number, ksaais=ksaais, lambdas=lambdas,\n zetas=zetas, output_path=output_path)\n\nangular_funcs_df = bp_angular_function.fit_transform(structure)\n" ]
[ [ "numpy.array" ] ]
hungntt/Rainbow
[ "f25723ce984fcbd5e23416db36b70c712bd7a68e" ]
[ "test.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport os\nimport plotly\nfrom plotly.graph_objs import Scatter\nfrom plotly.graph_objs.scatter import Line\nimport torch\n\nfrom env import Env\n\n\n# Test DQN\ndef test(args, T, dqn, val_mem, metrics, results_dir, evaluate=False):\n env = Env(args)\n env.eval()\n metrics['steps'].append(T)\n T_rewards, T_Qs = [], []\n\n # Test performance over several episodes\n done = True\n for _ in range(args.evaluation_episodes):\n while True:\n if done:\n state, reward_sum, done = env.reset(), 0, False\n\n action = dqn.act_e_greedy(state) # Choose an action ε-greedily\n state, reward, done = env.step(action) # Step\n reward_sum += reward\n if args.render:\n env.render()\n\n if done:\n T_rewards.append(reward_sum)\n break\n env.close()\n\n # Test Q-values over validation memory\n for state in val_mem: # Iterate over valid states\n T_Qs.append(dqn.evaluate_q(state))\n\n avg_reward, avg_Q = sum(T_rewards) / len(T_rewards), sum(T_Qs) / len(T_Qs)\n if not evaluate:\n # Save model parameters if improved\n if avg_reward > metrics['best_avg_reward']:\n metrics['best_avg_reward'] = avg_reward\n dqn.save(results_dir)\n\n # Append to results and save metrics\n metrics['rewards'].append(T_rewards)\n metrics['Qs'].append(T_Qs)\n torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))\n\n # Plot\n _plot_line(metrics['steps'], metrics['rewards'], 'Reward', path=results_dir)\n _plot_line(metrics['steps'], metrics['Qs'], 'Q', path=results_dir)\n\n # Return average reward and Q-value\n return avg_reward, avg_Q\n\n\n# Plots min, max and mean + standard deviation bars of a population over time\ndef _plot_line(xs, ys_population, title, path=''):\n max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'\n\n ys = torch.tensor(ys_population, dtype=torch.float32)\n ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(\n 1).squeeze()\n ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std\n\n trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')\n trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)\n trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour),\n name='Mean')\n trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent),\n name='-1 Std. Dev.', showlegend=False)\n trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')\n\n plotly.offline.plot({\n 'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],\n 'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})\n }, filename=os.path.join(path, title + '.html'), auto_open=False)\n" ]
[ [ "torch.tensor" ] ]
jqhoogland/randnn
[ "8cc428e22b8a8ee1eea28ab2b96bcd79854779d2" ]
[ "randnn/weights/exponential.py" ]
[ "import numpy as np\n\n\ndef get_exponential_weights(n_dofs: int, coupling_strength: float) -> np.ndarray:\n \"\"\"\n $J_{ij}$ is drawn from $f(x; 1/\\beta) = \\exp(-x / \\beta) / \\beta$\n\n :param n_dofs: the number of nodes in the network\n :param coupling_strength: the scale parameter (equiv. the inverse rate).\n the final couplings are drawn from an exponential distribution with variation $g^2/N$, where $g$ is the\n coupling strength and $N$ is the number of nodes.\n \"\"\"\n strength_normalized = (coupling_strength / np.sqrt(n_dofs))\n coupling_matrix = np.random.exponential(size=(n_dofs, n_dofs), scale=coupling_strength)\n coupling_matrix *= np.random.choice([1, -1], size=(n_dofs, n_dofs)) # random sign for each node\n\n coupling_matrix *= (strength_normalized / np.std(coupling_matrix)) # rescale so the sampled couplings have std g / sqrt(N)\n\n return coupling_matrix\n\n" ]
[ [ "numpy.random.exponential", "numpy.std", "numpy.random.choice", "numpy.sqrt" ] ]
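As a sanity check on get_exponential_weights above, one might sample a matrix and compare its spread against the target g / sqrt(N); the import path below is inferred from the file path and assumes the randnn package is importable:

import numpy as np
from randnn.weights.exponential import get_exponential_weights

n_dofs, g = 500, 2.0
J = get_exponential_weights(n_dofs=n_dofs, coupling_strength=g)
print(J.shape)                         # (500, 500)
print(np.std(J), g / np.sqrt(n_dofs))  # should agree after the rescaling step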
GaelleChapuis/iblapps
[ "edfe7368b45480ce4a2307dd5d80ec2a38bb084d" ]
[ "atlaselectrophysiology/plot_data.py" ]
[ "from matplotlib import cm\r\nfrom pathlib import Path\r\nimport numpy as np\r\nimport alf.io\r\nfrom brainbox.processing import bincount2D\r\nfrom brainbox.population import xcorr\r\nfrom brainbox.task import passive\r\nimport scipy\r\nfrom PyQt5 import QtGui\r\n\r\nN_BNK = 4\r\nBNK_SIZE = 10\r\nAUTOCORR_BIN_SIZE = 0.25 / 1000\r\nAUTOCORR_WIN_SIZE = 10 / 1000\r\nFS = 30000\r\nnp.seterr(divide='ignore', invalid='ignore')\r\n\r\n\r\nclass PlotData:\r\n def __init__(self, alf_path, ephys_path):\r\n\r\n self.alf_path = alf_path\r\n self.ephys_path = ephys_path\r\n\r\n self.chn_coords = np.load(Path(self.alf_path, 'channels.localCoordinates.npy'))\r\n self.chn_ind = np.load(Path(self.alf_path, 'channels.rawInd.npy'))\r\n # See if spike data is available\r\n try:\r\n self.spikes = alf.io.load_object(self.alf_path, 'spikes')\r\n self.spike_data_status = True\r\n except Exception:\r\n print('spike data was not found, some plots will not display')\r\n self.spike_data_status = False\r\n\r\n try:\r\n self.clusters = alf.io.load_object(self.alf_path, 'clusters')\r\n self.filter_units('all')\r\n self.cluster_data_status = True\r\n self.compute_timescales()\r\n except Exception:\r\n print('cluster data was not found, some plots will not display')\r\n self.cluster_data_status = False\r\n\r\n try:\r\n lfp_spectrum = alf.io.load_object(self.ephys_path, 'ephysSpectralDensityLF',\r\n namespace='iblqc')\r\n if len(lfp_spectrum) == 2:\r\n self.lfp_freq = lfp_spectrum.get('freqs')\r\n self.lfp_power = lfp_spectrum.get('power', [])\r\n if not np.any(self.lfp_power):\r\n self.lfp_power = lfp_spectrum.get('amps')\r\n self.lfp_data_status = True\r\n else:\r\n print('lfp data was not found, some plots will not display')\r\n self.lfp_data_status = False\r\n except Exception:\r\n print('lfp data was not found, some plots will not display')\r\n self.lfp_data_status = False\r\n\r\n try:\r\n self.rf_map = alf.io.load_object(self.alf_path.parent, object='passiveRFM',\r\n namespace='ibl')\r\n if len(self.rf_map) == 2:\r\n self.rfmap_data_status = True\r\n else:\r\n print('rfmap data was not found, some plots will not display')\r\n self.rfmap_data_status = False\r\n except Exception:\r\n print('rfmp data was not found, some plots will not display')\r\n self.rfmap_data_status = False\r\n\r\n try:\r\n self.aud_stim = alf.io.load_object(self.alf_path.parent, object='passiveStims',\r\n namespace='ibl')['table']\r\n if len(self.aud_stim) > 0:\r\n self.passive_data_status = True\r\n except Exception:\r\n print('passive stim data was not found, some plots will not display')\r\n self.passive_data_status = False\r\n\r\n try:\r\n gabor = alf.io.load_object(self.alf_path.parent, object='passiveGabor',\r\n namespace='ibl')['table']\r\n self.vis_stim = dict()\r\n self.vis_stim['leftGabor'] = gabor['start'][(gabor['position'] == 35) &\r\n (gabor['contrast'] > 0.1)]\r\n self.vis_stim['rightGabor'] = gabor['start'][(gabor['position'] == -35) &\r\n (gabor['contrast'] > 0.1)]\r\n self.gabor_data_status = True\r\n except Exception:\r\n print('passive gabor data was not found, some plots will not display')\r\n self.gabor_data_status = False\r\n\r\n def filter_units(self, type):\r\n if type == 'all':\r\n self.spike_idx = np.arange(self.spikes['clusters'].size)\r\n # Filter for nans in depths and also in amps\r\n self.kp_idx = np.where(~np.isnan(self.spikes['depths'][self.spike_idx]) &\r\n ~np.isnan(self.spikes['amps'][self.spike_idx]))[0]\r\n\r\n else:\r\n clust = np.where(self.clusters.metrics.ks2_label == type)\r\n self.spike_idx = 
np.where(np.isin(self.spikes['clusters'], clust))[0]\r\n self.kp_idx = np.where(~np.isnan(self.spikes['depths'][self.spike_idx]) & ~np.isnan(\r\n self.spikes['amps'][self.spike_idx]))[0]\r\n\r\n# Plots that require spike and cluster data\r\n def get_depth_data_scatter(self):\r\n if not self.spike_data_status:\r\n data_scatter = None\r\n return data_scatter\r\n else:\r\n A_BIN = 10\r\n amp_range = np.quantile(self.spikes['amps'][self.spike_idx][self.kp_idx], [0, 0.9])\r\n amp_bins = np.linspace(amp_range[0], amp_range[1], A_BIN)\r\n colour_bin = np.linspace(0.0, 1.0, A_BIN + 1)\r\n colours = (cm.get_cmap('BuPu')(colour_bin)[np.newaxis, :, :3][0]) * 255\r\n spikes_colours = np.empty(self.spikes['amps'][self.spike_idx][self.kp_idx].size,\r\n dtype=object)\r\n spikes_size = np.empty(self.spikes['amps'][self.spike_idx][self.kp_idx].size)\r\n for iA in range(amp_bins.size):\r\n if iA == (amp_bins.size - 1):\r\n idx = np.where((self.spikes['amps'][self.spike_idx][self.kp_idx] >\r\n amp_bins[iA]))[0]\r\n # Make saturated spikes a very dark purple\r\n spikes_colours[idx] = QtGui.QColor('#400080')\r\n else:\r\n idx = np.where((self.spikes['amps'][self.spike_idx][self.kp_idx] >\r\n amp_bins[iA]) &\r\n (self.spikes['amps'][self.spike_idx][self.kp_idx] <=\r\n amp_bins[iA + 1]))[0]\r\n\r\n spikes_colours[idx] = QtGui.QColor(*colours[iA])\r\n\r\n spikes_size[idx] = iA / (A_BIN / 4)\r\n\r\n data_scatter = {\r\n 'x': self.spikes['times'][self.spike_idx][self.kp_idx][0:-1:100],\r\n 'y': self.spikes['depths'][self.spike_idx][self.kp_idx][0:-1:100],\r\n 'levels': amp_range * 1e6,\r\n 'colours': spikes_colours[0:-1:100],\r\n 'pen': None,\r\n 'size': spikes_size[0:-1:100],\r\n 'symbol': np.array('o'),\r\n 'xrange': np.array([np.min(self.spikes['times'][self.spike_idx][self.kp_idx]\r\n [0:-1:100]),\r\n np.max(self.spikes['times'][self.spike_idx][self.kp_idx]\r\n [0:-1:100])]),\r\n 'xaxis': 'Time (s)',\r\n 'title': 'Amplitude (uV)',\r\n 'cmap': 'BuPu',\r\n 'cluster': False\r\n }\r\n\r\n return data_scatter\r\n\r\n def get_fr_p2t_data_scatter(self):\r\n if not self.spike_data_status:\r\n data_fr_scatter = None\r\n data_p2t_scatter = None\r\n data_amp_scatter = None\r\n return data_fr_scatter, data_p2t_scatter, data_amp_scatter\r\n else:\r\n (clu,\r\n spike_depths,\r\n spike_amps,\r\n n_spikes) = self.compute_spike_average((self.spikes['clusters'][self.spike_idx]\r\n [self.kp_idx]), (self.spikes['depths']\r\n [self.spike_idx][self.kp_idx]),\r\n (self.spikes['amps'][self.spike_idx]\r\n [self.kp_idx]))\r\n spike_amps = spike_amps * 1e6\r\n fr = n_spikes / np.max(self.spikes['times'])\r\n fr_norm, fr_levels = self.normalise_data(fr, lquant=0, uquant=1)\r\n\r\n data_fr_scatter = {\r\n 'x': spike_amps,\r\n 'y': spike_depths,\r\n 'colours': fr_norm,\r\n 'pen': 'k',\r\n 'size': np.array(8),\r\n 'symbol': np.array('o'),\r\n 'levels': fr_levels,\r\n 'xrange': np.array([0.9 * np.min(spike_amps),\r\n 1.1 * np.max(spike_amps)]),\r\n 'xaxis': 'Amplitude (uV)',\r\n 'title': 'Firing Rate (Sp/s)',\r\n 'cmap': 'hot',\r\n 'cluster': True\r\n }\r\n\r\n p2t = self.clusters['peakToTrough'][clu]\r\n p2t_norm, p2t_levels = self.normalise_data(p2t, lquant=0, uquant=1)\r\n\r\n # Define the p2t levels so always same colourbar across sessions\r\n p2t_levels = [-1.5, 1.5]\r\n data_p2t_scatter = {\r\n 'x': spike_amps,\r\n 'y': spike_depths,\r\n\r\n 'colours': p2t_norm,\r\n 'pen': 'k',\r\n 'size': np.array(8),\r\n 'symbol': np.array('o'),\r\n 'levels': p2t_levels,\r\n 'xrange': np.array([0.9 * np.min(spike_amps),\r\n 1.1 * 
np.max(spike_amps)]),\r\n 'xaxis': 'Amplitude (uV)',\r\n 'title': 'Peak to Trough duration (ms)',\r\n 'cmap': 'RdYlGn',\r\n 'cluster': True\r\n }\r\n\r\n spike_amps_norm, spike_amps_levels = self.normalise_data(spike_amps, lquant=0,\r\n uquant=1)\r\n\r\n data_amp_scatter = {\r\n 'x': fr,\r\n 'y': spike_depths,\r\n\r\n 'colours': spike_amps_norm,\r\n 'pen': 'k',\r\n 'size': np.array(8),\r\n 'symbol': np.array('o'),\r\n 'levels': spike_amps_levels,\r\n 'xrange': np.array([0.9 * np.min(fr),\r\n 1.1 * np.max(fr)]),\r\n 'xaxis': 'Firing Rate (Sp/s)',\r\n 'title': 'Amplitude (uV)',\r\n 'cmap': 'magma',\r\n 'cluster': True\r\n }\r\n\r\n return data_fr_scatter, data_p2t_scatter, data_amp_scatter\r\n\r\n def get_fr_img(self):\r\n if not self.spike_data_status:\r\n data_img = None\r\n return data_img\r\n else:\r\n T_BIN = 0.05\r\n D_BIN = 5\r\n n, times, depths = bincount2D(self.spikes['times'][self.spike_idx][self.kp_idx],\r\n self.spikes['depths'][self.spike_idx][self.kp_idx],\r\n T_BIN, D_BIN, ylim=[0, np.max(self.chn_coords[:, 1])])\r\n img = n.T / T_BIN\r\n xscale = (times[-1] - times[0]) / img.shape[0]\r\n yscale = (depths[-1] - depths[0]) / img.shape[1]\r\n\r\n data_img = {\r\n 'img': img,\r\n 'scale': np.array([xscale, yscale]),\r\n 'levels': np.quantile(np.mean(img, axis=0), [0, 1]),\r\n 'offset': np.array([0, 0]),\r\n 'xrange': np.array([times[0], times[-1]]),\r\n 'xaxis': 'Time (s)',\r\n 'cmap': 'binary',\r\n 'title': 'Firing Rate'\r\n }\r\n\r\n return data_img\r\n\r\n def get_fr_amp_data_line(self):\r\n if not self.spike_data_status:\r\n data_fr_line = None\r\n data_amp_line = None\r\n return data_fr_line, data_amp_line\r\n else:\r\n T_BIN = np.max(self.spikes['times'])\r\n D_BIN = 10\r\n nspikes, times, depths = bincount2D(self.spikes['times'][self.spike_idx][self.kp_idx],\r\n self.spikes['depths'][self.spike_idx][self.kp_idx],\r\n T_BIN, D_BIN,\r\n ylim=[0, np.max(self.chn_coords[:, 1])])\r\n\r\n amp, times, depths = bincount2D(self.spikes['amps'][self.spike_idx][self.kp_idx],\r\n self.spikes['depths'][self.spike_idx][self.kp_idx],\r\n T_BIN, D_BIN, ylim=[0, np.max(self.chn_coords[:, 1])],\r\n weights=self.spikes['amps'][self.spike_idx]\r\n [self.kp_idx])\r\n mean_fr = nspikes[:, 0] / T_BIN\r\n mean_amp = np.divide(amp[:, 0], nspikes[:, 0]) * 1e6\r\n mean_amp[np.isnan(mean_amp)] = 0\r\n remove_bins = np.where(nspikes[:, 0] < 50)[0]\r\n mean_amp[remove_bins] = 0\r\n\r\n data_fr_line = {\r\n 'x': mean_fr,\r\n 'y': depths,\r\n 'xrange': np.array([0, np.max(mean_fr)]),\r\n 'xaxis': 'Firing Rate (Sp/s)'\r\n }\r\n\r\n data_amp_line = {\r\n 'x': mean_amp,\r\n 'y': depths,\r\n 'xrange': np.array([0, np.max(mean_amp)]),\r\n 'xaxis': 'Amplitude (uV)'\r\n }\r\n\r\n return data_fr_line, data_amp_line\r\n\r\n def get_correlation_data_img(self):\r\n if not self.spike_data_status:\r\n data_img = None\r\n return data_img\r\n else:\r\n T_BIN = 0.05\r\n D_BIN = 40\r\n R, times, depths = bincount2D(self.spikes['times'][self.spike_idx][self.kp_idx],\r\n self.spikes['depths'][self.spike_idx][self.kp_idx],\r\n T_BIN, D_BIN, ylim=[0, np.max(self.chn_coords[:, 1])])\r\n corr = np.corrcoef(R)\r\n corr[np.isnan(corr)] = 0\r\n scale = (np.max(depths) - np.min(depths)) / corr.shape[0]\r\n data_img = {\r\n 'img': corr,\r\n 'scale': np.array([scale, scale]),\r\n 'levels': np.array([np.min(corr), np.max(corr)]),\r\n 'offset': np.array([0, 0]),\r\n 'xrange': np.array([np.min(self.chn_coords[:, 1]), np.max(self.chn_coords[:, 1])]),\r\n 'cmap': 'viridis',\r\n 'title': 'Correlation',\r\n 'xaxis': 'Distance from probe 
tip (um)'\r\n }\r\n return data_img\r\n\r\n def get_rms_data_img_probe(self, format):\r\n # Finds channels that are at equivalent depth on probe and averages rms values for each\r\n # time point at same depth togehter\r\n try:\r\n rms_amps = alf.io.load_file_content(Path(self.ephys_path, '_iblqc_ephysTimeRms' +\r\n format + '.rms.npy'))\r\n except Exception:\r\n try:\r\n rms_amps = alf.io.load_file_content(Path(self.ephys_path, '_iblqc_ephysTimeRms' +\r\n format + '.amps.npy'))\r\n except Exception:\r\n print('rms data was not found, some plots will not display')\r\n data_img = None\r\n data_probe = None\r\n return data_img, data_probe\r\n\r\n try:\r\n rms_times = alf.io.load_file_content(Path(self.ephys_path, '_iblqc_ephysTimeRms' +\r\n format + '.timestamps.npy'))\r\n xaxis = 'Time (s)'\r\n except Exception:\r\n rms_times = np.array([0, rms_amps.shape[0]])\r\n xaxis = 'Time samples'\r\n\r\n # Img data\r\n _rms = np.take(rms_amps, self.chn_ind, axis=1)\r\n _, self.chn_depth, chn_count = np.unique(self.chn_coords[:, 1], return_index=True,\r\n return_counts=True)\r\n self.chn_depth_eq = np.copy(self.chn_depth)\r\n self.chn_depth_eq[np.where(chn_count == 2)] += 1\r\n\r\n def avg_chn_depth(a):\r\n return(np.mean([a[self.chn_depth], a[self.chn_depth_eq]], axis=0))\r\n\r\n def get_median(a):\r\n return(np.median(a))\r\n\r\n def median_subtract(a):\r\n return(a - np.median(a))\r\n img = np.apply_along_axis(avg_chn_depth, 1, _rms * 1e6)\r\n median = np.mean(np.apply_along_axis(get_median, 1, img))\r\n # Medium subtract to remove bands, but add back average median so values make sense\r\n img = np.apply_along_axis(median_subtract, 1, img) + median\r\n levels = np.quantile(img, [0.1, 0.9])\r\n xscale = (rms_times[-1] - rms_times[0]) / img.shape[0]\r\n yscale = (np.max(self.chn_coords[:, 1]) - np.min(self.chn_coords[:, 1])) / img.shape[1]\r\n\r\n if format == 'AP':\r\n cmap = 'plasma'\r\n else:\r\n cmap = 'inferno'\r\n\r\n data_img = {\r\n 'img': img,\r\n 'scale': np.array([xscale, yscale]),\r\n 'levels': levels,\r\n 'offset': np.array([0, 0]),\r\n 'cmap': cmap,\r\n 'xrange': np.array([rms_times[0], rms_times[-1]]),\r\n 'xaxis': xaxis,\r\n 'title': format + ' RMS (uV)'\r\n }\r\n\r\n # Probe data\r\n rms_avg = (np.mean(rms_amps, axis=0)[self.chn_ind]) * 1e6\r\n probe_levels = np.quantile(rms_avg, [0.1, 0.9])\r\n probe_img, probe_scale, probe_offset = self.arrange_channels2banks(rms_avg)\r\n\r\n data_probe = {\r\n 'img': probe_img,\r\n 'scale': probe_scale,\r\n 'offset': probe_offset,\r\n 'levels': probe_levels,\r\n 'cmap': cmap,\r\n 'xrange': np.array([0 * BNK_SIZE, (N_BNK) * BNK_SIZE]),\r\n 'title': format + ' RMS (uV)'\r\n }\r\n\r\n return data_img, data_probe\r\n\r\n def get_lfp_spectrum_data(self):\r\n freq_bands = np.vstack(([0, 4], [4, 10], [10, 30], [30, 80], [80, 200]))\r\n data_probe = {}\r\n if not self.lfp_data_status:\r\n data_img = None\r\n for freq in freq_bands:\r\n lfp_band_data = {f\"{freq[0]} - {freq[1]} Hz\": None}\r\n data_probe.update(lfp_band_data)\r\n\r\n return data_img, data_probe\r\n else:\r\n # Power spectrum image\r\n freq_range = [0, 300]\r\n freq_idx = np.where((self.lfp_freq >= freq_range[0]) &\r\n (self.lfp_freq < freq_range[1]))[0]\r\n _lfp = np.take(self.lfp_power[freq_idx], self.chn_ind, axis=1)\r\n _lfp_dB = 10 * np.log10(_lfp)\r\n _, self.chn_depth, chn_count = np.unique(self.chn_coords[:, 1], return_index=True,\r\n return_counts=True)\r\n self.chn_depth_eq = np.copy(self.chn_depth)\r\n self.chn_depth_eq[np.where(chn_count == 2)] += 1\r\n\r\n def 
avg_chn_depth(a):\r\n return(np.mean([a[self.chn_depth], a[self.chn_depth_eq]], axis=0))\r\n\r\n img = np.apply_along_axis(avg_chn_depth, 1, _lfp_dB)\r\n levels = np.quantile(img, [0.1, 0.9])\r\n xscale = (freq_range[-1] - freq_range[0]) / img.shape[0]\r\n yscale = (np.max(self.chn_coords[:, 1]) - np.min(self.chn_coords[:, 1])) / img.shape[1]\r\n\r\n data_img = {\r\n 'img': img,\r\n 'scale': np.array([xscale, yscale]),\r\n 'levels': levels,\r\n 'offset': np.array([0, 0]),\r\n 'cmap': 'viridis',\r\n 'xrange': np.array([freq_range[0], freq_range[-1]]),\r\n 'xaxis': 'Frequency (Hz)',\r\n 'title': 'PSD (dB)'\r\n }\r\n\r\n # Power spectrum in bands on probe\r\n for freq in freq_bands:\r\n freq_idx = np.where((self.lfp_freq >= freq[0]) & (self.lfp_freq < freq[1]))[0]\r\n lfp_avg = np.mean(self.lfp_power[freq_idx], axis=0)[self.chn_ind]\r\n lfp_avg_dB = 10 * np.log10(lfp_avg)\r\n probe_img, probe_scale, probe_offset = self.arrange_channels2banks(lfp_avg_dB)\r\n probe_levels = np.quantile(lfp_avg_dB, [0.1, 0.9])\r\n\r\n lfp_band_data = {f\"{freq[0]} - {freq[1]} Hz\": {\r\n 'img': probe_img,\r\n 'scale': probe_scale,\r\n 'offset': probe_offset,\r\n 'levels': probe_levels,\r\n 'cmap': 'viridis',\r\n 'xaxis': 'Time (s)',\r\n 'xrange': np.array([0 * BNK_SIZE, (N_BNK) * BNK_SIZE]),\r\n 'title': f\"{freq[0]} - {freq[1]} Hz (dB)\"}\r\n }\r\n data_probe.update(lfp_band_data)\r\n\r\n return data_img, data_probe\r\n\r\n def get_rfmap_data(self):\r\n data_img = dict()\r\n if not self.rfmap_data_status:\r\n return data_img, None\r\n else:\r\n\r\n (rf_map_times, rf_map_pos,\r\n rf_stim_frames) = passive.get_on_off_times_and_positions(self.rf_map)\r\n\r\n rf_map, depths = \\\r\n passive.get_rf_map_over_depth(rf_map_times, rf_map_pos, rf_stim_frames,\r\n self.spikes['times'][self.spike_idx][self.kp_idx],\r\n self.spikes['depths'][self.spike_idx][self.kp_idx],\r\n d_bin=160)\r\n rfs_svd = passive.get_svd_map(rf_map)\r\n img = dict()\r\n img['on'] = np.vstack(rfs_svd['on'])\r\n img['off'] = np.vstack(rfs_svd['off'])\r\n yscale = ((np.max(self.chn_coords[:, 1]) - np.min(\r\n self.chn_coords[:, 1])) / img['on'].shape[0])\r\n xscale = 1\r\n levels = np.quantile(np.c_[img['on'], img['off']], [0, 1])\r\n\r\n sub_type = ['on', 'off']\r\n for sub in sub_type:\r\n sub_data = {sub: {\r\n 'img': [img[sub].T],\r\n 'scale': [np.array([xscale, yscale])],\r\n 'levels': levels,\r\n 'offset': [np.array([0, 0])],\r\n 'cmap': 'viridis',\r\n 'xrange': np.array([0, 15]),\r\n 'xaxis': 'Position',\r\n 'title': 'rfmap (dB)'}\r\n }\r\n data_img.update(sub_data)\r\n\r\n return data_img, depths\r\n\r\n def get_passive_events(self):\r\n stim_keys = ['valveOn', 'toneOn', 'noiseOn', 'leftGabor', 'rightGabor']\r\n data_img = dict()\r\n if not self.passive_data_status and not self.gabor_data_status:\r\n return data_img\r\n elif not self.passive_data_status and self.gabor_data_status:\r\n stim_types = ['leftGabor', 'rightGabor']\r\n stims = self.vis_stim\r\n elif self.passive_data_status and not self.gabor_data_status:\r\n stim_types = ['valveOn', 'toneOn', 'noiseOn']\r\n stims = {stim_type: self.aud_stim[stim_type] for stim_type in stim_types}\r\n else:\r\n stim_types = stim_keys\r\n stims = {stim_type: self.aud_stim[stim_type] for stim_type in stim_types[0:3]}\r\n stims.update(self.vis_stim)\r\n\r\n base_stim = 1\r\n pre_stim = 0.4\r\n post_stim = 1\r\n stim_events = passive.get_stim_aligned_activity(stims, self.spikes['times'][self.spike_idx]\r\n [self.kp_idx], self.spikes['depths']\r\n [self.spike_idx][self.kp_idx],\r\n pre_stim=pre_stim, 
post_stim=post_stim,\r\n base_stim=base_stim)\r\n\r\n for stim_type, z_score in stim_events.items():\r\n xscale = (post_stim + pre_stim) / z_score.shape[1]\r\n yscale = ((np.max(self.chn_coords[:, 1]) - np.min(\r\n self.chn_coords[:, 1])) / z_score.shape[0])\r\n\r\n levels = [-10, 10]\r\n\r\n stim_data = {stim_type: {\r\n 'img': z_score.T,\r\n 'scale': np.array([xscale, yscale]),\r\n 'levels': levels,\r\n 'offset': np.array([-1 * pre_stim, 0]),\r\n 'cmap': 'bwr',\r\n 'xrange': [-1 * pre_stim, post_stim],\r\n 'xaxis': 'Time from Stim Onset (s)',\r\n 'title': 'Firing rate (z score)'}\r\n }\r\n data_img.update(stim_data)\r\n\r\n return data_img\r\n\r\n def get_autocorr(self, clust_idx):\r\n idx = np.where(self.spikes['clusters'] == self.clust_id[clust_idx])[0]\r\n autocorr = xcorr(self.spikes['times'][idx], self.spikes['clusters'][idx],\r\n AUTOCORR_BIN_SIZE, AUTOCORR_WIN_SIZE)\r\n\r\n return autocorr[0, 0, :]\r\n\r\n def get_template_wf(self, clust_idx):\r\n template_wf = (self.clusters['waveforms'][self.clust_id[clust_idx], :, 0])\r\n return template_wf * 1e6\r\n\r\n def arrange_channels2banks(self, data):\r\n Y_OFFSET = 20\r\n bnk_data = []\r\n bnk_scale = np.empty((N_BNK, 2))\r\n bnk_offset = np.empty((N_BNK, 2))\r\n for iX, x in enumerate(np.unique(self.chn_coords[:, 0])):\r\n bnk_idx = np.where(self.chn_coords[:, 0] == x)[0]\r\n bnk_vals = data[bnk_idx]\r\n _bnk_data = np.reshape(bnk_vals, (bnk_vals.size, 1)).T\r\n _bnk_yscale = ((np.max(self.chn_coords[bnk_idx, 1]) -\r\n np.min(self.chn_coords[bnk_idx, 1])) / _bnk_data.shape[1])\r\n _bnk_xscale = BNK_SIZE / _bnk_data.shape[0]\r\n _bnk_yoffset = np.min(self.chn_coords[bnk_idx, 1]) - Y_OFFSET\r\n _bnk_xoffset = BNK_SIZE * iX\r\n\r\n bnk_data.append(_bnk_data)\r\n bnk_scale[iX, :] = np.array([_bnk_xscale, _bnk_yscale])\r\n bnk_offset[iX, :] = np.array([_bnk_xoffset, _bnk_yoffset])\r\n\r\n return bnk_data, bnk_scale, bnk_offset\r\n\r\n def compute_spike_average(self, spike_clusters, spike_depth, spike_amp):\r\n clust, inverse, counts = np.unique(spike_clusters, return_inverse=True, return_counts=True)\r\n _spike_depth = scipy.sparse.csr_matrix((spike_depth, (inverse,\r\n np.zeros(inverse.size, dtype=int))))\r\n _spike_amp = scipy.sparse.csr_matrix((spike_amp, (inverse,\r\n np.zeros(inverse.size, dtype=int))))\r\n spike_depth_avg = np.ravel(_spike_depth.toarray()) / counts\r\n spike_amp_avg = np.ravel(_spike_amp.toarray()) / counts\r\n self.clust_id = clust\r\n return clust, spike_depth_avg, spike_amp_avg, counts\r\n\r\n def compute_timescales(self):\r\n self.t_autocorr = 1e3 * np.arange((AUTOCORR_WIN_SIZE / 2) - AUTOCORR_WIN_SIZE,\r\n (AUTOCORR_WIN_SIZE / 2) + AUTOCORR_BIN_SIZE,\r\n AUTOCORR_BIN_SIZE)\r\n n_template = self.clusters['waveforms'][0, :, 0].size\r\n self.t_template = 1e3 * (np.arange(n_template)) / FS\r\n\r\n def normalise_data(self, data, lquant=0, uquant=1):\r\n levels = np.quantile(data, [lquant, uquant])\r\n if np.min(data) < 0:\r\n data = data + np.abs(np.min(data))\r\n norm_data = data / np.max(data)\r\n norm_levels = np.quantile(norm_data, [lquant, uquant])\r\n norm_data[np.where(norm_data < norm_levels[0])] = 0\r\n norm_data[np.where(norm_data > norm_levels[1])] = 1\r\n\r\n return norm_data, levels\r\n" ]
[ [ "numpy.quantile", "numpy.median", "numpy.copy", "numpy.min", "numpy.mean", "numpy.where", "numpy.apply_along_axis", "numpy.max", "numpy.divide", "numpy.empty", "numpy.seterr", "numpy.take", "numpy.arange", "numpy.log10", "numpy.vstack", "numpy.array", "numpy.reshape", "numpy.zeros", "matplotlib.cm.get_cmap", "numpy.corrcoef", "numpy.isnan", "numpy.any", "numpy.linspace", "numpy.unique", "numpy.isin" ] ]
dezren39/manim
[ "80d7742446c588dc296bd3afb3465a63b63383db" ]
[ "utils/space_ops.py" ]
[ "import numpy as np\n\nfrom constants import OUT\nfrom constants import RIGHT\n\n# Matrix operations\n\n\ndef thick_diagonal(dim, thickness=2):\n row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))\n col_indices = np.transpose(row_indices)\n return (np.abs(row_indices - col_indices) < thickness).astype('uint8')\n\n\ndef rotation_matrix(angle, axis):\n \"\"\"\n Rotation in R^3 about a specified axis of rotation.\n \"\"\"\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])\n\n\ndef rotation_about_z(angle):\n return [\n [np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]\n ]\n\n\ndef z_to_vector(vector):\n \"\"\"\n Returns some matrix in SO(3) which takes the z-axis to the\n (normalized) vector provided as an argument\n \"\"\"\n norm = np.linalg.norm(vector)\n if norm == 0:\n return np.identity(3)\n v = np.array(vector) / norm\n phi = np.arccos(v[2])\n if any(v[:2]):\n # projection of vector to unit circle\n axis_proj = v[:2] / np.linalg.norm(v[:2])\n theta = np.arccos(axis_proj[0])\n if axis_proj[1] < 0:\n theta = -theta\n else:\n theta = 0\n phi_down = np.array([\n [np.cos(phi), 0, np.sin(phi)],\n [0, 1, 0],\n [-np.sin(phi), 0, np.cos(phi)]\n ])\n return np.dot(rotation_about_z(theta), phi_down)\n\n\ndef rotate_vector(vector, angle, axis=OUT):\n return np.dot(rotation_matrix(angle, axis), vector)\n\n\ndef angle_between(v1, v2):\n return np.arccos(np.dot(\n v1 / np.linalg.norm(v1),\n v2 / np.linalg.norm(v2)\n ))\n\n\ndef angle_of_vector(vector):\n \"\"\"\n Returns polar coordinate theta when vector is project on xy plane\n \"\"\"\n z = complex(*vector[:2])\n if z == 0:\n return 0\n return np.angle(complex(*vector[:2]))\n\n\ndef angle_between_vectors(v1, v2):\n \"\"\"\n Returns the angle between two 3D vectors.\n This angle will always be btw 0 and TAU/2.\n \"\"\"\n l1 = np.linalg.norm(v1)\n l2 = np.linalg.norm(v2)\n return np.arccos(np.dot(v1, v2) / (l1 * l2))\n\n\ndef project_along_vector(point, vector):\n matrix = np.identity(3) - np.outer(vector, vector)\n return np.dot(point, matrix.T)\n\n###\n\n\ndef compass_directions(n=4, start_vect=RIGHT):\n angle = 2 * np.pi / n\n return np.array([\n rotate_vector(start_vect, k * angle)\n for k in range(n)\n ])\n\n\ndef complex_to_R3(complex_num):\n return np.array((complex_num.real, complex_num.imag, 0))\n\n\ndef R3_to_complex(point):\n return complex(*point[:2])\n\n\ndef center_of_mass(points):\n points = [np.array(point).astype(\"float\") for point in points]\n return sum(points) / len(points)\n" ]
[ [ "numpy.array", "numpy.arccos", "numpy.dot", "numpy.linalg.norm", "numpy.sin", "numpy.identity", "numpy.transpose", "numpy.arange", "numpy.cos", "numpy.abs", "numpy.outer", "numpy.linalg.inv" ] ]
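A small standalone check of the geometry helpers above; it assumes the repo root is on sys.path so that the constants import inside space_ops resolves, and it avoids rotation_matrix(), which on Python 3 would additionally need reduce from functools (the file does not import it):

import numpy as np
from utils.space_ops import z_to_vector, angle_between_vectors, project_along_vector

target = np.array([1.0, 1.0, 1.0]) / np.sqrt(3)
R = z_to_vector(target)
print(np.allclose(np.dot(R, np.array([0.0, 0.0, 1.0])), target))  # True: the z-axis is sent onto target

print(angle_between_vectors(np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0])))  # ~pi/2
print(project_along_vector(np.array([1.0, 2.0, 3.0]), np.array([0.0, 0.0, 1.0])))   # z-component removed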
justachetan/scientific-computing
[ "e8493b5308c337ea8965a5f96cdd49def94801e0" ]
[ "a3/problem_4d.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom problem_4a import make_data\nfrom problem_4b import pca\n\ndef main():\n\tdata = make_data()\n\tpcs, u, s, vh, Y = pca(data.copy())\n\ts_d = s.copy()\n\ts_d[-1] = 0\n\tY_d = np.dot(np.dot(u, np.diag(s_d)), vh)\n\tdata_d = Y_d * np.sqrt(Y.shape[1] + 1) + np.mean(data, axis=1).reshape(Y_d.shape[0], 1)\n\tpcs_d, u_d, s_dd, vh_d, Y_d = pca(data_d.copy())\n\tmean_d = np.mean(data, axis=1)\n\tplt.figure(figsize=(8, 4))\n\tplt.scatter(data_d[0], data_d[1])\n\tplt.gca().set_aspect(\"equal\")\n\tplt.arrow(mean_d[0], mean_d[1], pcs_d[:, 0][0], pcs_d[:, 0][1], head_width=0.05, head_length=0.1, fc='k', ec='k')\n\tplt.arrow(mean_d[0], mean_d[1], pcs_d[:, 1][0], pcs_d[:, 1][1], head_width=0.05, head_length=0.1, fc='k', ec='k')\n\tplt.xlabel(\"x\")\n\tplt.ylabel(\"y\")\n\tplt.title(\"Reformed Dataset with Principal Components\")\n\tplt.savefig(\"problem_4d.png\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "matplotlib.pyplot.arrow", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.sqrt", "matplotlib.pyplot.scatter", "matplotlib.pyplot.gca", "numpy.diag" ] ]
asmyoo/MSAP
[ "0ed89f90d67260892a8c4d945504f3b0a2096d36" ]
[ "msap/modeling/model_selection/preprocessing/preprocessor.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Data preprocessing object.\n\nAuthors:\n Fangzhou Li - fzli@ucdavis.edu\n\nTodo:\n - docstring\n\n\"\"\"\nimport pandas as pd\n\nfrom .scale import standardize, minmax_normalize, robust_normalize\nfrom .impute import knn_impute, iterative_impute, missforest\nfrom .detect_outliers import isolation_forest, local_outlier_factor\nfrom msap.utils import (\n dump_X_and_y)\n\n\nclass Preprocessor:\n \"\"\"Class of data preprocessing object.\n\n Args:\n clf: A classifier with `fit` method. Optional if `skip_fs` is True.\n scale_mode: Specification for a scaling method.\n {'standard',\n 'minmax',\n 'robust'}, default='standard'.\n impute_mode: Specification for a missing value imputation method.\n {'knn',\n 'iterative',\n 'missforest'}, default='knn'.\n outlier_mode: Specification for an outlier detection method.\n {'isolation_forest',\n 'lof'}, default='isolation_forest'.\n cat_vars: List of indices of columns that are categorical.\n If use, assumes these variables are one hot encoded\n already.\n skip_fs: Skip feature selection if True, default=True.\n\n Attributes:\n TODO\n \"\"\"\n\n def __init__(\n self,\n scale_mode='standard',\n impute_mode='knn',\n outlier_mode='isolation_forest',\n cat_vars=None,\n random_seed=None,\n dump_si_filename=None):\n self.scale_mode = scale_mode\n self.impute_mode = impute_mode\n self.outlier_mode = outlier_mode\n self.cat_vars = cat_vars\n self.random_seed = random_seed\n self.dump_si_filename = dump_si_filename\n\n def scale(self, X_df, cat_vars):\n \"\"\"Scale features.\n\n Args:\n X_df (pd.DataFrame): Input data.\n\n Returns:\n (pd.DataFrame): Scaled data.\n\n \"\"\"\n if self.scale_mode == 'standard':\n X_new_df = standardize(X_df, cat_vars)\n elif self.scale_mode == 'minmax':\n X_new_df = minmax_normalize(X_df, cat_vars)\n elif self.scale_mode == 'robust':\n X_new_df = robust_normalize(X_df, cat_vars)\n else:\n raise ValueError(f\"Invalid scaling mode: {self.scale_mode}\")\n\n return X_new_df\n\n def impute(self, X_df, cat_vars):\n \"\"\"Impute missing values.\n\n Args:\n X_df (pd.DataFrame): Input data.\n cat_vars (list): List of categorical variables\n\n Returns:\n (pd.DataFrame): Imputed data.\n\n \"\"\"\n if self.impute_mode == 'knn':\n X_new_df = knn_impute(X_df)\n elif self.impute_mode == 'iterative':\n X_new_df = iterative_impute(X_df, self.random_seed)\n elif self.impute_mode == 'missforest':\n X_new_df = missforest(X_df,\n cat_vars, self.random_seed)\n else:\n raise ValueError(f\"Invalid imputation mode: {self.impute_mode}\")\n\n return X_new_df\n\n def remove_outliers(self, X_df, y_se):\n \"\"\"Detect outliers.\n\n Args:\n X_df (pd.DataFrame): Input data.\n y_se (pd.Series): Target data.\n\n Returns:\n (list): Indices of outliers.\n\n \"\"\"\n data = pd.concat([X_df, y_se], axis=1)\n\n if self.outlier_mode == 'isolation_forest':\n idxs_outlier = isolation_forest(data, self.random_seed)\n elif self.outlier_mode == 'lof':\n idxs_outlier = local_outlier_factor(data)\n elif self.outlier_mode == 'none':\n idxs_outlier = []\n else:\n raise ValueError(\n f\"Invalid outlier detection mode: {self.outlier_mode}\")\n\n idxs_inlier = [i for i in range(len(X_df))\n if i not in idxs_outlier]\n X_df = X_df.iloc[idxs_inlier]\n y_se = y_se.iloc[idxs_inlier]\n\n return X_df, y_se, idxs_outlier\n\n def preprocess(self, X_df, y_se):\n \"\"\"Preprocess input data.\n\n Args:\n X_df (pd.DataFrame): Input data.\n y_se (pd.Series): Target data.\n\n Returns:\n (pd.DataFrame): Preprocessed data.\n\n \"\"\"\n # only scale things that are NOT 
categorical, put the df back together\n X_df = self.scale(X_df, self.cat_vars)\n X_df = self.impute(X_df, self.cat_vars)\n if self.dump_si_filename is not None:\n dump_X_and_y(\n X=X_df,\n y=y_se,\n path_output_data=self.dump_si_filename)\n X_df, y_se, idxs_outlier = self.remove_outliers(X_df, y_se)\n\n # Remove all the constant columns.\n X_df = X_df.loc[:, (X_df != X_df.iloc[0]).any()]\n\n return X_df, y_se, idxs_outlier\n" ]
[ [ "pandas.concat" ] ]
VertNet/chrono
[ "681cf4626b84c1f29911e95fb8ee0f3e3ac3fdce" ]
[ "build/build-termlist.py" ]
[ "# Script to build Markdown pages that provide term metadata for complex vocabularies\n# Steve Baskauf 2020-06-28 CC0\n# This script merges static Markdown header and footer documents with term information tables (in Markdown) generated from data in the rs.tdwg.org repo from the TDWG Github site\n\nimport re\nimport requests # best library to manage HTTP transactions\nimport csv # library to read/write/parse CSV files\nimport json # library to convert JSON to Python data structures\nimport pandas as pd\n\n# -----------------\n# Configuration section\n# -----------------\n\n# This is the base URL for raw files from the branch of the repo that has been pushed to GitHub\ngithubBaseUri = 'https://raw.githubusercontent.com/tdwg/rs.tdwg.org/master/'\n\nheaderFileName = 'termlist-header.md'\nfooterFileName = 'termlist-footer.md'\noutFileName = '../docs/list/index.md'\n\n# This is a Python list of the database names of the term lists to be included in the document.\ntermLists = ['chronometricage', 'chronoiri']\n\n# NOTE! There may be problems unless every term list is of the same vocabulary type since the number of columns will differ\n# However, there probably aren't any circumstances where mixed types will be used to generate the same page.\nvocab_type = 1 # 1 is simple vocabulary, 2 is simple controlled vocabulary, 3 is c.v. with broader hierarchy\n\n# Terms in large vocabularies like Darwin and Audubon Cores may be organized into categories using tdwgutility_organizedInClass\n# If so, those categories can be used to group terms in the generated term list document.\norganized_in_categories = True\n\n# If organized in categories, the display_order list must contain the IRIs that are values of tdwgutility_organizedInClass\n# If not organized into categories, the value is irrelevant. 
There just needs to be one item in the list.\ndisplay_order = [ 'http://tdwg.org/chrono/terms/ChronometricAge', 'http://rs.tdwg.org/dwc/terms/attributes/UseWithIRI']\ndisplay_label = ['Chronometric Age', 'IRI-value terms']\ndisplay_comments = ['','']\ndisplay_id = ['chronometric_age', 'use_with_iri']\n\n# ---------------\n# Function definitions\n# ---------------\n\n# replace URL with link\n#\ndef createLinks(text):\n def repl(match):\n if match.group(1)[-1] == '.':\n return '<a href=\"' + match.group(1)[:-1] + '\">' + match.group(1)[:-1] + '</a>.'\n return '<a href=\"' + match.group(1) + '\">' + match.group(1) + '</a>'\n\n pattern = '(https?://[^\\s,;\\)\"]*)'\n result = re.sub(pattern, repl, text)\n return result\n\nprint('Retrieving term list metadata from GitHub')\nterm_lists_info = []\n\nframe = pd.read_csv(githubBaseUri + 'term-lists/term-lists.csv', na_filter=False)\nfor termList in termLists:\n term_list_dict = {'list_iri': termList}\n term_list_dict = {'database': termList}\n for index,row in frame.iterrows():\n if row['database'] == termList:\n term_list_dict['pref_ns_prefix'] = row['vann_preferredNamespacePrefix']\n term_list_dict['pref_ns_uri'] = row['vann_preferredNamespaceUri']\n term_list_dict['list_iri'] = row['list']\n term_lists_info.append(term_list_dict)\n#print(term_lists_info)\n\n# Create column list\ncolumn_list = ['pref_ns_prefix', 'pref_ns_uri', 'term_localName', 'label', 'rdfs_comment', 'dcterms_description', 'examples', 'term_modified', 'term_deprecated', 'rdf_type', 'tdwgutility_abcdEquivalence', 'replaces_term', 'replaces1_term']\nif vocab_type == 2:\n column_list += ['controlled_value_string']\nelif vocab_type == 3:\n column_list += ['controlled_value_string', 'skos_broader']\nif organized_in_categories:\n column_list.append('tdwgutility_organizedInClass')\ncolumn_list.append('version_iri')\n\nprint('Retrieving metadata about terms from all namespaces from GitHub')\n# Create list of lists metadata table\ntable_list = []\nfor term_list in term_lists_info:\n # retrieve versions metadata for term list\n versions_url = githubBaseUri + term_list['database'] + '-versions/' + term_list['database'] + '-versions.csv'\n versions_df = pd.read_csv(versions_url, na_filter=False)\n \n # retrieve current term metadata for term list\n data_url = githubBaseUri + term_list['database'] + '/' + term_list['database'] + '.csv'\n frame = pd.read_csv(data_url, na_filter=False)\n for index,row in frame.iterrows():\n row_list = [term_list['pref_ns_prefix'], term_list['pref_ns_uri'], row['term_localName'], row['label'], row['rdfs_comment'], row['dcterms_description'], row['examples'], row['term_modified'], row['term_deprecated'], row['rdf_type'], row['tdwgutility_abcdEquivalence'], row['replaces_term'], row['replaces1_term']]\n #row_list = [term_list['pref_ns_prefix'], term_list['pref_ns_uri'], row['term_localName'], row['label'], row['definition'], row['usage'], row['notes'], row['term_modified'], row['term_deprecated'], row['type']]\n if vocab_type == 2:\n row_list += [row['controlled_value_string']]\n elif vocab_type == 3:\n if row['skos_broader'] =='':\n row_list += [row['controlled_value_string'], '']\n else:\n row_list += [row['controlled_value_string'], term_list['pref_ns_prefix'] + ':' + row['skos_broader']]\n if organized_in_categories:\n row_list.append(row['tdwgutility_organizedInClass'])\n\n # Borrowed terms really don't have implemented versions. 
They may be lacking values for version_status.\n # In their case, their version IRI will be omitted.\n found = False\n for vindex, vrow in versions_df.iterrows():\n if vrow['term_localName']==row['term_localName'] and vrow['version_status']=='recommended':\n found = True\n version_iri = vrow['version']\n # NOTE: the current hack for non-TDWG terms without a version is to append # to the end of the term IRI\n if version_iri[len(version_iri)-1] == '#':\n version_iri = ''\n if not found:\n version_iri = ''\n row_list.append(version_iri)\n\n table_list.append(row_list)\n\nprint('processing data')\n# Turn list of lists into dataframe\nterms_df = pd.DataFrame(table_list, columns = column_list)\n\nterms_sorted_by_label = terms_df.sort_values(by='label')\n#terms_sorted_by_localname = terms_df.sort_values(by='term_localName')\n\n# This makes sort case insensitive\nterms_sorted_by_localname = terms_df.iloc[terms_df.term_localName.str.lower().argsort()]\n#terms_sorted_by_localname\nprint('done retrieving')\nprint()\n\nprint('Generating term index by CURIE')\ntext = '### 3.1 Index By Term Name\\n\\n'\ntext += '(See also [3.2 Index By Label](#32-index-by-label))\\n\\n'\n\ntext += '**Classes**\\n'\ntext += '\\n'\nfor row_index,row in terms_sorted_by_localname.iterrows():\n if row['rdf_type'] == 'http://www.w3.org/2000/01/rdf-schema#Class':\n curie = row['pref_ns_prefix'] + \":\" + row['term_localName']\n curie_anchor = curie.replace(':','_')\n text += '[' + curie + '](#' + curie_anchor + ') |\\n'\ntext = text[:len(text)-2] # remove final trailing vertical bar and newline\ntext += '\\n\\n' # put back removed newline\n\nfor category in range(0,len(display_order)):\n text += '**' + display_label[category] + '**\\n'\n text += '\\n'\n if organized_in_categories:\n filtered_table = terms_sorted_by_localname[terms_sorted_by_localname['tdwgutility_organizedInClass']==display_order[category]]\n filtered_table.reset_index(drop=True, inplace=True)\n else:\n filtered_table = terms_sorted_by_localname\n \n for row_index,row in filtered_table.iterrows():\n if row['rdf_type'] != 'http://www.w3.org/2000/01/rdf-schema#Class':\n curie = row['pref_ns_prefix'] + \":\" + row['term_localName']\n curie_anchor = curie.replace(':','_')\n text += '[' + curie + '](#' + curie_anchor + ') |\\n'\n text = text[:len(text)-2] # remove final trailing vertical bar and newline\n text += '\\n\\n' # put back removed newline\n\nindex_by_name = text\n\n#print(index_by_name)\n\nprint('Generating term index by label')\ntext = '\\n\\n'\n\n# Comment out the following two lines if there is no index by local names\ntext = '### 3.2 Index By Label\\n\\n'\ntext += '(See also [3.1 Index By Term Name](#31-index-by-term-name))\\n\\n'\n\ntext += '**Classes**\\n'\ntext += '\\n'\nfor row_index,row in terms_sorted_by_label.iterrows():\n if row['rdf_type'] == 'http://www.w3.org/2000/01/rdf-schema#Class':\n curie_anchor = row['pref_ns_prefix'] + \"_\" + row['term_localName']\n text += '[' + row['label'] + '](#' + curie_anchor + ') |\\n'\ntext = text[:len(text)-2] # remove final trailing vertical bar and newline\ntext += '\\n\\n' # put back removed newline\n\nfor category in range(0,len(display_order)):\n if organized_in_categories:\n text += '**' + display_label[category] + '**\\n'\n text += '\\n'\n filtered_table = terms_sorted_by_label[terms_sorted_by_label['tdwgutility_organizedInClass']==display_order[category]]\n filtered_table.reset_index(drop=True, inplace=True)\n else:\n filtered_table = terms_sorted_by_label\n \n for row_index,row in 
filtered_table.iterrows():\n if row_index == 0 or (row_index != 0 and row['label'] != filtered_table.iloc[row_index - 1].loc['label']): # this is a hack to prevent duplicate labels\n if row['rdf_type'] != 'http://www.w3.org/2000/01/rdf-schema#Class':\n curie_anchor = row['pref_ns_prefix'] + \"_\" + row['term_localName']\n text += '[' + row['label'] + '](#' + curie_anchor + ') |\\n'\n text = text[:len(text)-2] # remove final trailing vertical bar and newline\n text += '\\n\\n' # put back removed newline\n\nindex_by_label = text\n\n#print(index_by_label)\n\ndecisions_df = pd.read_csv('https://raw.githubusercontent.com/tdwg/rs.tdwg.org/master/decisions/decisions-links.csv', na_filter=False)\n\n# ---------------\n# generate a table for each term, with terms grouped by category\n# ---------------\n\nprint('Generating terms table')\n# generate the Markdown for the terms table\ntext = '## 4 Vocabulary\\n'\nif True:\n filtered_table = terms_sorted_by_localname\n\n#for category in range(0,len(display_order)):\n# if organized_in_categories:\n# text += '### 4.' + str(category + 1) + ' ' + display_label[category] + '\\n'\n# text += '\\n'\n# text += display_comments[category] # insert the comments for the category, if any.\n# filtered_table = terms_sorted_by_localname[terms_sorted_by_localname['tdwgutility_organizedInClass']==display_order[category]]\n# filtered_table.reset_index(drop=True, inplace=True)\n# else:\n# filtered_table = terms_sorted_by_localname\n\n for row_index,row in filtered_table.iterrows():\n text += '<table>\\n'\n curie = row['pref_ns_prefix'] + \":\" + row['term_localName']\n curieAnchor = curie.replace(':','_')\n text += '\\t<thead>\\n'\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<th colspan=\"2\"><a id=\"' + curieAnchor + '\"></a>Term Name ' + curie + '</th>\\n'\n text += '\\t\\t</tr>\\n'\n text += '\\t</thead>\\n'\n text += '\\t<tbody>\\n'\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Term IRI</td>\\n'\n uri = row['pref_ns_uri'] + row['term_localName']\n text += '\\t\\t\\t<td><a href=\"' + uri + '\">' + uri + '</a></td>\\n'\n text += '\\t\\t</tr>\\n'\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Modified</td>\\n'\n text += '\\t\\t\\t<td>' + row['term_modified'] + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if row['version_iri'] != '':\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Term version IRI</td>\\n'\n text += '\\t\\t\\t<td><a href=\"' + row['version_iri'] + '\">' + row['version_iri'] + '</a></td>\\n'\n text += '\\t\\t</tr>\\n'\n\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Label</td>\\n'\n text += '\\t\\t\\t<td>' + row['label'] + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if row['term_deprecated'] != '':\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td></td>\\n'\n text += '\\t\\t\\t<td><strong>This term is deprecated and should no longer be used.</strong></td>\\n'\n text += '\\t\\t</tr>\\n'\n\n for dep_index,dep_row in filtered_table.iterrows():\n if dep_row['replaces_term'] == uri:\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Is replaced by</td>\\n'\n text += '\\t\\t\\t<td><a href=\"#' + dep_row['pref_ns_prefix'] + \"_\" + dep_row['term_localName'] + '\">' + dep_row['pref_ns_uri'] + dep_row['term_localName'] + '</a></td>\\n'\n text += '\\t\\t</tr>\\n'\n if dep_row['replaces1_term'] == uri:\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Is replaced by</td>\\n'\n text += '\\t\\t\\t<td><a href=\"#' + dep_row['pref_ns_prefix'] + \"_\" + dep_row['term_localName'] + '\">' + dep_row['pref_ns_uri'] + dep_row['term_localName'] + '</a></td>\\n'\n text += 
'\\t\\t</tr>\\n'\n\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Definition</td>\\n'\n text += '\\t\\t\\t<td>' + row['rdfs_comment'] + '</td>\\n'\n #text += '\\t\\t\\t<td>' + row['definition'] + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if row['dcterms_description'] != '':\n #if row['notes'] != '':\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Notes</td>\\n'\n text += '\\t\\t\\t<td>' + createLinks(row['dcterms_description']) + '</td>\\n'\n #text += '\\t\\t\\t<td>' + createLinks(row['notes']) + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if row['examples'] != '':\n #if row['usage'] != '':\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Examples</td>\\n'\n text += '\\t\\t\\t<td>' + createLinks(row['examples']) + '</td>\\n'\n #text += '\\t\\t\\t<td>' + createLinks(row['usage']) + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if row['tdwgutility_abcdEquivalence'] != '':\n #if row['usage'] != '':\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>ABCD equivalence</td>\\n'\n text += '\\t\\t\\t<td>' + createLinks(row['tdwgutility_abcdEquivalence']) + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if vocab_type == 2 or vocab_type ==3: # controlled vocabulary\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Controlled value</td>\\n'\n text += '\\t\\t\\t<td>' + row['controlled_value_string'] + '</td>\\n'\n text += '\\t\\t</tr>\\n'\n\n if vocab_type == 3 and row['skos_broader'] != '': # controlled vocabulary with skos:broader relationships\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Has broader concept</td>\\n'\n curieAnchor = row['skos_broader'].replace(':','_')\n text += '\\t\\t\\t<td><a href=\"#' + curieAnchor + '\">' + row['skos_broader'] + '</a></td>\\n'\n text += '\\t\\t</tr>\\n'\n\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Type</td>\\n'\n if row['rdf_type'] == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property':\n #if row['type'] == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property':\n text += '\\t\\t\\t<td>Property</td>\\n'\n elif row['rdf_type'] == 'http://www.w3.org/2000/01/rdf-schema#Class':\n #elif row['type'] == 'http://www.w3.org/2000/01/rdf-schema#Class':\n text += '\\t\\t\\t<td>Class</td>\\n'\n elif row['rdf_type'] == 'http://www.w3.org/2004/02/skos/core#Concept':\n #elif row['type'] == 'http://www.w3.org/2004/02/skos/core#Concept':\n text += '\\t\\t\\t<td>Concept</td>\\n'\n else:\n text += '\\t\\t\\t<td>' + row['rdf_type'] + '</td>\\n' # this should rarely happen\n #text += '\\t\\t\\t<td>' + row['type'] + '</td>\\n' # this should rarely happen\n text += '\\t\\t</tr>\\n'\n\n # Look up decisions related to this term\n for drow_index,drow in decisions_df.iterrows():\n if drow['linked_affected_resource'] == uri:\n text += '\\t\\t<tr>\\n'\n text += '\\t\\t\\t<td>Executive Committee decision</td>\\n'\n text += '\\t\\t\\t<td><a href=\"http://rs.tdwg.org/decisions/' + drow['decision_localName'] + '\">http://rs.tdwg.org/decisions/' + drow['decision_localName'] + '</a></td>\\n'\n text += '\\t\\t</tr>\\n' \n\n text += '\\t</tbody>\\n'\n text += '</table>\\n'\n text += '\\n'\n text += '\\n'\nterm_table = text\nprint('done generating')\nprint()\n\n#print(term_table)\n\nprint('Merging term table with header and footer and saving file')\n#text = index_by_label + term_table\ntext = index_by_name + index_by_label + term_table\n\n# read in header and footer, merge with terms table, and output\n\nheaderObject = open(headerFileName, 'rt', encoding='utf-8')\nheader = headerObject.read()\nheaderObject.close()\n\nfooterObject = open(footerFileName, 'rt', encoding='utf-8')\nfooter = 
footerObject.read()\nfooterObject.close()\n\noutput = header + text + footer\noutputObject = open(outFileName, 'wt', encoding='utf-8')\noutputObject.write(output)\noutputObject.close()\n \nprint('done')\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
JasonKessler/shifterator
[ "092879f88c2c919ada4a0a5fd3a0df7d8ffb2ac6" ]
[ "shifterator/shifterator.py" ]
[ "import sys\nimport warnings\n\nimport matplotlib.pyplot as plt\n\nfrom . import helper, plotting\n\n\nclass Shift:\n \"\"\"\n Shift object for calculating weighted scores of two systems of types,\n and the shift between them\n\n Parameters\n ----------\n type2freq_1, type2freq_2: dict\n Keys are types of a system and values are frequencies of those types\n type2score_1, type2score_2: dict or str, optional\n If dict, types are keys and values are scores associated with each\n type. If str, the name of a score lexicon included in Shifterator.\n If None and other type2score is None, defaults to uniform scores\n across types. Otherwise defaults to the other type2score dict\n reference_value: str or float, optional\n The reference score to use to partition scores into two different\n regimes. If 'average', uses the average score according to type2freq_1\n and type2score_1. If None and a lexicon is selected for type2score,\n uses the respective middle point in that lexicon's scale. Otherwise\n if None, uses zero as the reference point\n handle_missing_scores: str, optional\n If 'error', throws an error whenever a word has a score in one score\n dictionary but not the other. If 'exclude', excludes any word that is\n missing a score in one score dictionary from all word shift\n calculations, regardless if it may have a score in the other dictionary.\n If 'adopt' and the score is missing in one dictionary, then uses the\n score from the other dictionary if it is available\n stop_lens: iterable of 2-tuples, optional\n Denotes intervals of scores that should be excluded from word shifts\n calculations. Types with scores in this range will be excluded from word\n shift calculations\n stop_words: set, optional\n Denotes words that should be excluded from calculation of word shifts\n normalization: str, optional\n If 'variation', normalizes shift scores so that the sum of\n their absolute values sums to 1. If 'trajectory', normalizes\n them so that the sum of shift scores is 1 or -1. 
The trajectory\n normalization cannot be applied if the total shift score is 0, so\n scores are left unnormalized if the total is 0 and 'trajectory' is\n specified\n \"\"\"\n\n def __init__(\n self,\n type2freq_1,\n type2freq_2,\n type2score_1=None,\n type2score_2=None,\n reference_value=None,\n handle_missing_scores=\"error\",\n stop_lens=None,\n stop_words=None,\n normalization=\"variation\",\n ):\n # Set type2score dictionaries\n if type2score_1 is not None and type2score_2 is not None:\n self.type2score_1, lex_ref = helper.get_score_dictionary(type2score_1)\n self.type2score_2, _ = helper.get_score_dictionary(type2score_2)\n if type2score_1 != type2score_2:\n self.show_score_diffs = True\n else:\n self.show_score_diffs = False\n elif type2score_1 is not None:\n self.type2score_1, lex_ref = helper.get_score_dictionary(type2score_1)\n self.type2score_2 = self.type2score_1\n self.show_score_diffs = False\n elif type2score_2 is not None:\n self.type2score_2, lex_ref = helper.get_score_dictionary(type2score_2)\n self.type2score_1 = self.type2score_2\n self.show_score_diffs = False\n else:\n self.type2score_1 = {t: 1 for t in type2freq_1}\n self.type2score_2 = {t: 1 for t in type2freq_2}\n self.show_score_diffs = False\n\n # Preprocess words according to score rules, stop words, and stop lens\n self.handle_missing_scores = handle_missing_scores\n if stop_lens is None:\n self.stop_lens = []\n else:\n self.stop_lens = stop_lens\n if stop_words is None:\n self.stop_words = set()\n else:\n self.stop_words = stop_words\n preprocessed = helper.preprocess_words_scores(type2freq_1,\n self.type2score_1,\n type2freq_2,\n self.type2score_2,\n self.stop_lens,\n self.stop_words,\n self.handle_missing_scores)\n self.type2freq_1 = preprocessed[0]\n self.type2freq_2 = preprocessed[1]\n self.type2score_1 = preprocessed[2]\n self.type2score_2 = preprocessed[3]\n self.types = preprocessed[4]\n self.filtered_types = preprocessed[5]\n self.no_score_types = preprocessed[6]\n self.adopted_score_types = preprocessed[7]\n\n # Set reference value\n if reference_value is not None:\n if reference_value == \"average\":\n self.reference_value = self.get_weighted_score(\n self.type2freq_1, self.type2score_1\n )\n else:\n self.reference_value = reference_value\n else:\n if lex_ref is not None:\n self.reference_value = lex_ref\n else:\n self.reference_value = 0\n\n # Get shift scores\n self.normalization = normalization\n self.get_shift_scores(details=False)\n\n def get_weighted_score(self, type2freq, type2score):\n \"\"\"\n Calculate an average score according to a set of frequencies and scores\n\n Parameters\n ----------\n type2freq: dict\n Keys are types and values are frequencies\n type2score: dict\n Keys are types and values are scores\n\n Returns\n -------\n s_avg: float\n Average weighted score of system\n \"\"\"\n # Check we have a vocabulary to work with\n types = set(type2freq.keys()).intersection(set(type2score.keys()))\n if len(types) == 0:\n return\n # Get weighted score and total frequency\n f_total = sum([freq for t, freq in type2freq.items() if t in types])\n s_weighted = sum(\n [type2score[t] * freq for t, freq in type2freq.items() if t in types]\n )\n s_avg = s_weighted / f_total\n return s_avg\n\n def get_shift_scores(self, details=False):\n \"\"\"\n Calculates the type shift scores between the two systems\n\n Parameters\n ----------\n details: boolean\n If true, returns each of the major components of each type's shift\n score, along with the overall shift scores. 
Otherwise, only returns\n the overall shift scores\n\n Returns\n -------\n type2p_diff: dict\n If details is True, returns dict where keys are types and values are\n the difference in relatively frequency, i.e. p_i,2 - p_i,1 for type i\n type2s_diff: dict,\n If details is True, returns dict where keys are types and values are\n the relative differences in score, i.e. s_i,2 - s_i,1 for type i\n type2p_avg: dict,\n If details is True, returns dict where keys are types and values are\n the average relative frequencies, i.e. 0.5*(p_i,1+p_i,2) for type i\n type2s_ref_diff: dict\n If details is True, returns dict where keys are types and values are\n relative deviation from reference score, i.e. 0.5*(s_i,2+s_i,1)-s_ref\n for type i\n type2shift_score: dict\n Keys are types and values are shift scores. The overall shift scores\n are normalized according to the `normalization` parameter of the\n Shift object\n \"\"\"\n s_avg_ref = self.reference_value\n\n # Get total frequencies\n total_freq_1 = sum(\n [freq for t, freq in self.type2freq_1.items() if t in self.types]\n )\n total_freq_2 = sum(\n [freq for t, freq in self.type2freq_2.items() if t in self.types]\n )\n # Get relative frequency of types in both systems\n type2p_1 = {\n t: self.type2freq_1[t] / total_freq_1 if t in self.type2freq_1 else 0\n for t in self.types\n }\n type2p_2 = {\n t: self.type2freq_2[t] / total_freq_2 if t in self.type2freq_2 else 0\n for t in self.types\n }\n\n # Calculate shift components\n type2p_avg = dict()\n type2p_diff = dict()\n type2s_diff = dict()\n type2s_ref_diff = dict()\n type2shift_score = dict()\n for t in self.types:\n type2p_avg[t] = 0.5 * (type2p_1[t] + type2p_2[t])\n type2p_diff[t] = type2p_2[t] - type2p_1[t]\n type2s_diff[t] = self.type2score_2[t] - self.type2score_1[t]\n type2s_ref_diff[t] = (\n 0.5 * (self.type2score_2[t] + self.type2score_1[t]) - s_avg_ref\n )\n type2shift_score[t] = (\n type2p_diff[t] * type2s_ref_diff[t] + type2s_diff[t] * type2p_avg[t]\n )\n\n # Normalize the total shift scores\n total_diff = sum(type2shift_score.values())\n self.diff = total_diff\n if self.normalization == \"variation\":\n abs_sum = sum(abs(s) for s in type2shift_score.values())\n self.norm = abs_sum\n elif self.normalization == \"trajectory\" and total_diff != 0:\n self.norm = abs(total_diff)\n else:\n self.norm = 1\n type2shift_score = {\n t: shift_score / self.norm for t, shift_score in type2shift_score.items()\n }\n\n # Set results in shift object\n self.type2p_diff = type2p_diff\n self.type2s_diff = type2s_diff\n self.type2p_avg = type2p_avg\n self.type2s_ref_diff = type2s_ref_diff\n self.type2shift_score = type2shift_score\n # Return shift scores\n if details:\n return (\n type2p_diff,\n type2s_diff,\n type2p_avg,\n type2s_ref_diff,\n type2shift_score,\n )\n else:\n return type2shift_score\n\n def get_shift_component_sums(self):\n \"\"\"\n Calculates the cumulative contribution of each component of the different\n kinds of shift scores.\n\n Returns\n -------\n Dictionary with six keys, one for each of the different component\n contributions: pos_s_pos_p, pos_s_neg_p, neg_s_pos_p, neg_s_neg_p,\n pos_s, neg_s. 
Values are the total contribution from that component\n across all types\n \"\"\"\n # Get shift scores\n if self.type2shift_score is None:\n shift_scores = self.get_shift_scores(details=True)\n else:\n shift_scores = [\n (\n t,\n self.type2p_diff[t],\n self.type2s_diff[t],\n self.type2p_avg[t],\n self.type2s_ref_diff[t],\n self.type2shift_score[t],\n )\n for t in self.type2s_diff\n ]\n\n # Sum up components of shift score\n pos_s_pos_p = 0\n pos_s_neg_p = 0\n neg_s_pos_p = 0\n neg_s_neg_p = 0\n pos_s = 0\n neg_s = 0\n for t, p_diff, s_diff, p_avg, s_ref_diff, _ in shift_scores:\n # Get contribution of p_diff*s_ref_diff term\n if s_ref_diff > 0:\n if p_diff > 0:\n pos_s_pos_p += p_diff * s_ref_diff\n else:\n pos_s_neg_p += p_diff * s_ref_diff\n else:\n if p_diff > 0:\n neg_s_pos_p += p_diff * s_ref_diff\n else:\n neg_s_neg_p += p_diff * s_ref_diff\n # Get contribution of s_diff term\n if s_diff > 0:\n pos_s += p_avg * s_diff\n else:\n neg_s += p_avg * s_diff\n return {\n \"pos_s_pos_p\": pos_s_pos_p,\n \"pos_s_neg_p\": pos_s_neg_p,\n \"neg_s_pos_p\": neg_s_pos_p,\n \"neg_s_neg_p\": neg_s_neg_p,\n \"pos_s\": pos_s,\n \"neg_s\": neg_s,\n }\n\n def get_shift_graph(\n self,\n ax=None,\n top_n=50,\n text_size_inset=True,\n cumulative_inset=True,\n show_plot=True,\n filename=None,\n **kwargs\n ):\n \"\"\"\n Plot the shift graph between two systems of types\n\n Parameters\n ----------\n ax: matplotlib.pyplot.axes.Axes, optional\n Axes to draw figure onto. Will create new axes if none are given.\n top_n: int, optional\n Display the top_n types as sorted by their absolute contribution to\n the difference between systems\n cumulative_inset: bool, optional\n Whether to show an inset showing the cumulative contributions to the\n shift by ranked types\n text_size_inset: bool, optional\n Whether to show an inset showing the relative sizes of each system\n show_plot: bool, optional\n Whether to show plot when it is done being rendered\n filename: str, optional\n If not None, name of the file for saving the shift graph\n\n Returns\n -------\n ax\n Matplotlib ax of shift graph. 
Displays shift graph if show_plot=True\n \"\"\"\n # Set plotting parameters\n kwargs = plotting.get_plot_params(kwargs, self.show_score_diffs, self.diff)\n\n # Get type score components\n type_scores = [\n (\n t,\n self.type2p_diff[t],\n self.type2s_diff[t],\n self.type2p_avg[t],\n self.type2s_ref_diff[t],\n self.type2shift_score[t],\n )\n for t in self.type2s_diff\n ]\n # Reverse sorting to get highest scores, then reverse top n for plotting\n type_scores = sorted(type_scores, key=lambda x: abs(x[-1]), reverse=True)[\n :top_n\n ]\n type_scores.reverse()\n\n # Get bar heights and colors\n bar_dims = plotting.get_bar_dims(type_scores, self.norm, kwargs)\n bar_colors = plotting.get_bar_colors(type_scores, kwargs)\n\n # Initialize plot\n if ax is None:\n _, ax = plt.subplots(figsize=(kwargs[\"width\"], kwargs[\"height\"]))\n ax.margins(kwargs[\"y_margin\"])\n # Plot type contributions\n ax = plotting.plot_contributions(ax, top_n, bar_dims, bar_colors, kwargs)\n # Plot total sum contributions\n total_comp_sums = self.get_shift_component_sums()\n bar_order = plotting.get_bar_order(kwargs)\n ax, comp_bar_heights, bar_order = plotting.plot_total_contribution_sums(\n ax, total_comp_sums, bar_order, top_n, bar_dims, kwargs\n )\n # Get labels for bars\n type_labels = [t for (t, _, _, _, _, _) in type_scores]\n # Add indicator if type borrowed a score\n m_sym = kwargs[\"missing_symbol\"]\n type_labels = [\n t + m_sym if t in self.adopted_score_types else t for t in type_labels\n ]\n # Get labels for total contribution bars\n bar_labels = [kwargs[\"symbols\"][b] for b in bar_order]\n labels = type_labels + bar_labels\n # Set font type\n if kwargs[\"serif\"]:\n plotting.set_serif()\n # Set labels\n if kwargs[\"detailed\"]:\n ax = plotting.set_bar_labels(\n ax, top_n, labels, bar_dims[\"label_heights\"], comp_bar_heights, kwargs,\n )\n else:\n ax = plotting.set_bar_labels(\n ax, top_n, labels, bar_dims[\"total_heights\"], comp_bar_heights, kwargs,\n )\n\n # Add center dividing line\n ax.axvline(0, ls=\"-\", color=\"black\", lw=1.0, zorder=20)\n\n # Add dividing line between types and component bars\n ax.axhline(top_n + 1, ls=\"-\", color=\"black\", lw=0.7, zorder=20)\n if kwargs[\"show_total\"]:\n ax.axhline(top_n + 2.75, ls=\"-\", color=\"black\", lw=0.5, zorder=20)\n\n # Set insets\n if cumulative_inset:\n plotting.get_cumulative_inset(\n ax.figure, self.type2shift_score, top_n, self.normalization, kwargs\n )\n if text_size_inset:\n plotting.get_text_size_inset(\n ax.figure, self.type2freq_1, self.type2freq_2, kwargs\n )\n\n # Make x-tick labels bigger, flip y-axis ticks and label every 5th one\n ax = plotting.set_ticks(ax, top_n, kwargs)\n\n # Set axis spines\n ax = plotting.set_spines(ax, kwargs)\n\n # Set axis labels and title\n ax.set_xlabel(kwargs[\"xlabel\"], fontsize=kwargs[\"xlabel_fontsize\"])\n ax.set_ylabel(kwargs[\"ylabel\"], fontsize=kwargs[\"ylabel_fontsize\"])\n if \"title\" not in kwargs:\n s_avg_1 = self.get_weighted_score(self.type2freq_1, self.type2score_1)\n s_avg_2 = self.get_weighted_score(self.type2freq_2, self.type2score_2)\n title = (\n \"{}: \".format(kwargs[\"system_names\"][0])\n + r\"$\\Phi_{avg}=$\"\n + \"{0:.2f}\".format(s_avg_1)\n + \"\\n\"\n + \"{}: \".format(kwargs[\"system_names\"][1])\n + r\"$\\Phi_{avg}=$\"\n + \"{0:.2f}\".format(s_avg_2)\n )\n kwargs[\"title\"] = title\n ax.set_title(kwargs[\"title\"], fontsize=kwargs[\"title_fontsize\"])\n # Show and return plot\n if kwargs[\"tight\"]:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n 
plt.tight_layout()\n if filename is not None:\n plt.savefig(filename, dpi=kwargs[\"dpi\"])\n if show_plot:\n plt.show()\n return ax\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots" ] ]
LinkGeoML/Geocoding
[ "2b4c189fdd6e2bf7c90407f4537b465264d7d88a" ]
[ "geocoding/osm_utilities.py" ]
[ "import numpy as np\nimport pandas as pd\nimport json\nimport requests\nfrom shapely.geometry import LineString\nfrom sklearn.cluster import KMeans\nimport time\nimport os\n\nfrom geocoding.config import Config\n\n\ndef query_api(query, fpath):\n \"\"\"\n Queries Overpass API for *query*.\n\n Args:\n query (str): The query to be passed to API\n fpath (str): File path to write the API response\n\n Returns:\n None\n \"\"\"\n status = 0\n overpass_url = 'http://overpass-api.de/api/interpreter'\n try:\n response = requests.get(overpass_url, params={'data': query}).json()\n with open(fpath, 'w') as f:\n json.dump(response, f)\n except ValueError:\n print('Overpass api error: Trying again with a greater timeout...')\n time.sleep(3)\n status = 1\n return status\n\n\ndef parse_streets(fpath):\n \"\"\"\n Parses the API response from *fpath* and converts it to a dataframe.\n\n Args:\n fpath (str): File path to read\n\n Returns:\n pandas.DataFrame: Contains all streets as well as their geometries\n \"\"\"\n # Helper function\n def convert_to_wkt_geometry(row):\n lons = [p['lon'] for p in row['geometry']]\n lats = [p['lat'] for p in row['geometry']]\n if len(lons) < 2 or len(lats) < 2:\n return None\n return LineString(list(zip(lons, lats)))\n\n with open(fpath, encoding='utf-8') as f:\n streets = json.load(f)['elements']\n if not streets:\n return None\n\n data = [(street['id'], street['geometry']) for street in streets]\n cols = ['id', 'geometry']\n street_df = pd.DataFrame(data=data, columns=cols)\n street_df['geometry'] = street_df.apply(convert_to_wkt_geometry, axis=1)\n street_df = street_df.dropna()\n return street_df\n\n\ndef extract_streets(points, path):\n \"\"\"\n A wrapper function that administrates the streets download.\n\n Args:\n points (numpy.ndarray): Contains the data points that define the area \\\n to extract from Overpass API\n path (str): Path to write\n\n Returns:\n None\n \"\"\"\n labels = cluster_points(points)\n clusters_bboxes = get_clusters_bboxes(points, labels)\n street_dfs = []\n for cluster, bbox in clusters_bboxes.items():\n print('Getting bbox', cluster + 1, 'out of', len(clusters_bboxes))\n cell_street_df = download_cell(bbox, os.path.join(path, \"osm_streets.json\"))\n if cell_street_df is not None:\n print('Number of streets:', len(cell_street_df))\n street_dfs.append(cell_street_df)\n else:\n print('Number of streets:', 0)\n # if (cluster + 1) % 5 == 0:\n # print(f'Suspended for {config.osm_timeout} secs...')\n # time.sleep(config.osm_timeout)\n # delete file\n if os.path.exists(os.path.join(path, \"osm_streets.json\")):\n os.remove(os.path.join(path, \"osm_streets.json\"))\n\n street_df = pd.concat(street_dfs, ignore_index=True)\n street_df.drop_duplicates(subset='id', inplace=True)\n street_df.to_csv(f'{os.path.join(path, \"osm_streets.csv\")}', columns=['id', 'geometry'], index=False)\n print(f'Extracted {len(street_df.index)} unique streets')\n\n\ndef download_cell(cell, fpath):\n \"\"\"\n Downloads *cell* from Overpass API, writes results in *fpath* and then \\\n parses them into a pandas.DataFrame.\n\n Args:\n cell (list): Contains the bounding box coords\n fpath (str): Path to write results and then to read from in order to \\\n parse them\n\n Returns:\n pandas.DataFrame: Contains all street elements included in *cell*\n \"\"\"\n west, south, east, north = cell\n counter = 0\n status = 1\n while status and (counter < Config.max_overpass_tries):\n counter += 1\n query = (\n f'[out:json][timeout:{Config.osm_timeout * counter}];' \n # 
f'way[\"highway\"][\"highway\"!~\"^(cycleway|footway)$\"]'\n f'way[\"highway\"][\"highway\"!~\"^(cycleway)$\"]'\n # 'way[\"highway\"~\"^(motorway|trunk|primary)$\"];'\n # 'way[\"highway\"]'\n f'({south},{west},{north},{east});'\n 'out geom;')\n status = query_api(query, fpath)\n\n if status:\n print('Overpass api error: Exiting.')\n exit()\n return parse_streets(fpath)\n\n\ndef cluster_points(X):\n \"\"\"\n Clusters points given in *X*.\n\n Args:\n X (numpy.ndarray): Contains the points to be clustered\n\n Returns:\n numpy.ndarray: The predicted clusters labels\n \"\"\"\n n_clusters = int(Config.clusters_pct * X.shape[0])\n kmeans = KMeans(\n n_clusters=n_clusters, random_state=Config.seed_no, n_init=20, max_iter=500,\n ).fit(X)\n labels = kmeans.predict(X)\n return labels\n\n\ndef get_clusters_bboxes(X, labels):\n \"\"\"\n Extracts a bounding box for each one of the clusters.\n\n Args:\n X (numpy.ndarray): Contains the clustered points\n labels (numpy.ndarray): Contains the cluster label for each point in \\\n *X*\n Returns:\n dict: Contains the cluster labels as keys and the corresponding \\\n bounding box as values\n \"\"\"\n bboxes = {}\n for i in range(len(set(labels))):\n cluster_points = np.vstack([p for j, p in enumerate(X) if labels[j] == i])\n xmin, ymin = cluster_points.min(axis=0) - Config.osm_buffer\n xmax, ymax = cluster_points.max(axis=0) + Config.osm_buffer\n bboxes[i] = [xmin, ymin, xmax, ymax]\n # print({k: v for k, v in sorted(bboxes.items(), key=lambda item: item[1][0])})\n return bboxes\n" ]
[ [ "pandas.DataFrame", "sklearn.cluster.KMeans", "pandas.concat" ] ]
cetmann/robustness-interpretability
[ "6da6a28c101a0763f3ca59c51f2913eb0525f5c3" ]
[ "imagenet_data.py" ]
[ "\"\"\"\nThis file defines how to handle the ImageNet dataset from tfrecord files. The tfrecord files used in this work were\ncreated using the code from\nhttps://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport csv\nimport preprocessing\nfrom preprocessing import image_preprocessing\nimport configparser\n\n\nthis_folder = os.path.dirname(os.path.abspath(__file__)) + '/'\n\nconfig = configparser.ConfigParser()\nconfig.read(this_folder + 'dataset_paths.ini')\nbase_folder = config['PATHS'].get('ImageNet')\n\n# Load a class dictionary that matches the pre-trained\n# encoding.\nlabels_dict = {}\nwith open(this_folder + 'imagenet_labels.csv', 'rt') as csvfile:\n file_contents = csv.reader(csvfile, delimiter=',')\n for row in file_contents:\n labels_dict[row[0]] = row[1]\n\n##### Training ###########\ndef collect_train_data(num_samples_per_class=0):\n print(\"Collecting training data...\")\n tfrecord_folder = base_folder + 'tfrecord/'\n file_list = os.listdir(tfrecord_folder)\n train_data = [(tfrecord_folder + f) for f in file_list if 'train-' in f]\n # Create dummy labels, because the label information is contained\n # in the train_data files.\n train_labels = np.zeros_like(train_data,dtype=np.int32)\n return train_data, train_labels\n\ndef collect_val_data():\n print(\"Collecting validation data...\")\n tfrecord_folder = base_folder + 'tfrecord/'\n file_list = os.listdir(tfrecord_folder)\n val_data = [(tfrecord_folder + f) for f in file_list if 'validation-' in f]\n # Create dummy labels, because the label information is contained\n # in the train_data files.\n val_labels = np.zeros_like(val_data,dtype=np.int32)\n return val_data, val_labels\n\n# tf.data batch iterator for the training data\ndef train_BI(filenames, \n labels, \n batch_size,\n num_parallel_calls):\n dataset = tf.data.TFRecordDataset(filenames)\n batch_prepare = lambda image: image_preprocessing(image,\n None,\n file_type = 'tfrecord',\n shape = [256,256],\n random_events = True,\n data_augmentation = True,\n additive_noise = False,\n subtract_by = 'ImageNet',\n divide_by = 1.,\n colorspace = 'BGR',\n min_rescale = 258,\n rescale = True,\n noise_level = 10.,\n clip_values = bounds())\n dataset = dataset.map(batch_prepare,num_parallel_calls=num_parallel_calls) \n batched_dataset = dataset.batch(batch_size,\n drop_remainder = False) \n train_batch_iterator = batched_dataset.make_initializable_iterator()\n return train_batch_iterator\n\n# tf.data batch iterator for the validation data\ndef val_BI(filenames, \n labels, \n batch_size,\n num_parallel_calls):\n dataset = tf.data.TFRecordDataset(filenames)\n batch_prepare = lambda image: image_preprocessing(image, \n None,\n file_type = 'tfrecord',\n shape = [256,256],\n random_events = False,\n data_augmentation = False,\n additive_noise = False,\n subtract_by = 'ImageNet',\n divide_by = 1.,\n colorspace = 'BGR',\n min_rescale = 258,\n rescale = True)\n dataset = dataset.map(batch_prepare,\n num_parallel_calls=num_parallel_calls)\n batched_dataset = dataset.batch(batch_size,\n drop_remainder = False) \n batch_iterator = batched_dataset.make_initializable_iterator()\n return batch_iterator\n\n# Additional tf.data batch iterator for the data that is used just for the propagation\n# of a few images for visualization.\ndef img_BI(filenames, \n labels, \n 
batch_size,\n num_parallel_calls):\n dataset = tf.data.TFRecordDataset(filenames)\n batch_prepare = lambda image: image_preprocessing(image, \n None,\n file_type = 'tfrecord',\n shape = [256,256],\n random_events = False,\n data_augmentation = False,\n additive_noise = False,\n subtract_by = 'ImageNet',\n divide_by = 1.,\n colorspace = 'BGR',\n min_rescale = 258,\n rescale = True)\n dataset = dataset.map(batch_prepare,\n num_parallel_calls=num_parallel_calls)\n batched_dataset = dataset.batch(batch_size,\n drop_remainder = False) \n batch_iterator = batched_dataset.make_initializable_iterator()\n return batch_iterator\n\ndef interpret_as_image(image):\n return preprocessing.interpret_as_image(image,\n add_by='ImageNet',\n colorspace='BGR')\n\ndef num_train_samples():\n return 1281167\n\ndef num_val_samples():\n return 50000\n\ndef bounds():\n # This is a little problematic here. Foolbox only allows\n # for scalar bounds, not bounds per channel. For this reason,\n # we use the worst-case bounds here.\n return (-130., 255.-100.)\n\nmin_values = np.array([0.,0.,0.],np.float32)\nmax_values = np.array([1.,1.,1.],np.float32)\ndef image_range():\n return [0.,255.]" ]
[ [ "numpy.zeros_like", "numpy.array", "tensorflow.data.TFRecordDataset" ] ]
arjunmajum/detectron2
[ "b9d77de2a9e16ac0b866fb0890ff0a618aa5f11b" ]
[ "detectron2/engine/hooks.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport datetime\nimport itertools\nimport logging\nimport os\nimport tempfile\nimport time\nfrom collections import Counter\nimport torch\nfrom fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer\nfrom fvcore.common.file_io import PathManager\nfrom fvcore.common.timer import Timer\nfrom fvcore.nn.precise_bn import get_bn_modules, update_bn_stats\n\nimport detectron2.utils.comm as comm\nfrom detectron2.evaluation.testing import flatten_results_dict\nfrom detectron2.utils.events import EventStorage, EventWriter\n\nfrom .train_loop import HookBase\n\n__all__ = [\n \"CallbackHook\",\n \"IterationTimer\",\n \"PeriodicWriter\",\n \"PeriodicCheckpointer\",\n \"LRScheduler\",\n \"AutogradProfiler\",\n \"EvalHook\",\n \"PreciseBN\",\n]\n\n\n\"\"\"\nImplement some common hooks.\n\"\"\"\n\n\nclass CallbackHook(HookBase):\n \"\"\"\n Create a hook using callback functions provided by the user.\n \"\"\"\n\n def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):\n \"\"\"\n Each argument is a function that takes one argument: the trainer.\n \"\"\"\n self._before_train = before_train\n self._before_step = before_step\n self._after_step = after_step\n self._after_train = after_train\n\n def before_train(self):\n if self._before_train:\n self._before_train(self.trainer)\n\n def after_train(self):\n if self._after_train:\n self._after_train(self.trainer)\n # The functions may be closures that hold reference to the trainer\n # Therefore, delete them to avoid circular reference.\n del self._before_train, self._after_train\n del self._before_step, self._after_step\n\n def before_step(self):\n if self._before_step:\n self._before_step(self.trainer)\n\n def after_step(self):\n if self._after_step:\n self._after_step(self.trainer)\n\n\nclass IterationTimer(HookBase):\n \"\"\"\n Track the time spent for each iteration (each run_step call in the trainer).\n Print a summary in the end of training.\n\n This hook uses the time between the call to its :meth:`before_step`\n and :meth:`after_step` methods.\n Under the convention that :meth:`before_step` of all hooks should only\n take negligible amount of time, the :class:`IterationTimer` hook should be\n placed at the beginning of the list of hooks to obtain accurate timing.\n \"\"\"\n\n def __init__(self, warmup_iter=3):\n \"\"\"\n Args:\n warmup_iter (int): the number of iterations at the beginning to exclude\n from timing.\n \"\"\"\n self._warmup_iter = warmup_iter\n self._step_timer = Timer()\n self._start_time = time.perf_counter()\n self._total_timer = Timer()\n\n def before_train(self):\n self._start_time = time.perf_counter()\n self._total_timer.reset()\n self._total_timer.pause()\n\n def after_train(self):\n logger = logging.getLogger(__name__)\n total_time = time.perf_counter() - self._start_time\n total_time_minus_hooks = self._total_timer.seconds()\n hook_time = total_time - total_time_minus_hooks\n\n num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter\n\n if num_iter > 0 and total_time_minus_hooks > 0:\n # Speed is meaningful only after warmup\n # NOTE this format is parsed by grep in some scripts\n logger.info(\n \"Overall training speed: {} iterations in {} ({:.4f} s / it)\".format(\n num_iter,\n str(datetime.timedelta(seconds=int(total_time_minus_hooks))),\n total_time_minus_hooks / num_iter,\n )\n )\n\n logger.info(\n \"Total training time: {} ({} on 
hooks)\".format(\n str(datetime.timedelta(seconds=int(total_time))),\n str(datetime.timedelta(seconds=int(hook_time))),\n )\n )\n\n def before_step(self):\n self._step_timer.reset()\n self._total_timer.resume()\n\n def after_step(self):\n # +1 because we're in after_step\n iter_done = self.trainer.iter - self.trainer.start_iter + 1\n if iter_done >= self._warmup_iter:\n sec = self._step_timer.seconds()\n self.trainer.storage.put_scalars(time=sec)\n else:\n self._start_time = time.perf_counter()\n self._total_timer.reset()\n\n self._total_timer.pause()\n\n\nclass PeriodicWriter(HookBase):\n \"\"\"\n Write events to EventStorage (by calling ``writer.write()``) periodically.\n\n It is executed every ``period`` iterations and after the last iteration.\n Note that ``period`` does not affect how data is smoothed by each writer.\n \"\"\"\n\n def __init__(self, writers, period=20):\n \"\"\"\n Args:\n writers (list[EventWriter]): a list of EventWriter objects\n period (int):\n \"\"\"\n self._writers = writers\n for w in writers:\n assert isinstance(w, EventWriter), w\n self._period = period\n\n def after_step(self):\n if (self.trainer.iter + 1) % self._period == 0 or (\n self.trainer.iter == self.trainer.max_iter - 1\n ):\n for writer in self._writers:\n writer.write()\n\n def after_train(self):\n for writer in self._writers:\n writer.close()\n\n\nclass PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):\n \"\"\"\n Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.\n\n Note that when used as a hook,\n it is unable to save additional data other than what's defined\n by the given `checkpointer`.\n\n It is executed every ``period`` iterations and after the last iteration.\n \"\"\"\n\n def before_train(self):\n self.max_iter = self.trainer.max_iter\n\n def after_step(self):\n # No way to use **kwargs\n self.step(self.trainer.iter)\n\n\nclass LRScheduler(HookBase):\n \"\"\"\n A hook which executes a torch builtin LR scheduler and summarizes the LR.\n It is executed after every iteration.\n \"\"\"\n\n def __init__(self, optimizer, scheduler):\n \"\"\"\n Args:\n optimizer (torch.optim.Optimizer):\n scheduler (torch.optim._LRScheduler)\n \"\"\"\n self._optimizer = optimizer\n self._scheduler = scheduler\n\n # NOTE: some heuristics on what LR to summarize\n # summarize the param group with most parameters\n largest_group = max(len(g[\"params\"]) for g in optimizer.param_groups)\n\n if largest_group == 1:\n # If all groups have one parameter,\n # then find the most common initial LR, and use it for summary\n lr_count = Counter([g[\"lr\"] for g in optimizer.param_groups])\n lr = lr_count.most_common()[0][0]\n for i, g in enumerate(optimizer.param_groups):\n if g[\"lr\"] == lr:\n self._best_param_group_id = i\n break\n else:\n for i, g in enumerate(optimizer.param_groups):\n if len(g[\"params\"]) == largest_group:\n self._best_param_group_id = i\n break\n\n def after_step(self):\n lr = self._optimizer.param_groups[self._best_param_group_id][\"lr\"]\n self.trainer.storage.put_scalar(\"lr\", lr, smoothing_hint=False)\n self._scheduler.step()\n\n\nclass AutogradProfiler(HookBase):\n \"\"\"\n A hook which runs `torch.autograd.profiler.profile`.\n\n Examples:\n ::\n hooks.AutogradProfiler(\n lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR\n )\n\n The above example will run the profiler for iteration 10~20 and dump\n results to ``OUTPUT_DIR``. 
We did not profile the first few iterations\n because they are typically slower than the rest.\n The result files can be loaded in the ``chrome://tracing`` page in chrome browser.\n\n Note:\n When used together with NCCL on older version of GPUs,\n autograd profiler may cause deadlock because it unnecessarily allocates\n memory on every device it sees. The memory management calls, if\n interleaved with NCCL calls, lead to deadlock on GPUs that do not\n support ``cudaLaunchCooperativeKernelMultiDevice``.\n \"\"\"\n\n def __init__(self, enable_predicate, output_dir, *, use_cuda=True):\n \"\"\"\n Args:\n enable_predicate (callable[trainer -> bool]): a function which takes a trainer,\n and returns whether to enable the profiler.\n It will be called once every step, and can be used to select which steps to profile.\n output_dir (str): the output directory to dump tracing files.\n use_cuda (bool): same as in `torch.autograd.profiler.profile`.\n \"\"\"\n self._enable_predicate = enable_predicate\n self._use_cuda = use_cuda\n self._output_dir = output_dir\n\n def before_step(self):\n if self._enable_predicate(self.trainer):\n self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)\n self._profiler.__enter__()\n else:\n self._profiler = None\n\n def after_step(self):\n if self._profiler is None:\n return\n self._profiler.__exit__(None, None, None)\n PathManager.mkdirs(self._output_dir)\n out_file = os.path.join(\n self._output_dir, \"profiler-trace-iter{}.json\".format(self.trainer.iter)\n )\n if \"://\" not in out_file:\n self._profiler.export_chrome_trace(out_file)\n else:\n # Support non-posix filesystems\n with tempfile.TemporaryDirectory(prefix=\"detectron2_profiler\") as d:\n tmp_file = os.path.join(d, \"tmp.json\")\n self._profiler.export_chrome_trace(tmp_file)\n with open(tmp_file) as f:\n content = f.read()\n with PathManager.open(out_file, \"w\") as f:\n f.write(content)\n\n\nclass EvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`.\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n is_final = next_iter == self.trainer.max_iter\n if is_final or (self._period > 0 and next_iter % self._period == 0):\n self._do_eval()\n\n def after_train(self):\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func\n\n\nclass PreciseBN(HookBase):\n \"\"\"\n The standard implementation of BatchNorm uses EMA in inference, which is\n sometimes suboptimal.\n This class computes the true average of statistics rather than the moving average,\n and put true averages to every BN layer in the given model.\n\n It is executed every ``period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, period, model, data_loader, num_iter):\n \"\"\"\n Args:\n period (int): the period this hook is run, or 0 to not run during training.\n The hook will always run in the end of training.\n model (nn.Module): a module whose all BN layers in training mode will be\n updated by precise BN.\n Note that user is responsible for ensuring the BN layers to be\n updated are in training mode when this hook is triggered.\n data_loader (iterable): it will produce data to be run by `model(data)`.\n num_iter (int): number of iterations used to compute the precise\n statistics.\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if len(get_bn_modules(model)) == 0:\n self._logger.info(\n \"PreciseBN is disabled because model does not contain BN layers in training mode.\"\n )\n self._disabled = True\n return\n\n self._model = model\n self._data_loader = data_loader\n self._num_iter = num_iter\n self._period = period\n self._disabled = False\n\n self._data_iter = None\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n is_final = next_iter == self.trainer.max_iter\n if is_final or (self._period > 0 and next_iter % self._period == 0):\n self.update_stats()\n\n def update_stats(self):\n \"\"\"\n Update the model with precise statistics. Users can manually call this method.\n \"\"\"\n if self._disabled:\n return\n\n if self._data_iter is None:\n self._data_iter = iter(self._data_loader)\n\n def data_loader():\n for num_iter in itertools.count(1):\n if num_iter % 100 == 0:\n self._logger.info(\n \"Running precise-BN ... {}/{} iterations.\".format(num_iter, self._num_iter)\n )\n # This way we can reuse the same iterator\n yield next(self._data_iter)\n\n with EventStorage(): # capture events in a new storage to discard them\n self._logger.info(\n \"Running precise-BN for {} iterations... \".format(self._num_iter)\n + \"Note that this could produce different statistics every time.\"\n )\n update_bn_stats(self._model, data_loader(), self._num_iter)\n" ]
[ [ "torch.autograd.profiler.profile" ] ]
joelkalonji/SplitMLModel
[ "5bf4afef50504f984bf90672f1b3b2e0917cf8fb" ]
[ "SplitModel.py" ]
[ "from sklearn.model_selection import StratifiedShuffleSplit\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndata = pd.read_csv('data/data.csv')\r\n\r\ndata.head()\r\n\r\n\r\ntrain_data, test_data = train_test_split(data, test_size=0.2)\r\n\r\ntrain_data.head()\r\n\r\n\r\ntrain_data, test_data = train_test_split(data, test_size=0.2)\r\n\r\ndata['price_category'] = pd.cut(data, test_size=0.2)\r\n\r\ndata.describe()\r\n\r\ndata['price_category'] = pd.cut(data['AveragePrice'], bins=[\r\n 0, 0.7, 1.2, 1.6, 2.5, 3., np.inf], labels=[1, 2, 3, 4, 5, 6])\r\n\r\n\r\ntrain_data, test_data = train_test_split(data, test_size=0.2)\r\n\r\ntrain_data['price_category'].value_counts()/len(train_data)\r\n\r\ntest_data['price_category'].value_counts()/len(test_data)\r\n\r\n\r\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\r\n\r\nfor train_ids, test_ids in split.split(data, data['price_category']):\r\n train_data = data.loc[train_ids]\r\n test_data = data.loc[test_ids]\r\n\r\ntest_data['price_category'].value_counts()/len(test_data)\r\n" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.model_selection.StratifiedShuffleSplit", "pandas.read_csv", "pandas.cut" ] ]
hhsecond/redisai-examples
[ "9eb167962a77a5ff307ee924c2e4ece3b98362ca" ]
[ "models/imagenet/tensorflow/model_checker.py" ]
[ "import tensorflow as tf\nimport tensorflow_hub as hub\nfrom skimage import io\nimport numpy as np\nimport json\n\n\nclass_idx = json.load(open(\"../data/imagenet_classes.json\"))\n\nfilepath = '../data/guitar.jpg'\nnumpy_img = io.imread(filepath).astype(dtype=np.float32)\nnumpy_img = np.expand_dims(numpy_img, axis=0) / 255\n\n\nfrozen_graph=\"resnet50.pb\"\nwith tf.gfile.GFile(frozen_graph, \"rb\") as f:\n restored_graph_def = tf.GraphDef()\n restored_graph_def.ParseFromString(f.read())\nwith tf.Graph().as_default() as graph:\n tf.import_graph_def(\n restored_graph_def,\n input_map=None,\n return_elements=None,\n name=\"\"\n )\nimages = graph.get_tensor_by_name('images:0')\nlogits = graph.get_tensor_by_name('output:0')\nwith tf.Session(graph=graph) as sess:\n sess.run([tf.global_variables_initializer()])\n ret = sess.run(logits, feed_dict={images: numpy_img})\n\nprint(ret.shape, ret.dtype)\nind = ret.argmax()\nprint(class_idx[str(ind.item() - 1)])\n" ]
[ [ "tensorflow.GraphDef", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.Graph", "tensorflow.gfile.GFile", "tensorflow.global_variables_initializer", "numpy.expand_dims" ] ]
anon-paper-submissions-1982/shrinkbench
[ "fd42c6701ab45473fad95b4dd99bb17356b8c82d" ]
[ "datasets/zeros224.py" ]
[ "import random\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Zeros224(Dataset):\n\n def __init__(self, num_samples):\n self.len = num_samples\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n x = torch.zeros(3, 224, 224)\n y = 0 # random.randint(0, 999)\n return x, y\n\n\ndef train_dataset(preproc=True, path=None):\n return Zeros224(int(1.28e6))\n\n\ndef val_dataset(preproc=True, path=None):\n return Zeros224(int(5e4))\n" ]
[ [ "torch.zeros" ] ]
CAOR-MINES-ParisTech/ukfm
[ "fa6c1358d73598fb57ad9930d7e89516830d27db" ]
[ "docsource/source/auto_examples/slam2d.py" ]
[ "\"\"\"\n********************************************************************************\n2D Robot SLAM - Example\n********************************************************************************\nGoals of this script:\n\n- apply the UKF for performing 2D SLAM (Simultaneous Localization And Mapping).\n\n- discover a computationally alternative way for performing UKF inspired from\n :cite:`HuangA2013`. This alternative leads to computational speed improvement\n when only a part of the state is involved in a propagation or in update step.\n\n- augment the state when a new landmark is observed in a UKF derivative-free\n way.\n\n*We assume the reader is already familiar with the approach described in the\ntutorial.*\n\nThis script considers the 2D robot SLAM problem where the robot is equipped with\nwheel odometry and observes unknown landmark measurements. The robot state is\npropagated through the odometry model and landmark observations are used in the\nUKF measurement step. Landmarks are static and we assume no error coming from\ndata association. We reproduce the simulations that are described in\n:cite:`huangObservability2010` , :cite:`HuangA2013`.\n\"\"\"\n\n################################################################################\n# Import\n# ==============================================================================\nimport ukfm\nfrom ukfm import SLAM2D as MODEL\nimport numpy as np\nimport matplotlib\nukfm.set_matplotlib_config()\n\n################################################################################\n# Model and Simulation\n# ==============================================================================\n# This script uses the :meth:`~ukfm.SLAM2D` model that requires sequence time\n# and odometry frequency.\n\n# sequence time (s)\nT = 2500\n# odometry frequency (Hz)\nodo_freq = 1\n# create the model\nmodel = MODEL(T, odo_freq)\n\n################################################################################\n# The trajectory of the robot consists in turning at constant speed.\n\n# true speed of robot (m/s)\nv = 0.25\n# true angular velocity (rad/s)\ngyro = 1.5 / 180 * np.pi\n# odometry noise standard deviation\nodo_std = np.array([0.05*v/np.sqrt(2), # speed (v/m)\n 0.05*v*np.sqrt(2)*2]) # angular speed (rad/s)\n\n################################################################################\n# When simulating data, we generate a map. The map is defined as landmarks\n# constantly spaced on a circle with slightly higher radius than the radius of\n# the robot trajectory.\n\n# simulate true trajectory and noisy input\nstates, omegas, ldks = model.simu_f(odo_std, v, gyro)\n# observation noise standard deviation (m)\nobs_std = 0.1\n# plot the map\nmodel.plot_traj(states, ldks)\n\n################################################################################\n# The state and the input contain the following variables:\n#\n# .. highlight:: python\n# .. code-block:: python\n#\n# states[n].Rot # orientation (matrix)\n# states[n].p # robot position\n# states[n].p_l # landmark positions\n# omegas[n].gyro # robot angular velocity\n# omegas[n].v # robot speed\n#\n\n################################################################################\n# Landmark positions are a 2D array where we get the k-th landmark as\n# ``states[n].p_l[k]``. 
The number of landmarks in the state starts from zero\n# and increases when the robot observes a new landmark.\n\n################################################################################\n# We compute noisy landmark measurements based on the true states.\n\nys = model.simu_h(states, obs_std, ldks)\n\n################################################################################\n# A measurement contains the observations of all visible landmarks as:\n#\n# .. highlight:: python\n# .. code-block:: python\n#\n# y_n = ys[n] # measurement at timestamp n\n# y_n_k = y_n[k] # k-th observed landmark at instant n, where y_n_k[2] is the\n# # landmark index (-1 if the landmark is not observed)\n#\n\n################################################################################\n# Filter Design and Initialization\n# ------------------------------------------------------------------------------\n# We embed the robot state in :math:`SO(2) \\times \\mathbb{R}^2` and each\n# landmark position in :math:`\\mathbb{R}^2`, such that:\n#\n# - the retraction :math:`\\varphi(.,.)` is the :math:`SO(2)` exponential for\n# orientation, and the vector addition for positions.\n#\n# - the inverse retraction :math:`\\varphi^{-1}_.(.)` is the :math:`SO(2)`\n# logarithm for orientation and the vector subtraction for positions.\n\n################################################################################\n# Remaining parameter setting is standard. \n\n# propagation noise covariance matrix\nQ = np.diag(odo_std**2)\n# measurement noise covariance matrix\nR = obs_std**2*np.eye(2)\n# sigma point parameters\nalpha = np.array([1e-3, 1e-3, 1e-3, 1e-3, 1e-3])\n# initial uncertainty matrix\nP0 = np.zeros((3, 3)) # The state is perfectly initialized without\n\n################################################################################\n# Regarding implementation, we use the Jacobian UKF (:meth:`~ukfm.JUKF`) that\n# spares time when only a part of the space is involved in a propagation or\n# update step.\n#\n# **How it works ?** Consider the propagation of the covariance in an extended\n# Kalman filter as\n#\n# .. math::\n#\n# \\mathbf{P}_{n+1} = \\mathbf{F} \\mathbf{P}_{n} \\mathbf{F}^T +\n# \\mathbf{G} \\mathbf{Q} \\mathbf{G}^T,\n#\n# where the robot state uncertainty is put in the first indices of the\n# covariance matrix :math:`\\mathbf{P}_{n}`. As landmarks are statics, the\n# Jacobian take the forms\n#\n# .. math::\n#\n# \\mathbf{F} = \\begin{bmatrix} \\mathbf{F}^R & \\mathbf{0} \\\\\n# \\mathbf{0} & \\mathbf{I} \\end{bmatrix}, \\mathbf{G} = \\begin{bmatrix}\n# \\mathbf{G}^R & \\mathbf{0} \\\\ \\mathbf{0} & \\mathbf{0} \\end{bmatrix}.\n#\n# The JUKF allows to compute :math:`\\mathbf{F}^R` and :math:`\\mathbf{G}^R` by\n# only using the required sigma points. Here it corresponds to the sigma points\n# of the robot state. This requires to set the reduced retraction ``red_phi``\n# and inverse retraction ``red_phi_inv`` that compute the required subpart of\n# the full retraction :math:`\\varphi(.,.)` and inverse retraction\n# :math:`\\varphi^{-1}_.(.)`, and to define corresponding indices ``red_idx`` in\n# :math:`\\mathbf{P}_n`.\n#\n# Similarly for the observation of a landmark, e.g. the first landmark, the\n# observation matrix has the form\n#\n# .. math::\n#\n# \\mathbf{H} = \\begin{bmatrix} \\mathbf{H}^1 & \\mathbf{0} \\end{bmatrix}.\n#\n# The JUKF computes :math:`\\mathbf{H}^1` by only using the required sigma points\n# of the robot state and the observed landmark. 
This requires to set another\n# function ``up_phi`` using during update to compute a subpart of the retraction\n# :math:`\\varphi(.,.)`, as corresponding indices ``up_idx`` in\n# :math:`\\mathbf{P}_n`.\n\n################################################################################\n# Finally, we require to define a new function :math:`z(.,.)` to augment the\n# state such that\n#\n# .. math::\n#\n# \\boldsymbol{\\chi}_n^{\\mathrm{aug}} = z(\\boldsymbol{\\chi}_{n}, \\mathbf{y}_n),\n#\n# where :math:`\\boldsymbol{\\chi}_n^{\\mathrm{aug}}` is the augmented state and\n# the :math:`\\mathbf{y}_n` the measurement used to augment the state. Here this\n# measurement is a landmark observation. To make the augmentation efficient we\n# need to compute sigma points for only the state involved in :math:`z(.,.)`,\n# ``aug_phi`` is thus only a subpart of :math:`\\varphi(.,.)` and ``aug_inv_phi``\n# is the inverse retraction of :math:`\\varphi(.,.)^{-1}` corresponding to the\n# novel part of the state only.\n\n# reduced weights during propagation\nred_idxs = np.array([0, 1, 2]) # indices corresponding to the robot state in P\n# weights during update\naug_idxs = np.array([0, 1, 2]) # indices corresponding to the robot state in P\n\nstate0 = model.STATE(Rot=states[0].Rot, p=states[0].p, p_l=np.zeros((0, 2)))\n\nukf = ukfm.JUKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, phi=model.phi,\n alpha=alpha, \n red_phi=model.red_phi,\n red_phi_inv=model.red_phi_inv,\n red_idxs=red_idxs,\n up_phi=model.up_phi,\n up_idxs=np.arange(5), # it will changes during the sequence\n aug_z=model.aug_z,\n aug_phi=model.aug_phi,\n aug_phi_inv=model.aug_phi_inv,\n aug_idxs=aug_idxs,\n aug_q=2)\n\n# set variables for recording estimates along the full trajectory\nukf_states = [states[0]]\nukf_Ps = [P0]\n\n# indices of already observed landmarks\nukf_lmk = np.array([])\n\n################################################################################\n# Filtering\n# ==============================================================================\n# The UKF proceeds as a standard Kalman filter with a for loop.\n\nfor n in range(1, model.N):\n # propagation\n ukf.propagation(omegas[n-1], model.dt)\n y_n = ys[n]\n # observed landmarks\n idxs = np.where(y_n[:, 2] >= 0)[0]\n # update each landmark already in the filter\n p_ls = ukf.state.p_l\n for idx0 in idxs:\n idx = np.where(ukf_lmk == y_n[idx0, 2])[0]\n if idx.shape[0] == 0:\n continue\n # indices of the robot and observed landmark in P\n up_idxs = np.hstack([0, 1, 2, 3+2*idx, 4+2*idx])\n ukf.state.p_l = np.squeeze(p_ls[idx])\n # compute observability matrices and residual\n ukf.H_num(np.squeeze(y_n[idx0, :2]), up_idxs, R)\n ukf.state.p_l = p_ls\n # update only if some landmarks have been observed\n if ukf.H.shape[0] > 0:\n ukf.state_update()\n # augment the state with new landmark\n for idx0 in idxs:\n idx = np.where(ukf_lmk == y_n[idx0, 2])[0]\n if not idx.shape[0] == 0:\n continue\n # augment the landmark state\n ukf_lmk = np.hstack([ukf_lmk, int(y_n[idx0, 2])])\n # indices of the new landmark\n idx = ukf_lmk.shape[0]-1\n # new landmark position\n p_l = np.expand_dims(ukf.state.p + ukf.state.Rot.dot(y_n[idx0, :2]), 0)\n p_ls = np.vstack([ukf.state.p_l, p_l])\n ukf.state.p_l = p_l\n # get Jacobian and then covariance following [2]\n R_n = obs_std ** 2 * np.eye(2)\n ukf.aug(y_n[idx0, :2], aug_idxs, R)\n ukf.state.p_l = p_ls\n # save estimates\n ukf_states.append(ukf.state)\n 
ukf_Ps.append(ukf.P)\n\n################################################################################\n# Results\n# ------------------------------------------------------------------------------\n# We plot the trajectory, the position of the landmarks and the estimated\n# trajectory in the same plot, the attitude error, the position error, and their\n# confidence interval.\n\nmodel.plot_results(ukf_states, ukf_Ps, states, ldks)\n\n################################################################################\n# We note the :math:`3\\sigma` confidence interval decreases along time.\n\n################################################################################\n# Conclusion\n# ==============================================================================\n# This script shows how the UKF on parallelizable manifolds can be used for 2D\n# SLAM. By leveraging numerical Jacobian inference, one obtains a\n# computationally more efficient filter. The UKF works for this example, but\n# consistency issues happear at the end of the trajectory.\n#\n# You can now:\n#\n# - consider non-linear range and bearing measurement.\n#\n# - benchmark the UKF with different retractions and compare it to the extended\n# Kalman filter and the invariant extended Kalman filter of\n# :cite:`barrauInvariant2017`.\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.squeeze", "numpy.eye", "numpy.where", "numpy.arange", "numpy.sqrt", "numpy.hstack", "numpy.diag", "numpy.vstack" ] ]
chatid/chatnet
[ "9d21a1b38da0a624daa4bb163fca854e14a456b3" ]
[ "chatnet/evaluation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n evaluation\n ~~~~~~~~~~\n \n Evaluate model performance\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.pylab import plt\n\n\ndef calibration_plot(prob, ytest):\n # stolen from stackoverflow!\n outcome = ytest\n data = pd.DataFrame(dict(prob=prob, outcome=outcome))\n\n #group outcomes into bins of similar probability\n bins = np.linspace(0, 1, 20)\n cuts = pd.cut(prob, bins)\n binwidth = bins[1] - bins[0]\n \n #freshness ratio and number of examples in each bin\n cal = data.groupby(cuts).outcome.agg(['mean', 'count'])\n cal['pmid'] = (bins[:-1] + bins[1:]) / 2\n cal['sig'] = np.sqrt(cal.pmid * (1 - cal.pmid) / cal['count'])\n \n #the calibration plot\n ax = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n p = plt.errorbar(cal.pmid, cal['mean'], cal['sig'])\n plt.plot(cal.pmid, cal.pmid, linestyle='--', lw=1, color='k')\n plt.ylabel(\"Empirical P(Product)\")\n \n #the distribution of P(fresh)\n ax = plt.subplot2grid((3, 1), (2, 0), sharex=ax)\n \n plt.bar(left=cal.pmid - binwidth / 2, height=cal['count'],\n width=.95 * (bins[1] - bins[0]),\n fc=p[0].get_color())\n \n plt.xlabel(\"Predicted P(Product)\")\n plt.ylabel(\"Number\")\n\n\ndef is_middling_group(df):\n return df.count() == df[df['predicted'].map(lambda v: v > .2 and v < .8)].count()\n" ]
[ [ "matplotlib.pylab.plt.ylabel", "matplotlib.pylab.plt.subplot2grid", "pandas.cut", "matplotlib.pylab.plt.xlabel", "numpy.sqrt", "numpy.linspace", "matplotlib.pylab.plt.errorbar", "matplotlib.pylab.plt.plot" ] ]
brunoluanUFS/ProtoML
[ "cc467b7e41b45244b6f54725f08bbc89f806d708" ]
[ "app.py" ]
[ "import gunicorn\r\nfrom fastapi import FastAPI, File, Form, UploadFile, Query\r\nimport shutil\r\nimport pandas as pd\r\nfrom pandas_profiling import ProfileReport\r\nfrom fastapi.responses import FileResponse\r\nfrom datetime import datetime\r\nimport pickle\r\nimport numpy as np\r\n\r\napp = FastAPI()\r\n\r\n@app.post(\"/UploadCSV/\")\r\nasync def Upload_CSV(file: UploadFile = File(...)):\r\n with open(\"dataset.csv\", \"wb\") as buffer:\r\n shutil.copyfileobj(file.file, buffer)\r\n\r\n df = pd.read_csv(\"dataset.csv\")\r\n colunas = str(df.columns.values.tolist())\r\n\r\n return f\"O dataset foi carregado e possui {len(df.columns.values.tolist())} colunas {colunas}\"\r\n\r\n\r\n@app.post(\"/Analisador/\")\r\nasync def Recebe_CSV_Gera_Relatorio(file: UploadFile = File(...)):\r\n with open(f'{file.filename}', \"wb\") as buffer:\r\n shutil.copyfileobj(file.file, buffer)\r\n \r\n df = pd.read_csv(file.filename)\r\n analise = ProfileReport(df)\r\n data = datetime.now().strftime(\"%d_%m_%Y-%H_%M_%S\")\r\n \r\n analise.to_file(f\"analise_{data}.html\")\r\n FileResponse(f\"analise_{data}.html\")\r\n\r\n return f\"O Relatório analise_{data}.html foi salvo\"\r\n\r\n@app.post(\"/TreinaClassificador-GaussianNB/\")\r\nasync def Treina_Classificador(target: str = Form(...)):\r\n\r\n from sklearn.naive_bayes import GaussianNB\r\n\r\n df = pd.read_csv(\"dataset.csv\")\r\n \r\n X = df.loc[:, df.columns != target]\r\n y = df.loc[:, df.columns == target]\r\n\r\n GNB = GaussianNB()\r\n GNB.fit(X,y.values.ravel())\r\n score = str(round(GNB.score(X,y)*100,2))+\"%\"\r\n\r\n pkl_filename = \"GNB_model.pkl\"\r\n with open(pkl_filename, 'wb') as file:\r\n pickle.dump(GNB, file)\r\n \r\n atributos = str(X.columns.values.tolist())\r\n return f\"O modelo foi treinado e salvo no servidor com atributos: {str(atributos)}, target: {target} e {score} de acurácia média\"\r\n\r\n@app.post(\"/TreinaClassificador-LogisticRegression/\")\r\nasync def Treina_Classificador(target: str = Form(...)):\r\n\r\n from sklearn.linear_model import LogisticRegression\r\n df = pd.read_csv(\"dataset.csv\")\r\n # PREPROCESSAMENTO\r\n\r\n # df_numeric = df._get_numeric_data()\r\n # print(df_numeric)\r\n # cols = df.columns\r\n # num_cols = df._get_numeric_data().columns\r\n # cat_cols = list(set(cols) - set(num_cols))\r\n\r\n # ONEHOT\r\n \r\n\r\n # TREINA O MODELO\r\n X = df.loc[:, df.columns != target]\r\n y = df.loc[:, df.columns == target]\r\n LR = LogisticRegression()\r\n LR.fit(X,y.values.ravel())\r\n score = str(round(LR.score(X,y)*100,2))+\"%\"\r\n pkl_filename = \"LR_model.pkl\"\r\n with open(pkl_filename, 'wb') as file:\r\n pickle.dump(LR, file)\r\n atributos = str(X.columns.values.tolist())\r\n return f\"O modelo foi treinado e salvo no servidor com atributos: {str(atributos)}, target: {target} e {score} de acurácia média\"\r\n\r\n@app.post('/InferenciaGNB/')\r\nasync def predictGNB(q: list = Query([])):\r\n q2 = []\r\n for i in q:\r\n q2.append(np.float_(i))\r\n\r\n pkl_filename = \"GNB_model.pkl\"\r\n with open(pkl_filename, 'rb') as file:\r\n GNB = pickle.load(file)\r\n\r\n pred = GNB.predict([q2])\r\n return str(pred)\r\n\r\n@app.post('/InferenciaLR/')\r\nasync def predictLR(q: list = Query([])):\r\n q2 = []\r\n for i in q:\r\n q2.append(np.float_(i))\r\n\r\n pkl_filename = \"LR_model.pkl\"\r\n with open(pkl_filename, 'rb') as file:\r\n LR = pickle.load(file)\r\n \r\n print(q,[q2],len(q),len(q2))\r\n\r\n pred = LR.predict([q2])\r\n return str(pred)\r\n" ]
[ [ "numpy.float_", "sklearn.linear_model.LogisticRegression", "pandas.read_csv", "sklearn.naive_bayes.GaussianNB" ] ]
AjithK14/PandemicSimulator
[ "654bc9c67b8799bf530bb296e270d85412a06a73" ]
[ "python/pandemic_simulator/script_helpers/person_routines.py" ]
[ "# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.\nfrom typing import Sequence, Type, Optional\n\nimport numpy as np\n\nfrom ..environment import LocationID, PersonRoutine, Registry, SimTimeInterval, GroceryStore, \\\n RetailStore, BarberShop, Retired, Restaurant, Bar\n\n__all__ = ['get_minor_routines', 'get_adult_routines']\n\n# helper method that encapsulates adding restaurant routine \ndef add_restaurant_routine(routines,\n registry: Registry,\n numpy_rng: Optional[np.random.RandomState] = None):\n restaurants = registry.location_ids_of_type(Restaurant)\n if len(restaurants) > 0:\n interval_in_days = 1\n routines.append(PersonRoutine(start_loc=None,\n end_loc=restaurants[numpy_rng.randint(0, len(restaurants))],\n trigger_interval=SimTimeInterval(day=interval_in_days,\n offset_day=numpy_rng.randint(0,\n interval_in_days))\n )\n )\n\n# helper method that encapsulates adding bar routine \ndef add_bar_routine(routines,\n registry: Registry,\n numpy_rng: Optional[np.random.RandomState] = None):\n bars = registry.location_ids_of_type(Bar)\n if len(bars) > 0:\n interval_in_days = 4\n routines.append(PersonRoutine(start_loc=None,\n end_loc=bars[numpy_rng.randint(0, len(bars))],\n trigger_interval=SimTimeInterval(day=interval_in_days,\n offset_day=numpy_rng.randint(0,\n interval_in_days)),\n end_locs=bars,\n explore_probability=0.03\n )\n\n )\n\ndef get_minor_routines(home_id: LocationID,\n registry: Registry,\n numpy_rng: Optional[np.random.RandomState] = None) -> Sequence[PersonRoutine]:\n routines = []\n numpy_rng = numpy_rng if numpy_rng is not None else np.random.RandomState()\n\n barber_shops = registry.location_ids_of_type(BarberShop)\n if len(barber_shops) > 0:\n routines.append(PersonRoutine(start_loc=home_id,\n end_loc=barber_shops[numpy_rng.randint(0, len(barber_shops))],\n trigger_interval=SimTimeInterval(day=30)))\n\n # add restaurant routine\n add_restaurants(routines, registry, numpy_rng)\n\n return routines\n\n\ndef get_adult_routines(person_type: Type,\n home_id: LocationID,\n registry: Registry,\n numpy_rng: Optional[np.random.RandomState] = None) -> Sequence[PersonRoutine]:\n routines = []\n numpy_rng = numpy_rng if numpy_rng is not None else np.random.RandomState()\n\n shopping_rate = 1 if isinstance(person_type, Retired) else 1\n grocery_stores = registry.location_ids_of_type(GroceryStore)\n if len(grocery_stores) > 0:\n interval_in_days = int(7 / shopping_rate)\n routines.append(PersonRoutine(start_loc=None,\n end_loc=grocery_stores[numpy_rng.randint(0, len(grocery_stores))],\n trigger_interval=SimTimeInterval(day=interval_in_days,\n offset_day=numpy_rng.randint(0,\n interval_in_days)),\n end_locs=grocery_stores,\n explore_probability=0.05))\n\n retail_stores = registry.location_ids_of_type(RetailStore)\n if len(retail_stores) > 0:\n interval_in_days = int(7 / shopping_rate)\n routines.append(PersonRoutine(start_loc=None,\n end_loc=retail_stores[numpy_rng.randint(0, len(retail_stores))],\n trigger_interval=SimTimeInterval(day=interval_in_days,\n offset_day=numpy_rng.randint(0,\n interval_in_days)),\n end_locs=retail_stores,\n explore_probability=0.05))\n\n barber_shops = registry.location_ids_of_type(BarberShop)\n if len(barber_shops) > 0:\n interval_in_days = 30\n routines.append(PersonRoutine(start_loc=home_id,\n end_loc=barber_shops[numpy_rng.randint(0, len(barber_shops))],\n trigger_interval=SimTimeInterval(day=interval_in_days,\n offset_day=numpy_rng.randint(0,\n interval_in_days))\n )\n )\n\n add_bar_routine(routines, registry, 
numpy_rng)\n\n    return routines\n\ndef get_during_work_routines(registry: Registry, numpy_rng: Optional[np.random.RandomState] = None):\n    routines = []\n    numpy_rng = numpy_rng if numpy_rng is not None else np.random.RandomState()\n\n    add_restaurant_routine(routines, registry, numpy_rng)\n\n    return routines" ]
[ [ "numpy.random.RandomState" ] ]
sanazkeshvari/allrank
[ "527ed1e60320caaddfcef391eee37df9b875d8d0" ]
[ "allrank/models/losses/listMap2.py" ]
[ "import torch\n\nfrom allrank.data.dataset_loading import PADDED_Y_VALUE\nfrom allrank.models.losses import DEFAULT_EPS\n\n\ndef listMap2(y_pred, y_true, eps=DEFAULT_EPS, padded_value_indicator=PADDED_Y_VALUE):\n\n \"\"\"\n ListMLE loss introduced in \"Listwise Approach to Learning to Rank - Theory and Algorithm\".\n :param y_pred: predictions from the model, shape [batch_size, slate_length]\n :param y_true: ground truth labels, shape [batch_size, slate_length]\n :param eps: epsilon value, used for numerical stability\n :param padded_value_indicator: an indicator of the y_true index containing a padded item, e.g. -1\n :return: loss value, a torch.Tensor\n \"\"\"\n # shuffle for randomised tie resolution\n\n random_indices = torch.randperm(y_pred.shape[-1])\n y_pred_shuffled = y_pred[:, random_indices]\n y_true_shuffled = y_true[:, random_indices]\n\n y_true_sorted, indices = y_true_shuffled.sort(descending=True, dim=-1)\n\n mask = y_true_sorted == padded_value_indicator\n\n preds_sorted_by_true = torch.gather(y_pred_shuffled, dim=1, index=indices)\n\n preds_sorted_by_true[mask] = float(\"-inf\")\n\n max_pred_values, _ = preds_sorted_by_true.max(dim=1, keepdim=True)\n\n preds_sorted_by_true_minus_max = preds_sorted_by_true - max_pred_values\n cumsums = torch.cumsum(preds_sorted_by_true_minus_max.exp().flip(dims=[1]), dim=1).flip(dims=[1])\n #gx = torch.exp(preds_sorted_by_true)\n #gx = torch.exp((y_true_sorted+2)/1000)\n gx = torch.exp((y_true_sorted+2)/1000)\n #gx = torch.exp(preds_sorted_by_true_minus_max)\n #print(\"It's True\")\n m = preds_sorted_by_true_minus_max.size(1)\n sumgx = torch.sum(gx, 0)\n sumloggx = torch.sum(torch.log(gx + eps), 0)\n sumloggxgx = torch.sum(torch.multiply(torch.log(gx + eps),gx), 0)\n temp = torch.mul(m, sumloggxgx) - torch.multiply(sumgx, sumloggx)\n a = torch.load('a.pt')\n b = torch.load('b.pt')\n shape = (gx.size(1))\n if a.size(0) == shape:\n\n #a = torch.ones(shape)\n #b = torch.ones(shape)/10000\n #gamma0 = torch.divide(torch.pow(b.cuda(), a), torch.lgamma(a) + eps),\n #gamma = torch.stack(list(gamma0), dim=0)\n x = torch.multiply(-b, gx)\n prior0 = torch.multiply(torch.pow(gx, (a - 1)),torch.exp(x))\n prior2 = torch.log(prior0 + eps)\n #prior = torch.pow(gx, (a - 1))\n observation_loss = torch.log(cumsums + eps) - preds_sorted_by_true_minus_max\n #print(observation_loss - prior2)\n observation_loss = observation_loss + prior2\n else:\n observation_loss = torch.log(cumsums + eps) - preds_sorted_by_true_minus_max\n observation_loss[mask] = 0.0\n z = torch.mean(torch.sum(observation_loss, dim=1))\n return torch.mean(torch.sum(observation_loss, dim=1))" ]
[ [ "torch.mul", "torch.gather", "torch.randperm", "torch.pow", "torch.load", "torch.multiply", "torch.log", "torch.exp", "torch.sum" ] ]
centercitypcs/centercitypcs_utils
[ "a81f527529884dcbfc869665ea1416f83304b488" ]
[ "centercitypcs_utils/__init__.py" ]
[ "\"\"\"\nUtility Functions\n\"\"\"\n\n__version__ = \"0.2.2\"\n\nimport sqlalchemy\nimport records\nimport pandas as pd\nimport gspread\nimport gspread_dataframe\n\n\ndef get_sql_as_df(database_url: str, query_file: str, **kwargs: dict) -> pd.DataFrame:\n with open(query_file, \"r\") as query_file:\n query = query_file.read()\n\n with sqlalchemy.create_engine(\n database_url, max_identifier_length=128\n ).connect() as db:\n df = pd.read_sql(query, db, **kwargs)\n\n return df\n\n\ndef ps_query_to_df(\n database_url: str, query_file: str, params: dict = {}\n) -> pd.DataFrame:\n db = records.Database(database_url, max_identifier_length=128)\n rows = db.query_file(query_file, **params)\n df = rows.export(\"df\")\n db.close()\n return df\n\n\ndef get_google_sheet_as_df(\n spreadsheet_key: str,\n service_account: str,\n worksheet_number: str = 0,\n **kwargs,\n) -> pd.DataFrame:\n access = gspread.service_account(service_account)\n spreadsheet = access.open_by_key(spreadsheet_key)\n sheet = spreadsheet.get_worksheet(worksheet_number)\n df = gspread_dataframe.get_as_dataframe(sheet, evaluate_formulas=True, **kwargs)\n\n df.dropna(axis=\"index\", how=\"all\", inplace=True)\n df.dropna(axis=\"columns\", how=\"all\", inplace=True)\n\n return df\n" ]
[ [ "pandas.read_sql" ] ]
FelixLorenz/distiller
[ "08b5cd95704d850cfb845ed7785f739cbb57de54" ]
[ "tests/common.py" ]
[ "#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport torch\nimport distiller\nfrom distiller.models import create_model\n\n\ndef setup_test(arch, dataset, parallel):\n model = create_model(False, dataset, arch, parallel=parallel)\n assert model is not None\n\n # Create the masks\n zeros_mask_dict = {}\n for name, param in model.named_parameters():\n masker = distiller.ParameterMasker(name)\n zeros_mask_dict[name] = masker\n return model, zeros_mask_dict\n\n\ndef find_module_by_name(model, module_to_find):\n for name, m in model.named_modules():\n if name == module_to_find:\n return m\n return None\n\n\ndef get_dummy_input(dataset):\n if dataset == \"imagenet\":\n return torch.randn(1, 3, 224, 224).cuda()\n elif dataset == \"cifar10\":\n return torch.randn(1, 3, 32, 32).cuda()\n raise ValueError(\"Trying to use an unknown dataset \" + dataset)\n\n\ndef almost_equal(a , b, max_diff=0.000001):\n return abs(a - b) <= max_diff\n" ]
[ [ "torch.randn" ] ]
rehno-lindeque/detr
[ "65c4f49b2795f68fba57b0f139d02e2dbe8b83ac" ]
[ "util/misc.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport torch\nimport torch.distributed as dist\nfrom torch import Tensor\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\nif float(torchvision.__version__[:3]) < 0.7:\n from torchvision.ops import _new_empty_tensor\n from torchvision.ops.misc import _output_size\n\nimport wandb\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n # @property\n # def measurable(self):\n # return self.count > 0\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\ndef all_gather(data):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n size_list = [torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n if local_size != max_size:\n padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n 
Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return input_dict\n with torch.no_grad():\n names = []\n values = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.stack(values, dim=0)\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v for k, v in zip(names, values)}\n return reduced_dict\n\n\nclass MetricLogger(object):\n def __init__(self, prefix, epoch, num_batches, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n self.prefix = prefix\n self.epoch = epoch\n self.num_batches = num_batches\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\n \"{}: {}\".format(name, str(meter))\n )\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt='{avg:.4f}')\n data_time = SmoothedValue(fmt='{avg:.4f}')\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n if torch.cuda.is_available():\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}',\n 'max mem: {memory:.0f}'\n ])\n else:\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}'\n ])\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB))\n else:\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time)))\n\n # Log every step to wandb\n stats = {k: meter.global_avg for k, meter in self.meters.items()}\n log_stats = {**{f'{self.prefix}_{k}': v for k, v in stats.items()},\n 'epoch': self.epoch,\n 'batch_step': i,\n 'step': self.epoch * self.num_batches + i\n }\n wandb.log(log_stats)\n\n i += 1\n end = time.time()\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('{} Total time: {} ({:.4f} s / 
it)'.format(\n header, total_time_str, total_time / len(iterable)))\n\n\n\ndef get_sha():\n cwd = os.path.dirname(os.path.abspath(__file__))\n\n def _run(command):\n return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n sha = 'N/A'\n diff = \"clean\"\n branch = 'N/A'\n try:\n sha = _run(['git', 'rev-parse', 'HEAD'])\n subprocess.check_output(['git', 'diff'], cwd=cwd)\n diff = _run(['git', 'diff-index', 'HEAD'])\n diff = \"has uncommited changes\" if diff else \"clean\"\n branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n except Exception:\n pass\n message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n return message\n\n\ndef collate_fn(batch):\n batch = list(zip(*batch))\n batch[0] = nested_tensor_from_tensor_list(batch[0])\n return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n # type: (List[List[int]]) -> List[int]\n maxes = the_list[0]\n for sublist in the_list[1:]:\n for index, item in enumerate(sublist):\n maxes[index] = max(maxes[index], item)\n return maxes\n\n\nclass NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)\n\n\n# _onnx_nested_tensor_from_tensor_list() is an implementation of\n# nested_tensor_from_tensor_list() that is supported by ONNX tracing.\n@torch.jit.unused\ndef _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:\n max_size = []\n for i in range(tensor_list[0].dim()):\n max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)\n max_size.append(max_size_i)\n max_size = tuple(max_size)\n\n # work around for\n # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n # m[: img.shape[1], :img.shape[2]] = False\n # which is not yet supported in onnx\n padded_imgs = []\n padded_masks = []\n for img in tensor_list:\n padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]\n padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))\n padded_imgs.append(padded_img)\n\n m = torch.zeros_like(img[0], dtype=torch.int, 
device=img.device)\n padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), \"constant\", 1)\n padded_masks.append(padded_mask.to(torch.bool))\n\n tensor = torch.stack(padded_imgs)\n mask = torch.stack(padded_masks)\n\n return NestedTensor(tensor, mask=mask)\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n elif 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n\n\n@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)\n" ]
[ [ "torch.distributed.get_world_size", "torch.cat", "torch.stack", "torch.ones", "torch.cuda.is_available", "torch.nn.functional.pad", "torch.distributed.init_process_group", "torch.ByteTensor", "torch.distributed.is_initialized", "torch.tensor", "torch.zeros_like", "torch.distributed.get_rank", "torch.empty", "torch.zeros", "torch.save", "torch.cuda.max_memory_allocated", "torch.cuda.device_count", "torch.cuda.set_device", "torch.distributed.barrier", "torch.distributed.is_available", "torch.no_grad", "torch.nn.functional.interpolate", "torch.distributed.all_gather", "torch.ByteStorage.from_buffer", "torch.distributed.all_reduce" ] ]
cgodine/ACT
[ "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39" ]
[ "act/io/csvfiles.py" ]
[ "\"\"\"\n===============\nact.io.csvfiles\n===============\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\n# import standard modules\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None,\n skipfooter=0, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n act_obj : Object\n ACT dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n the_ds, the_flag = act.io.csvfiles.read(\n act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n # Read data using pandas read_csv\n arm_ds = pd.read_csv(filename, sep=sep, names=column_names,\n skipfooter=skipfooter, engine=engine, **kwargs)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in arm_ds:\n arm_ds.date_time = arm_ds.date_time.astype('datetime64')\n arm_ds.time = arm_ds.date_time\n arm_ds = arm_ds.set_index('time')\n\n # Convert to xarray DataSet\n arm_ds = arm_ds.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = arm_ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n arm_ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n arm_ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(arm_ds)\n if is_arm_file_flag == 0:\n arm_ds.attrs['_datastream'] = '.'.join(filename.split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n arm_ds.attrs['_site'] = str(arm_ds.attrs['_datastream'])[0:3]\n\n arm_ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return arm_ds\n" ]
[ [ "pandas.to_datetime", "pandas.read_csv" ] ]
792370706/Ivy
[ "d5d3d92443dfb590335554dd4ce834ea675189fe" ]
[ "ivy_tests/test_core/test_reductions.py" ]
[ "\"\"\"\nCollection of tests for templated reduction functions\n\"\"\"\n\n# global\nimport pytest\nimport numpy as np\n\n# local\nimport ivy\nimport ivy.numpy\nimport ivy_tests.helpers as helpers\n\n\n# reduce_sum\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_sum(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_sum(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_sum, x), ivy.numpy.reduce_sum(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_sum)\n\n\n# reduce_prod\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_prod(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_prod(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_prod, x), ivy.numpy.reduce_prod(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_prod)\n\n\n# reduce_mean\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_mean(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_mean(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in 
enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_mean, x), ivy.numpy.reduce_mean(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_mean)\n\n\n# reduce_var\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_var(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_var(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_var, x), ivy.numpy.reduce_var(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_var)\n\n\n# reduce_std\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_std(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_std(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_std, x), ivy.numpy.reduce_var(ivy.to_numpy(x)) ** 0.5)\n # compilation test\n if call is helpers.torch_call:\n # PyTorch cannot yet compile ivy.core only functions, without a direct backend implementation\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_std)\n\n\n# reduce_min\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_min(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_min(x, axis, kd)\n # type test\n assert 
ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_min, x), ivy.numpy.reduce_min(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_min)\n\n\n# reduce_max\n@pytest.mark.parametrize(\n \"x\", [[1., 2., 3.], [[1., 2., 3.]]])\n@pytest.mark.parametrize(\n \"axis\", [None, 0, -1, (0,), (-1,)])\n@pytest.mark.parametrize(\n \"kd\", [True, False])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_reduce_max(x, axis, kd, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.reduce_max(x, axis, kd)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n if axis is None:\n expected_shape = [1]*len(x.shape) if kd else []\n else:\n axis_ = [axis] if isinstance(axis, int) else axis\n axis_ = [item % len(x.shape) for item in axis_]\n expected_shape = list(x.shape)\n if kd:\n expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]\n else:\n [expected_shape.pop(item) for item in axis_]\n expected_shape = [1] if expected_shape == [] else expected_shape\n assert ret.shape == tuple(expected_shape)\n # value test\n assert np.allclose(call(ivy.reduce_max, x), ivy.numpy.reduce_max(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.reduce_max)\n\n\n# einsum\n@pytest.mark.parametrize(\n \"eq_n_op_n_shp\", [(\"ii\", (np.arange(25).reshape(5, 5),), (1,)),\n (\"ii->i\", (np.arange(25).reshape(5, 5),), (5,)),\n (\"ij,j\", (np.arange(25).reshape(5, 5), np.arange(5)), (5,))])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])\ndef test_einsum(eq_n_op_n_shp, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n eq, operands, true_shape = eq_n_op_n_shp\n operands = [tensor_fn(op, dtype_str, dev_str) for op in operands]\n ret = ivy.einsum(eq, *operands)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n assert ret.shape == true_shape\n # value test\n assert np.allclose(call(ivy.einsum, eq, *operands),\n ivy.numpy.einsum(eq, *[ivy.to_numpy(op) for op in operands]))\n # compilation test\n if call is helpers.torch_call:\n # torch.jit functions can't take variable number of arguments\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.einsum)\n" ]
[ [ "numpy.arange" ] ]
BinhuiXie/SPCL
[ "9e5bab7b5d38fde847f9e8f85ca64498baaf86be" ]
[ "semantic_prototype.py" ]
[ "import argparse\r\nimport os\r\nimport logging\r\nfrom collections import OrderedDict\r\nfrom tqdm import tqdm\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.utils.data\r\nimport torch.distributed\r\nimport torch.backends.cudnn\r\n\r\nfrom core.configs import cfg\r\nfrom core.datasets import build_dataset\r\nfrom core.models import build_feature_extractor, build_classifier\r\nfrom core.utils.misc import mkdir\r\nfrom core.utils.logger import setup_logger\r\n\r\n\r\ndef strip_prefix_if_present(state_dict, prefix):\r\n keys = sorted(state_dict.keys())\r\n if not all(key.startswith(prefix) for key in keys):\r\n return state_dict\r\n stripped_state_dict = OrderedDict()\r\n for key, value in state_dict.items():\r\n stripped_state_dict[key.replace(prefix, \"\")] = value\r\n return stripped_state_dict\r\n\r\n\r\ndef get_source_centroids(cfg):\r\n logger = logging.getLogger(\"ICCV2021.trainer\")\r\n logger.info(\"Start training\")\r\n\r\n feature_extractor = build_feature_extractor(cfg)\r\n device = torch.device(cfg.MODEL.DEVICE)\r\n feature_extractor.to(device)\r\n\r\n classifier = build_classifier(cfg)\r\n classifier.to(device)\r\n\r\n if cfg.resume:\r\n logger.info(\"Loading checkpoint from {}\".format(cfg.resume))\r\n checkpoint = torch.load(cfg.resume, map_location=torch.device('cpu'))\r\n model_weights = strip_prefix_if_present(checkpoint['feature_extractor'], 'module.')\r\n feature_extractor.load_state_dict(model_weights)\r\n classifier_weights = strip_prefix_if_present(checkpoint['classifier'], 'module.')\r\n classifier.load_state_dict(classifier_weights)\r\n\r\n feature_extractor.eval()\r\n classifier.eval()\r\n\r\n torch.cuda.empty_cache()\r\n\r\n src_train_data = build_dataset(cfg, mode='train', is_source=True, epochwise=True)\r\n src_train_loader = torch.utils.data.DataLoader(src_train_data,\r\n batch_size=cfg.SOLVER.BATCH_SIZE_VAL,\r\n shuffle=False,\r\n num_workers=4,\r\n pin_memory=True,\r\n drop_last=False)\r\n\r\n # the k-th column is the representation of k-th semantic prototype\r\n objective_feat = {k: None for k in range(cfg.MODEL.NUM_CLASSES)}\r\n\r\n with torch.no_grad():\r\n for batch in tqdm(src_train_loader):\r\n src_input, src_label, _ = batch\r\n src_input = src_input.cuda(non_blocking=True)\r\n src_label = src_label.cuda(non_blocking=True).long()\r\n\r\n src_feat = feature_extractor(src_input)\r\n src_seg = classifier(src_feat)\r\n\r\n src_seg_softmax = F.softmax(src_seg, dim=1)\r\n _, src_seg_label = torch.max(src_seg_softmax, dim=1)\r\n\r\n # source mask\r\n N, C1, H1, W1 = src_feat.size()\r\n src_mask = F.interpolate(src_label.unsqueeze(0).float(), size=(H1, W1), mode='nearest').squeeze(0).long()\r\n src_mask[src_mask != src_seg_label] = 255 # combine the predicted label\r\n\r\n # normalization before compute centroid\r\n src_feat = F.normalize(src_feat, p=2, dim=1)\r\n src_feat = src_feat.transpose(1, 2).transpose(2, 3).contiguous()\r\n for k in range(cfg.MODEL.NUM_CLASSES):\r\n if k in src_mask:\r\n src_k_mask = (src_mask == k)\r\n if src_k_mask.sum() > 0:\r\n src_k_feat = src_feat[src_k_mask.view(N, H1, W1, 1).repeat(1, 1, 1, C1)].view(-1, C1)\r\n src_k_feat_centroids = torch.mean(src_k_feat, dim=0).view(-1, 1)\r\n if objective_feat[k] is None:\r\n objective_feat[k] = src_k_feat_centroids\r\n else:\r\n objective_feat[k] = torch.cat((objective_feat[k], src_k_feat_centroids), dim=1)\r\n\r\n semantic_prototype = torch.zeros((C1, cfg.MODEL.NUM_CLASSES))\r\n for k in range(cfg.MODEL.NUM_CLASSES):\r\n semantic_prototype[:, [k]] = 
torch.mean(objective_feat[k], dim=1).view(-1, 1).cpu()\r\n logger.info('Semantic prototype finised!')\r\n return semantic_prototype\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description=\"PyTorch Semantic Segmentation Testing\")\r\n parser.add_argument(\"-cfg\",\r\n \"--config-file\",\r\n default=\"\",\r\n metavar=\"FILE\",\r\n help=\"path to config file\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"opts\",\r\n help=\"Modify config options using the command-line\",\r\n default=None,\r\n nargs=argparse.REMAINDER,\r\n )\r\n\r\n args = parser.parse_args()\r\n\r\n torch.backends.cudnn.benchmark = True\r\n\r\n cfg.merge_from_file(args.config_file)\r\n cfg.merge_from_list(args.opts)\r\n cfg.freeze()\r\n\r\n output_dir = cfg.OUTPUT_DIR\r\n if output_dir:\r\n mkdir(output_dir)\r\n\r\n logger = setup_logger(\"ICCV2021\", output_dir, 0)\r\n logger.info(args)\r\n\r\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\r\n logger.info(\"Running with config:\\n{}\".format(cfg))\r\n\r\n semantic_prototype = get_source_centroids(cfg)\r\n torch.save(semantic_prototype, os.path.join(output_dir, 'semantic_prototype.pth'))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" ]
[ [ "torch.zeros", "torch.device", "torch.nn.functional.normalize", "torch.cat", "torch.max", "torch.no_grad", "torch.cuda.empty_cache", "torch.utils.data.DataLoader", "torch.nn.functional.softmax", "torch.mean" ] ]
speedcell4/OpenNRE
[ "4e4757eed77eb5fdcaefb4770e724b9a37090cb7" ]
[ "old/plot_pr.py" ]
[ "from sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.clf()\nfilename = ['CNN+ATT', 'Hoffmann', 'MIMLRE', 'Mintz', 'PCNN+ATT']\ncolor = ['red', 'turquoise', 'darkorange', 'cornflowerblue', 'teal']\nfor i in range(len(filename)):\n precision = np.load('./data/' + filename[i] + '_precision.npy')\n recall = np.load('./data/' + filename[i] + '_recall.npy')\n plt.plot(recall, precision, color=color[i], lw=2, label=filename[i])\n\n# ATTENTION: put the model iters you want to plot into the list\nmodel_iter = [10900]\nfor one_iter in model_iter:\n y_true = np.load('./data/allans.npy')\n y_scores = np.load('./out/sample_allprob_iter_' + str(one_iter) + '.npy')\n\n precision, recall, threshold = precision_recall_curve(y_true, y_scores)\n average_precision = average_precision_score(y_true, y_scores)\n\n plt.plot(recall[:], precision[:], lw=2, color='navy', label='BGRU+2ATT')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.3, 1.0])\n plt.xlim([0.0, 0.4])\n plt.title('Precision-Recall Area={0:0.2f}'.format(average_precision))\n plt.legend(loc=\"upper right\")\n plt.grid(True)\n plt.savefig('iter_' + str(one_iter))\n" ]
[ [ "matplotlib.pyplot.xlim", "sklearn.metrics.precision_recall_curve", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "sklearn.metrics.average_precision_score", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf" ] ]
ajrichards/bayesian-examples
[ "fbd87c6f1613ea516408e9ebc3c9eff1248246e4", "fbd87c6f1613ea516408e9ebc3c9eff1248246e4" ]
[ "archive/visualization/simple_network.py", "archive/pymc3/linear-regression.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef draw_network(edge_dict,file_name=None,ax=None,node_size=500,k=10):\n \"\"\"draw a simple network\"\"\"\n if not ax:\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n ax.set_yticks([])\n ax.set_xticks([])\n\n G = nx.Graph()\n for edge,weight in edge_dict.items():\n G.add_edge(edge[0],edge[1],weight=weight)\n\n pos=nx.spring_layout(G,k=k)\n\n ## draw the nodes \n nx.draw_networkx(G,pos,node_size=node_size,\n node_color='#ADD8E6',alpha=0.9,ax=ax)\n\n ax.axis('off')\n \n if file_name:\n plt.savefig(file_name)\n\nif __name__ == \"__main__\":\n np.random.seed(42)\n edge_dict = {(\"A\",\"B\"):1,(\"B\",\"C\"):1,(\"A\",\"C\"):1,(\"A\",\"E\"):1}\n draw_network(edge_dict,\"simple.png\",k=100)\n plt.show()\n \n\n \n", "#!/usr/bin/env python\n\"\"\"\nlinear regression example\n\nhttp://pymc-devs.github.io/pymc3/notebooks/getting_started.html\n\n\"\"\"\n\nfrom __future__ import division\nimport os,sys,cPickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nimport pymc3 as pm\nimport pandas as pd\n\nfrom mylib import *\n\n\nimport logging\n_logger = logging.getLogger(\"theano.gof.compilelock\")\n_logger.setLevel(logging.ERROR)\n\n\nseed = 42\nn = 20\nb0_true = -0.3\nb1_true = 0.5\nx,y = get_simple_regression_samples(n,b0=b0_true,b1=b1_true,seed=seed)\nniter = 2000\nrun_trace = False\n\nwith pm.Model():\n\n ## Priors for unknown model parameters\n b0 = pm.Normal('b0', mu=0, sd=10)\n betas = pm.Normal('betas', mu=0, sd=10, shape=x.shape[1])\n sigma = pm.HalfNormal('sigma', sd=1)\n\n ## Expected value of outcome\n mu = b0 + betas[0]*x[:,0]\n\n ## Likelihood (sampling distribution) of observations\n y_obs = pm.Normal('y_obs', mu=mu, sd=sigma, observed=y)\n\n ## inference\n start = pm.find_MAP()\n step = pm.NUTS() # Hamiltonian MCMC with No U-Turn Sampler\n\n trace_pickle = \"traces/linreg.pkl\"\n if run_trace or not os.path.exists(trace_pickle):\n tmp = open(trace_pickle,'w')\n trace = pm.sample(niter, step, start,random_seed=123, progressbar=True)\n cPickle.dump(trace,tmp)\n tmp.close()\n else:\n print(\"...loading saved trace\")\n tmp = open(trace_pickle,'r')\n trace = cPickle.load(tmp)\n\n## create a fit from the traces\nb0_hat = trace['b0'][-500:].mean()\nb1_hat = trace['betas'][:,0][-500:].mean()\n#print(pc.utils.hpd(MCMClinear.trace('m')[:] , 1.- 0.95))\n#print(pc.utils.hpd(MCMClinear.trace('c')[:] , 1.- 0.95))\ny_pred_pymc = b0_hat + (b1_hat*x[:,0])\n\n## make a least squares fit \ncoefs_lstsq = fit_linear_lstsq(x,y)\ny_pred_lstsq = coefs_lstsq[0] + (coefs_lstsq[1]*x[:,0])\n\n## plot the fits\nfig = plt.figure(figsize=(8,8))\nax = fig.add_subplot(111)\nax.plot(x[:,0],y,'ko')\nax.plot(x[:,0],y_pred_pymc,color='red',linewidth=5.0,label='pymc',alpha=0.5)\nax.plot(x[:,0],y_pred_lstsq,color='black',label='least squares')\n\n## add a credible interval (1sd)\nupper = y_pred_pymc + y_pred_pymc.std() * 0.5\nlower = y_pred_pymc - y_pred_pymc.std() * 0.5\nax.plot(x[:,0],upper,color='black',label='upper')\nax.plot(x[:,0],lower,color='black',label='lower')\n\nax.legend()\nplt.show()\n\n\n\n## print summary\nprint(\"\\n-----------------------\")\nprint(\"truth: b0=%s,b1=%s\"%(b0_true,b1_true))\nprint(\"pymc fit: b0=%s,b1=%s\"%(round(b0_hat,3),round(b1_hat,3)))\nprint(\"lstsq fit: b0=%s,b1=%s\"%(round(coefs_lstsq[0],3),round(coefs_lstsq[1],3)))\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.show", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
Coderx7/InsightFace-v2
[ "d9e8519f4ea6de891b6b04a095c758b0d31d36b6" ]
[ "retinaface/layers/functions/prior_box.py" ]
[ "from itertools import product as product\nfrom math import ceil\n\nimport torch\n\n\nclass PriorBox(object):\n def __init__(self, cfg, image_size=None, phase='train'):\n super(PriorBox, self).__init__()\n self.min_sizes = cfg['min_sizes']\n self.steps = cfg['steps']\n self.clip = cfg['clip']\n self.image_size = image_size\n self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]\n self.name = \"s\"\n\n def forward(self):\n anchors = []\n for k, f in enumerate(self.feature_maps):\n min_sizes = self.min_sizes[k]\n for i, j in product(range(f[0]), range(f[1])):\n for min_size in min_sizes:\n s_kx = min_size / self.image_size[1]\n s_ky = min_size / self.image_size[0]\n dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]\n dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]\n for cy, cx in product(dense_cy, dense_cx):\n anchors += [cx, cy, s_kx, s_ky]\n\n # back to torch land\n output = torch.Tensor(anchors).view(-1, 4)\n if self.clip:\n output.clamp_(max=1, min=0)\n return output\n" ]
[ [ "torch.Tensor" ] ]
JianJiao16/BANG
[ "95d5a8b95397f401f4b9c677899ef01126910cdb" ]
[ "bang/bang/bang_AR.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import options, utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqIncrementalDecoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n MultiheadAttention,\n LayerNorm,\n)\nfrom fairseq.modules.transformer_sentence_encoder import init_bert_params\nfrom .learned_positional_embedding import LearnedPositionalEmbedding\nfrom .ngram_multihead_attention import NgramMultiheadAttention, ngram_attention_bias\n\nDEFAULT_MAX_SOURCE_POSITIONS = 512\nDEFAULT_MAX_TARGET_POSITIONS = 512\n\n\n@register_model('bang_ar')\nclass BANGARModel(FairseqEncoderDecoderModel):\n \"\"\"\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n The Transformer model provides the following named architectures and\n command-line arguments:\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--ngram', type=int, metavar='N',\n help='num of predicting grams')\n parser.add_argument('--num_buckets', type=int, metavar='N',\n help='num of buckets for relative position')\n parser.add_argument('--relative_max_distance', type=int, metavar='N',\n help='num of bucket for relative position')\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout', type=float, metavar='D',\n help='dropout probability after activation in FFN.')\n\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers', type=int, metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\n help='num encoder attention heads')\n\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers', type=int, metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads', type=int, metavar='N',\n help='num decoder attention heads')\n\n parser.add_argument('--share-all-embeddings', action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument('--load-from-pretrained-model', type=str, default=None,\n help='Load from pretrained model')\n parser.add_argument('--load-sep', action='store_true',\n help='load pretrained [SEP] weight into [X_SEP]. 
([SEP] used as eos in fine tuning)')\n # fmt: on\n\n def get_normalized_probs(self, net_output, log_probs, sample=None):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n if hasattr(self, 'decoder'):\n return self.decoder.get_normalized_probs(net_output, log_probs, sample)\n elif torch.is_tensor(net_output):\n logits = net_output.float()\n if log_probs:\n return F.log_softmax(logits, dim=-1)\n else:\n return F.softmax(logits, dim=-1)\n raise NotImplementedError\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n def build_embedding(dictionary, embed_dim):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError('--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(\n src_dict, args.encoder_embed_dim\n )\n decoder_embed_tokens = build_embedding(\n tgt_dict, args.decoder_embed_dim\n )\n\n encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens)\n decoder = NgramTransformerDecoder(args, tgt_dict, decoder_embed_tokens)\n\n model = BANGARModel(encoder, decoder)\n\n if args.load_from_pretrained_model is not None:\n states = torch.load(args.load_from_pretrained_model, map_location='cpu')\n if 'model' in states and 'args' in states:\n states = states['model']\n if args.load_sep:\n encoder_token_weight = states['encoder.embed_tokens.weight']\n decoder_token_weight = states['decoder.embed_tokens.weight']\n encoder_token_weight[2] = encoder_token_weight[102]\n decoder_token_weight[2] = decoder_token_weight[102]\n states['encoder.embed_tokens.weight'] = encoder_token_weight\n states['decoder.embed_tokens.weight'] = decoder_token_weight\n for position_name, target_position_length in [\n ('encoder.embed_positions.weight', model.encoder.embed_positions.weight.size(0)), \\\n ('decoder.embed_positions.weight', model.decoder.embed_positions.weight.size(0))]:\n if states[position_name].size(0) < target_position_length:\n _index = torch.arange(states[position_name].size(1))\n expend_position_states = states[position_name].clone()\n while states[position_name].size(0) < target_position_length:\n _index = torch.cat((_index[1:], _index[:1]), dim=0)\n states[position_name] = torch.cat([states[position_name], expend_position_states[:, _index]],\n dim=0)\n if states[position_name].size(0) > target_position_length:\n states[position_name] = states[position_name][:target_position_length]\n if states['decoder.ngram_input_embed.weight'].size(0) == 2:\n states['decoder.ngram_input_embed.weight'] = states['decoder.ngram_input_embed.weight'][1:2, :]\n model.load_state_dict(states)\n args.load_from_pretrained_model = None # Clear this 
param\n\n return BANGARModel(encoder, decoder)\n\n def max_positions(self):\n return (self.encoder.max_positions(), self.decoder.max_positions())\n\n def forward(self, src_tokens=None, src_lengths=None, prev_output_tokens=None, **kwargs):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n First feed a batch of source tokens through the encoder. Then, feed the\n encoder output and previous decoder outputs (i.e., teacher forcing) to\n the decoder to produce the next outputs::\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)\n return decoder_out\n\n\nclass TransformerEncoderLayer(nn.Module):\n \"\"\"\n Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained\n models.\n \"\"\"\n\n def __init__(\n self,\n embedding_dim: float = 768,\n ffn_embedding_dim: float = 3072,\n num_attention_heads: float = 8,\n dropout: float = 0.1,\n attention_dropout: float = 0.1,\n activation_dropout: float = 0.1,\n activation_fn: str = 'relu',\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n export: bool = False,\n ) -> None:\n super().__init__()\n # Initialize parameters\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.activation_dropout = activation_dropout\n\n # Initialize blocks\n self.activation_fn = utils.get_activation_fn(activation_fn)\n self.self_attn = MultiheadAttention(\n self.embedding_dim,\n num_attention_heads,\n dropout=attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=True,\n )\n\n # layer norm associated with the self attention layer\n self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)\n self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)\n self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)\n\n # layer norm associated with the position wise feed-forward NN\n self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)\n\n def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n ):\n \"\"\"\n LayerNorm is applied either before or after the self-attention/ffn\n modules similar to the original Transformer imlementation.\n \"\"\"\n residual = x\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.self_attn_layer_norm(x)\n\n residual = x\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.final_layer_norm(x)\n return x, attn\n\n\nclass NgramTransformerDecoderLayer(nn.Module):\n def __init__(\n self,\n ngram=1,\n embedding_dim: float = 768,\n ffn_embedding_dim: float = 3072,\n num_attention_heads: float = 8,\n 
dropout: float = 0.1,\n attention_dropout: float = 0.1,\n activation_dropout: float = 0.1,\n activation_fn: str = 'relu',\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n export: bool = False,\n\n ):\n super().__init__()\n\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.activation_dropout = activation_dropout\n\n # Initialize blocks\n self.activation_fn = utils.get_activation_fn(activation_fn)\n self.ngram_self_attn = NgramMultiheadAttention(\n self.embedding_dim,\n num_attention_heads,\n dropout=attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=True,\n ngram=ngram\n )\n self.ngram = ngram\n assert ngram == 1, 'BANG ar model ngram 1 for AR'\n\n # layer norm associated with the self attention layer\n self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)\n\n self.encoder_attn = MultiheadAttention(\n self.embedding_dim,\n num_attention_heads,\n kdim=embedding_dim,\n vdim=embedding_dim,\n dropout=attention_dropout,\n encoder_decoder_attention=True,\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)\n\n self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)\n self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)\n\n # layer norm associated with the position wise feed-forward NN\n self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)\n self.need_attn = False\n\n def forward(\n self,\n x,\n encoder_out=None,\n encoder_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n prev_attn_state=None,\n self_attn_mask=None,\n ngram_mask_matrix=None,\n i_buckets_main_stream=None,\n i_bucket_relative_stream=None,\n real_positions=None\n ):\n # one main stream and ngram predicting streams\n residual = x\n\n if prev_self_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_self_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n\n x, attn = self.ngram_self_attn(\n query=x,\n key=x,\n value=x,\n incremental_state=incremental_state,\n need_weights=False,\n self_attn_mask=self_attn_mask,\n ngram_mask_matrix=ngram_mask_matrix,\n i_buckets_main_stream=i_buckets_main_stream,\n i_bucket_relative_stream=i_bucket_relative_stream,\n real_positions=real_positions\n )\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.encoder_attn_layer_norm(x)\n\n residual = x\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.final_layer_norm(x)\n return x, attn\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n\nclass 
TransformerEncoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`TransformerEncoderLayer`.\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = None # math.sqrt(embed_dim)\n self.embed_positions = LearnedPositionalEmbedding(\n args.max_source_positions + 1 + self.padding_idx, embed_dim, self.padding_idx,\n )\n\n self.layers = nn.ModuleList([])\n\n self.layers.extend([\n TransformerEncoderLayer(\n args.encoder_embed_dim,\n args.encoder_ffn_embed_dim,\n args.encoder_attention_heads,\n args.dropout,\n args.attention_dropout,\n args.activation_dropout,\n args.activation_fn,\n )\n for i in range(args.encoder_layers)\n ])\n\n self.emb_layer_norm = LayerNorm(embed_dim)\n\n self.apply(init_bert_params)\n\n def forward(self, src_tokens, src_lengths, **unused):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n Returns:\n dict:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n \"\"\"\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n x = self.embed_tokens(src_tokens)\n # embed tokens and positions\n if self.embed_scale is not None:\n x *= self.embed_scale\n\n if self.embed_positions is not None:\n pos_emb, real_positions = self.embed_positions(src_tokens)\n x += pos_emb\n\n if self.emb_layer_norm:\n x = self.emb_layer_norm(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n if encoder_padding_mask is not None:\n x *= 1 - encoder_padding_mask.unsqueeze(-1).type_as(x)\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # encoder layers\n for layer in self.layers:\n # x, _ = layer(x, self_attn_padding_mask=encoder_padding_mask, real_positions=real_positions)\n x, _ = layer(x, self_attn_padding_mask=encoder_padding_mask, )\n\n return {\n 'encoder_out': x, # T x B x C\n 'encoder_padding_mask': encoder_padding_mask, # B x T\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out['encoder_out'] is not None:\n encoder_out['encoder_out'] = \\\n encoder_out['encoder_out'].index_select(1, new_order)\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return 
min(self.max_source_positions, self.embed_positions.max_positions())\n\n\nclass NgramTransformerDecoder(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n self.ngram = args.ngram\n self.num_buckets = args.num_buckets\n self.relative_max_distance = args.relative_max_distance\n\n self.dropout = args.dropout\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n self.embed_dim = embed_dim\n self.embed_tokens = embed_tokens\n self.embed_scale = None # math.sqrt(embed_dim) # todo: try with input_embed_dim\n\n self.embed_positions = LearnedPositionalEmbedding(\n args.max_target_positions + 2 + self.padding_idx, embed_dim, self.padding_idx,\n )\n\n self.ngram_input_embed = Embedding(1, input_embed_dim, None)\n\n self.layers = nn.ModuleList([])\n\n self.layers.extend([\n NgramTransformerDecoderLayer(\n args.ngram,\n args.decoder_embed_dim,\n args.decoder_ffn_embed_dim,\n args.decoder_attention_heads,\n args.dropout,\n args.attention_dropout,\n args.activation_dropout,\n args.activation_fn,\n\n )\n for _ in range(args.decoder_layers)\n ])\n\n if not self.share_input_output_embed:\n self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.embed_dim))\n nn.init.normal_(self.embed_out, mean=0, std=self.embed_dim ** -0.5)\n\n self.emb_layer_norm = LayerNorm(embed_dim)\n self.apply(init_bert_params)\n\n def forward(self,\n prev_output_tokens,\n encoder_out=None,\n incremental_state=None,\n **unused):\n # T\n T = prev_output_tokens.size(1)\n # x [B, (1+ngram)*T, C]\n x_list, extra = self.extract_features(prev_output_tokens, encoder_out, incremental_state, **unused)\n x_predicted = x_list[1:]\n x_predicted = [self.output_layer(x) for x in x_predicted]\n if incremental_state is not None:\n x_predicted = x_predicted[0]\n for k in extra:\n if extra[k] is not None:\n extra[k] = extra[k][0]\n return x_predicted, extra\n\n def _relative_positions_bucket(self, relative_positions, bidirectional=False):\n num_buckets = self.num_buckets\n max_distance = self.relative_max_distance\n n = -relative_positions\n result = 0\n if bidirectional:\n num_buckets = num_buckets // 2\n result = result + torch.lt(n, torch.zeros_like(n)).int() * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n max_exact = num_buckets // 2\n is_small = torch.lt(n, max_exact)\n val_if_large = max_exact + torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (\n num_buckets - max_exact)\n val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1))\n val_if_large = val_if_large.int()\n result = result + torch.where(is_small, n.int(), val_if_large)\n return result\n\n def cal_pretrain_relative_positions(self, real_positions):\n # main stream\n main_stream_relative_positions = real_positions.unsqueeze(1)\n # [B,T,T/S]\n 
main_stream_relative_positions = main_stream_relative_positions.repeat(1, real_positions.size(-1), 1)\n # [B,T,1]\n real_positions_main = real_positions.unsqueeze(-1)\n main_stream_relative_positions = main_stream_relative_positions - real_positions_main\n\n # predicting stream\n # input shift\n real_positions_shift_predicting_stream = real_positions - 1\n # [B,1, 2*T]\n predicting_stream_relative_positions = torch.cat((real_positions_shift_predicting_stream, real_positions),\n dim=-1).unsqueeze(1)\n # [B,T, 2*T]\n predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, real_positions.size(-1),\n 1)\n # [B,T, 1]\n real_positions_predicting_stream = real_positions.unsqueeze(-1)\n predicting_stream_relative_positions = predicting_stream_relative_positions - real_positions_predicting_stream\n i_buckets_main_stream = self._relative_positions_bucket(main_stream_relative_positions, bidirectional=False)\n i_bucket_relative_stream = self._relative_positions_bucket(predicting_stream_relative_positions,\n bidirectional=False)\n return i_buckets_main_stream, i_bucket_relative_stream\n\n def cal_finetune_relative_positions(self, real_positions):\n n_tokens = real_positions.size(-1)\n batch_size = real_positions.size(0)\n if not hasattr(self,\n '_finetune_i_bucket_main_stream') or \\\n self._finetune_i_bucket_main_stream is None \\\n or self._finetune_i_bucket_main_stream.device != real_positions.device:\n fake_positions = torch.arange(1, self.max_target_positions + 1).repeat(1, 1)\n finetune_i_bucket_main_stream, finetune_i_bucket_predicting_stream = \\\n self.cal_pretrain_relative_positions(fake_positions)\n self._finetune_i_bucket_main_stream = finetune_i_bucket_main_stream.to(real_positions.device)\n self._finetune_i_bucket_predicting_stream = finetune_i_bucket_predicting_stream.to(real_positions.device)\n finetune_i_bucket_main_stream = self._finetune_i_bucket_main_stream[:, :n_tokens, :n_tokens].repeat(batch_size,\n 1, 1)\n finetune_i_bucket_predicting_stream = torch.cat([\n self._finetune_i_bucket_predicting_stream[:, :n_tokens, :n_tokens],\n self._finetune_i_bucket_predicting_stream[:, :n_tokens,\n self.max_target_positions:self.max_target_positions + n_tokens]\n ], 2).repeat(batch_size, 1, 1)\n return finetune_i_bucket_main_stream, finetune_i_bucket_predicting_stream\n\n def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused):\n # embed positions\n # [bos, A, B, C, D, eos] with real positions [1,2,3,4,5,6](main stream), [2,3,4,5,6,7](predicting stream)\n # target [B,C,D] with prev [A,B,C] from [A,B,C,D] as pretraining span with real positions [2,3,4],\n # but target actually [3,4,5] for fine tune with another [bos].\n # thus [2,3,4] used for main stream shifted prev [A,B,C], [3,4,5] used for predicting [B,C,D]\n if 'positions' in unused:\n # pretrain procedure\n main_stream_pos_embed = self.embed_positions._forward(unused['positions'])\n real_positions = unused['positions']\n i_buckets_main_stream, i_bucket_relative_stream = \\\n self.cal_pretrain_relative_positions(real_positions)\n else:\n # fine tune procedure\n main_stream_pos_embed, real_positions = self.embed_positions(\n prev_output_tokens,\n incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n if incremental_state is not None:\n i_buckets_main_stream, i_bucket_relative_stream = None, None\n else:\n i_buckets_main_stream, i_bucket_relative_stream = \\\n self.cal_finetune_relative_positions(real_positions)\n\n 
predicting_stream_pos_embed = self.embed_positions._forward(real_positions + 1)\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if main_stream_pos_embed is not None:\n main_stream_pos_embed = main_stream_pos_embed[:, -1:]\n\n x = self.embed_tokens(prev_output_tokens)\n # embed tokens and positions\n if self.embed_scale is not None:\n x *= self.embed_scale\n\n if main_stream_pos_embed is not None:\n x += main_stream_pos_embed\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n\n inner_states = [x]\n if main_stream_pos_embed is None:\n print('positions should be used to predict ngrams')\n raise Exception()\n\n if self.embed_scale is not None:\n ngram_input_embed = self.embed_scale * self.ngram_input_embed.weight\n else:\n ngram_input_embed = self.ngram_input_embed.weight\n\n if incremental_state is not None:\n B = x.size(1)\n ngram_masks = [\n (ngram_input_embed[0] + predicting_stream_pos_embed).transpose(0, 1).repeat(1, B, 1)\n for ngram in range(self.ngram)]\n else:\n ngram_masks = [(ngram_input_embed[0] + predicting_stream_pos_embed).transpose(0, 1) for\n ngram in range(self.ngram)]\n\n self_attn_mask = self.buffered_future_mask(x) if incremental_state is None else None\n ngram_mask_matrix = self.buffered_future_mask_ngram(x) if incremental_state is None else None\n\n # TODO in train [(1+ngram)*T, B, C], in inference [T+ngram, B, C]\n x = torch.cat([x] + ngram_masks, 0)\n\n if self.emb_layer_norm:\n x = self.emb_layer_norm(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # decoder layers\n for layer in self.layers:\n x, attn = layer(\n x,\n encoder_out['encoder_out'] if encoder_out is not None else None,\n encoder_out['encoder_padding_mask'] if encoder_out is not None else None,\n incremental_state,\n self_attn_mask=self_attn_mask,\n ngram_mask_matrix=ngram_mask_matrix,\n i_buckets_main_stream=i_buckets_main_stream,\n i_bucket_relative_stream=i_bucket_relative_stream,\n real_positions=real_positions\n )\n inner_states.append(x)\n\n # TODO [(1+ngram)*T, B, C] -> [B, (1+ngram)*T, C]\n x_list = x.transpose(0, 1).chunk(1 + self.ngram, 1)\n if attn is not None:\n attn_list = attn.transpose(0, 1).chunk(1 + self.ngram, 1)\n else:\n attn_list = None\n\n return x_list, {'attn': attn_list}\n\n def get_normalized_probs(self, net_output, log_probs, sample):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n\n if hasattr(self, 'adaptive_softmax') and self.adaptive_softmax is not None:\n if sample is not None:\n assert 'target' in sample\n target = sample['target']\n else:\n target = None\n out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)\n return out.exp_() if not log_probs else out\n '''\n logits_list = net_output[0]\n if log_probs:\n return [utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace) for logits in logits_list][0]\n else:\n return [utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace) for logits in logits_list][0]\n '''\n logits = net_output[0]\n if log_probs:\n return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)\n else:\n return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n\n def max_positions(self):\n \"\"\"Maximum output length supported 
by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(self,\n '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(\n 0) < dim:\n self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def buffered_future_mask_ngram(self, tensor):\n dim = tensor.size(0)\n if not hasattr(self,\n '_ngram_future_mask') or self._ngram_future_mask is None or self._ngram_future_mask.device != tensor.device:\n self._ngram_future_mask = ngram_attention_bias(self.max_target_positions, self.ngram).type(tensor.dtype).to(\n tensor.device)\n ngram_future_mask = torch.cat([self._ngram_future_mask[:, :dim, :dim],\n self._ngram_future_mask[:, :dim,\n self.max_target_positions: self.max_target_positions + dim]\n ], 2)\n return ngram_future_mask\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\n@register_model_architecture('bang_ar', 'bang_ar')\ndef base_architecture(args):\n args.ngram = getattr(args, 'ngram', 1)\n args.num_buckets = getattr(args, 'num_buckets', 32)\n args.relative_max_distance = getattr(args, 'relative_max_distance', 128)\n\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.load_sep = getattr(args, 'load_sep', False)\n\n\n@register_model_architecture('bang_ar', 'bang_ar_base')\ndef transformer_base(args):\n args.ngram = getattr(args, 'ngram', 1)\n args.num_buckets = getattr(args, 'num_buckets', 32)\n args.relative_max_distance = getattr(args, 'relative_max_distance', 128)\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n\n args.dropout = getattr(args, 'dropout', 0.1)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.1)\n args.activation_fn = 
getattr(args, 'activation_fn', 'gelu')\n\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)\n base_architecture(args)\n\n\n@register_model_architecture('bang_ar', 'bang_ar_middle')\ndef transformer_middle(args):\n args.ngram = getattr(args, 'ngram', 1)\n args.num_buckets = getattr(args, 'num_buckets', 32)\n args.relative_max_distance = getattr(args, 'relative_max_distance', 128)\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n transformer_base(args)\n\n\n@register_model_architecture('bang_ar', 'bang_ar_large')\ndef transformer_big(args):\n args.ngram = getattr(args, 'ngram', 1)\n args.num_buckets = getattr(args, 'num_buckets', 32)\n args.relative_max_distance = getattr(args, 'relative_max_distance', 128)\n\n args.encoder_layers = getattr(args, 'encoder_layers', 12)\n args.decoder_layers = getattr(args, 'decoder_layers', 12)\n transformer_middle(args)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.ModuleList", "torch.load", "torch.lt", "torch.nn.init.constant_", "torch.is_tensor", "torch.abs", "torch.nn.init.normal_", "torch.zeros_like", "torch.Tensor", "torch.nn.functional.dropout", "torch.nn.functional.log_softmax", "torch.nn.functional.linear", "torch.nn.functional.softmax", "torch.arange", "torch.nn.init.xavier_uniform_", "torch.ones_like", "torch.nn.Embedding" ] ]
HimanchalChandra/visual-relationship-detection
[ "74922fbb8a3dc1a15b539a7178acb48256f3ad0c" ]
[ "datasets/visual_genome.py" ]
[ "import numpy as np\nimport cv2\nfrom shapely.geometry import box\nfrom shapely.ops import cascaded_union\nfrom PIL import Image\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport os \nimport json\n\nclass VisualGenome(Dataset):\n \"\"\"VRD dataset.\"\"\"\n def __init__(self, dataset_path, type, transform=None):\n # read annotations file\n with open(os.path.join(dataset_path,'json_dataset',f'annotations_{type}.json'), 'r') as f:\n self.annotations = json.load(f)\n\n # read image filenames\n with open(os.path.join(dataset_path, f'{type}.txt'), 'r') as f:\n image_names = f.read()\n \n self.imgs_list = image_names.split('\\n')[:-1]\n \n self.transform = transform\n self.root = os.path.join(dataset_path,'sg_dataset',f'sg_{type}_images')\n\n def __len__(self):\n return len(self.imgs_list)\n\n def pre_process(self, img, annotation):\n # list containing cropped unioned images\n cropped_imgs = []\n spatial_locations = []\n word_vectors = []\n predicate_list = []\n for sub_pred_obj in annotation:\n bbox_sub = sub_pred_obj['subject']['bbox']\n bbox_obj = sub_pred_obj['object']['bbox']\n # convert to x1,y1,x2,y2\n x1_sub, y1_sub, x2_sub, y2_sub = bbox_sub[2], bbox_sub[0], bbox_sub[3],bbox_sub[1]\n x1_obj, y1_obj, x2_obj, y2_obj = [bbox_obj[2], bbox_obj[0], bbox_obj[3],bbox_obj[1]]\n # get subject, object category\n sub_category = sub_pred_obj['subject']['category']\n object_category = sub_pred_obj['object']['category']\n # takes union\n polygons = [box(x1_sub, y1_sub, x2_sub, y2_sub),\n box(x1_obj, y1_obj, x2_obj, y2_obj)]\n unioned = cascaded_union(polygons)\n unioned = unioned.bounds\n x1_unioned, y1_unioned, x2_unioned, y2_unioned = unioned\n # crop image\n cropped_img = img.crop((int(x1_unioned), int(y1_unioned), int(x2_unioned), int(y2_unioned))) \n img_w, img_h = cropped_img.size\n\n cropped_img = self.transform(cropped_img)\n \n factor_h = img_h/224\n factor_w = img_w/224\n\n cropped_imgs.append(cropped_img)\n # spatial locations\n # find bounding box coordinates relative to unioned image\n sub_x1 = x1_sub - int(x1_unioned)\n sub_y1 = y1_sub - int(y1_unioned)\n sub_x2 = x2_sub - int(x1_unioned)\n sub_y2 = y2_sub - int(y1_unioned)\n\n obj_x1 = x1_obj - int(x1_unioned)\n obj_y1 = y1_obj - int(y1_unioned)\n obj_x2 = x2_obj - int(x1_unioned)\n obj_y2 = y2_obj - int(y1_unioned)\n \n # rescaling of bboxes for image with dim (224,224)\n bbox_sub_scaled = [sub_x1//factor_w, sub_y1//factor_h, sub_x2//factor_w, sub_y2//factor_h]\n bbox_obj_scaled = [obj_x1//factor_w, obj_y1//factor_h, obj_x2//factor_w, obj_y2//factor_h]\n \n spatial_locations.append([bbox_sub_scaled, bbox_obj_scaled])\n # word vectors\n word_vectors.append([sub_category, object_category])\n # predicate label\n predicate = sub_pred_obj['predicate']\n predicate_list.append(predicate)\n\n imgs = torch.stack(cropped_imgs)\n spatial_locations = torch.Tensor(spatial_locations)\n word_vectors = torch.Tensor(word_vectors)\n targets = torch.Tensor(predicate_list)\n return imgs, spatial_locations, word_vectors, targets\n \n def my_collate(self, batch):\n imgs = [] \n spatial_locations = []\n word_vectors = []\n targets = []\n for item in batch:\n # remove incomplete annotations\n if (len(item[0].shape)==4):\n imgs.append(item[0])\n spatial_locations.append(item[1])\n word_vectors.append(item[2])\n targets.append(item[3])\n\n imgs = torch.cat(imgs)\n spatial_locations = 
torch.cat(spatial_locations)\n word_vectors = torch.cat(word_vectors)\n word_vectors = word_vectors.type(torch.LongTensor)\n targets = torch.cat(targets)\n # flatten\n targets = targets.view(-1)\n targets = targets.type(torch.LongTensor)\n return imgs, spatial_locations, word_vectors, targets\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root, self.imgs_list[idx])\n img = Image.open(img_path)\n\n annotation = self.annotations[self.imgs_list[idx]]\n imgs, spatial_locations, word_vectors, targets = self.pre_process(img, annotation)\n return (imgs, spatial_locations, word_vectors, targets)\n \n # if torch.is_tensor(idx):\n # idx = idx.tolist()\n\n # img_name = os.path.join(self.root_dir,\n # self.landmarks_frame.iloc[idx, 0])\n # image = io.imread(img_name)\n # landmarks = self.landmarks_frame.iloc[idx, 1:]\n # landmarks = np.array([landmarks])\n # landmarks = landmarks.astype('float').reshape(-1, 2)\n # sample = {'image': image, 'landmarks': landmarks}\n\n # if self.transform:\n # sample = self.transform(sample)\n\n" ]
[ [ "torch.Tensor", "torch.cat", "torch.stack" ] ]
DLPerf/PointASNL
[ "ec90b99e79fb9c7fbece319127404b60817edc63" ]
[ "SemanticKITTI/semantic_kitti_dataset.py" ]
[ "import os\nimport numpy as np\nimport yaml\nimport random\nfrom auxiliary import laserscan\n\nsplits = [\"train\", \"valid\", \"test\"]\nmapped_content = {0: 0.03150183342534689, 1: 0.042607828674502385, 2: 0.00016609538710764618, 3: 0.00039838616015114444,\n 4: 0.0021649398241338114, 5: 0.0018070552978863615, 6: 0.0003375832743104974,\n 7: 0.00012711105887399155, 8: 3.746106399997359e-05, 9: 0.19879647126983288, 10: 0.014717169549888214,\n 11: 0.14392298360372, 12: 0.0039048553037472045, 13: 0.1326861944777486, 14: 0.0723592229456223,\n 15: 0.26681502148037506, 16: 0.006035012012626033, 17: 0.07814222006271769, 18: 0.002855498193863172,\n 19: 0.0006155958086189918}\n\nseed = 100\n\nclass SemanticKittiDataset():\n def __init__(self, root, sample_points=8192, block_size=10, num_classes=20, split='train', with_remission=False,\n config_file='semantic-kitti.yaml', should_map=True, padding=0.01, random_sample=False, random_rate=0.1):\n self.root = root\n assert split in splits\n self.split = split\n self.padding = padding\n self.block_size = block_size\n self.sample_points = sample_points\n self.random_sample = random_sample\n self.with_remission = with_remission\n self.should_map = should_map\n self.config = yaml.safe_load(open(config_file, 'r'))\n self.scan = laserscan.SemLaserScan(nclasses=num_classes, sem_color_dict=self.config['color_map'])\n sequences = self.config['split'][split]\n\n self.points_name = []\n self.label_name = []\n for sequence in sequences:\n sequence = '{0:02d}'.format(int(sequence))\n points_path = os.path.join(self.root, 'sequences', sequence, 'velodyne')\n label_path = os.path.join(self.root, 'sequences', sequence, 'labels')\n seq_points_name = [os.path.join(points_path, pn) for pn in os.listdir(points_path) if pn.endswith('.bin')]\n seq_label_name = [os.path.join(label_path, ln) for ln in os.listdir(label_path) if ln.endswith('.label')]\n assert len(seq_points_name) == len(seq_label_name)\n seq_points_name.sort()\n seq_label_name.sort()\n self.points_name.extend(seq_points_name)\n self.label_name.extend(seq_label_name)\n\n if self.random_sample:\n random.Random(seed).shuffle(self.points_name)\n random.Random(seed).shuffle(self.label_name)\n total_length = len(self.points_name)\n self.points_name = self.points_name[:int(total_length * random_rate)]\n self.label_name = self.label_name[:int(total_length * random_rate)]\n\n label_weights_dict = mapped_content\n num_keys = len(label_weights_dict.keys())\n self.label_weights_lut = np.zeros((num_keys), dtype=np.float32)\n self.label_weights_lut[list(label_weights_dict.keys())] = list(label_weights_dict.values())\n self.label_weights_lut = np.power(np.amax(self.label_weights_lut[1:]) / self.label_weights_lut, 1 / 3.0)\n\n if should_map:\n remapdict = self.config[\"learning_map\"]\n # make lookup table for mapping\n maxkey = max(remapdict.keys())\n # +100 hack making lut bigger just in case there are unknown labels\n self.remap_lut = np.zeros((maxkey + 100), dtype=np.int32)\n self.remap_lut[list(remapdict.keys())] = list(remapdict.values())\n\n def __getitem__(self, index):\n points_name, label_name = self.points_name[index], self.label_name[index]\n self.scan.open_scan(points_name)\n self.scan.open_label(label_name)\n points = self.scan.points\n\n label = self.scan.sem_label\n if self.should_map:\n label = self.remap_lut[label]\n label_weights = self.label_weights_lut[label]\n coordmax = np.max(points[:, 0:3], axis=0)\n coordmin = np.min(points[:, 0:3], axis=0)\n\n for i in range(10):\n curcenter = 
points[np.random.choice(len(label), 1)[0], 0:3]\n curmin = curcenter - [self.block_size/2, self.block_size/2, 14]\n curmax = curcenter + [self.block_size/2, self.block_size/2, 14]\n curmin[2] = coordmin[2]\n curmax[2] = coordmax[2]\n curchoice = np.sum((points[:, 0:3] >= (curmin - 0.2)) * (points[:, 0:3] <= (curmax + 0.2)),\n axis=1) == 3\n # print(curchoice)\n cur_point_set = points[curchoice, 0:3]\n cur_point_full = points[curchoice, :]\n cur_semantic_seg = label[curchoice]\n if len(cur_semantic_seg) == 0:\n continue\n mask = np.sum((cur_point_set >= (curmin - self.padding)) * (cur_point_set <= (curmax + self.padding)), axis=1) == 3\n\n isvalid = np.sum(cur_semantic_seg > 0) / len(cur_semantic_seg) >= 0.7\n if isvalid:\n break\n choice = np.random.choice(len(cur_semantic_seg), self.sample_points, replace=True)\n point_set = cur_point_full[choice, :]\n semantic_seg = cur_semantic_seg[choice]\n mask = mask[choice]\n sample_weight = label_weights[semantic_seg]\n sample_weight *= mask\n if self.with_remission:\n point_set = np.concatenate((point_set, np.expand_dims(self.scan.remissions[choice], axis=1)), axis=1)\n\n return point_set, semantic_seg, sample_weight\n\n def __len__(self):\n return len(self.points_name)\n\n\nclass SemanticKittiDataset_whole():\n def __init__(self, root, sample_points=8192, block_size=10, num_classes=20, split='train', with_remission=False,\n config_file='semantic-kitti.yaml', should_map=True, padding=0.01, random_sample=False, random_rate=0.1):\n self.root = root\n assert split in splits\n self.split = split\n self.padding = padding\n self.block_size = block_size\n self.sample_points = sample_points\n self.random_sample = random_sample\n self.with_remission = with_remission\n self.should_map = should_map\n self.config = yaml.safe_load(open(config_file, 'r'))\n self.scan = laserscan.SemLaserScan(nclasses=num_classes, sem_color_dict=self.config['color_map'])\n sequences = self.config['split'][split]\n\n self.points_name = []\n self.label_name = []\n for sequence in sequences:\n sequence = '{0:02d}'.format(int(sequence))\n points_path = os.path.join(self.root, 'sequences', sequence, 'velodyne')\n label_path = os.path.join(self.root, 'sequences', sequence, 'labels')\n seq_points_name = [os.path.join(points_path, pn) for pn in os.listdir(points_path) if pn.endswith('.bin')]\n seq_label_name = [os.path.join(label_path, ln) for ln in os.listdir(label_path) if ln.endswith('.label')]\n assert len(seq_points_name) == len(seq_label_name)\n seq_points_name.sort()\n seq_label_name.sort()\n self.points_name.extend(seq_points_name)\n self.label_name.extend(seq_label_name)\n if self.random_sample:\n random.Random(seed).shuffle(self.points_name)\n random.Random(seed).shuffle(self.label_name)\n total_length = len(self.points_name)\n self.points_name = self.points_name[:int(total_length * random_rate)]\n self.label_name = self.label_name[:int(total_length * random_rate)]\n label_weights_dict = mapped_content\n num_keys = len(label_weights_dict.keys())\n self.label_weights_lut = np.zeros((num_keys), dtype=np.float32)\n self.label_weights_lut[list(label_weights_dict.keys())] = list(label_weights_dict.values())\n self.label_weights_lut = np.power(np.amax(self.label_weights_lut[1:]) / self.label_weights_lut, 1 / 3.0)\n\n if should_map:\n remapdict = self.config[\"learning_map\"]\n # make lookup table for mapping\n maxkey = max(remapdict.keys())\n # +100 hack making lut bigger just in case there are unknown labels\n self.remap_lut = np.zeros((maxkey + 100), dtype=np.int32)\n 
self.remap_lut[list(remapdict.keys())] = list(remapdict.values())\n\n def __getitem__(self, index):\n points_name, label_name = self.points_name[index], self.label_name[index]\n self.scan.open_scan(points_name)\n self.scan.open_label(label_name)\n points = self.scan.points\n\n label = self.scan.sem_label\n if self.should_map:\n label = self.remap_lut[label]\n label_weights = self.label_weights_lut[label]\n coordmax = np.max(points[:, 0:3], axis=0)\n coordmin = np.min(points[:, 0:3], axis=0)\n\n nsubvolume_x = np.ceil((coordmax[0] - coordmin[0]) / self.block_size).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1] - coordmin[1]) / self.block_size).astype(np.int32)\n point_sets = list()\n semantic_segs = list()\n sample_weights = list()\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i * self.block_size, j * self.block_size, 0]\n curmax = coordmin + [(i + 1) * self.block_size, (j + 1) * self.block_size, coordmax[2] - coordmin[2]]\n curchoice = np.sum(\n (points[:, 0:3] >= (curmin - 0.2)) * (points[:, 0:3] <= (curmax + 0.2)), axis=1) == 3\n cur_point_set = points[curchoice, 0:3]\n cur_point_full = points[curchoice, :]\n cur_semantic_seg = label[curchoice]\n if len(cur_semantic_seg) == 0:\n continue\n mask = np.sum((cur_point_set >= (curmin - self.padding)) * (cur_point_set <= (curmax + self.padding)),axis=1) == 3\n\n choice = np.random.choice(len(cur_semantic_seg), self.sample_points, replace=True)\n point_set = cur_point_full[choice, :] # Nx3/6\n if self.with_remission:\n point_set = np.concatenate((point_set, np.expand_dims(self.scan.remissions[choice], axis=1)),axis=1)\n semantic_seg = cur_semantic_seg[choice] # N\n mask = mask[choice]\n\n sample_weight = label_weights[semantic_seg]\n sample_weight *= mask # N\n point_sets.append(np.expand_dims(point_set, 0)) # 1xNx3\n semantic_segs.append(np.expand_dims(semantic_seg, 0)) # 1xN\n sample_weights.append(np.expand_dims(sample_weight, 0)) # 1xN\n point_sets = np.concatenate(tuple(point_sets), axis=0)\n semantic_segs = np.concatenate(tuple(semantic_segs), axis=0)\n sample_weights = np.concatenate(tuple(sample_weights), axis=0)\n\n return point_sets, semantic_segs, sample_weights\n\n def __len__(self):\n return len(self.points_name)\n\n\nclass SemanticKittiDatasetSlidingWindow():\n # prepare to give prediction on each points\n def __init__(self, root, sample_points=8192, block_size=10, stride=3.3, num_classes=20, split='test', with_remission=False,\n config_file='semantic-kitti.yaml', should_map=True):\n self.root = root\n assert split in splits\n self.split = split\n self.stride = stride\n self.block_size = block_size\n self.block_points = sample_points\n self.should_map = should_map\n self.with_remission = with_remission\n self.config = yaml.safe_load(open(config_file, 'r'))\n self.scan = laserscan.SemLaserScan(\n nclasses=num_classes, sem_color_dict=self.config['color_map'])\n sequences = self.config['split'][split]\n color = []\n for values in self.config['learning_map_inv'].values():\n color.append(self.config['color_map'][values])\n self.color_map = np.array(color)\n\n self.points_name = []\n self.label_name = []\n for sequence in sequences:\n sequence = '{0:02d}'.format(int(sequence))\n points_path = os.path.join(\n self.root, 'sequences', sequence, 'velodyne')\n label_path = os.path.join(self.root, 'sequences', sequence, 'labels')\n seq_points_name = [os.path.join(points_path, pn) for pn in os.listdir(points_path) if pn.endswith('.bin')]\n seq_points_name.sort()\n 
self.points_name.extend(seq_points_name)\n if split != 'test':\n seq_label_name = [os.path.join(label_path, ln) for ln in os.listdir(label_path) if ln.endswith('.label')]\n seq_label_name.sort()\n self.label_name.extend(seq_label_name)\n if should_map:\n remapdict = self.config[\"learning_map\"]\n # make lookup table for mapping\n maxkey = max(remapdict.keys())\n # +100 hack making lut bigger just in case there are unknown labels\n self.remap_lut = np.zeros((maxkey + 100), dtype=np.int32)\n self.remap_lut[list(remapdict.keys())] = list(remapdict.values())\n\n def chunks(self, l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n def split_data(self, data, idx):\n new_data = []\n for i in range(len(idx)):\n new_data += [np.expand_dims(data[idx[i]], axis=0)]\n return new_data\n\n def nearest_dist(self, block_center, block_center_list):\n num_blocks = len(block_center_list)\n dist = np.zeros(num_blocks)\n for i in range(num_blocks):\n dist[i] = np.linalg.norm(block_center_list[i] - block_center, ord=2) # i->j\n return np.argsort(dist)[0]\n\n def __getitem__(self, index):\n points_name = self.points_name[index]\n self.scan.open_scan(points_name)\n point_set_ini = self.scan.points\n if self.split != 'test':\n label_name = self.label_name[index]\n self.scan.open_label(label_name)\n label = self.scan.sem_label\n if self.should_map:\n label = self.remap_lut[label]\n\n coordmax = np.max(point_set_ini[:, 0:3], axis=0)\n coordmin = np.min(point_set_ini[:, 0:3], axis=0)\n nsubvolume_x = np.ceil((coordmax[0] - coordmin[0]) / self.stride).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1] - coordmin[1]) / self.stride).astype(np.int32)\n point_sets = []\n point_idxs = []\n block_center = []\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i * self.stride, j * self.stride, 0]\n curmax = curmin + [self.block_size, self.block_size, coordmax[2] - coordmin[2]]\n curchoice = np.sum(\n (point_set_ini[:, 0:3] >= (curmin - 0.2)) * (point_set_ini[:, 0:3] <= (curmax + 0.2)), axis=1) == 3\n curchoice_idx = np.where(curchoice)[0]\n cur_point_set = point_set_ini[curchoice, :]\n if self.with_remission:\n cur_point_set = np.concatenate((cur_point_set, np.expand_dims(self.scan.remissions[curchoice], axis=1)),axis=1)\n point_sets.append(cur_point_set) # 1xNx3/4\n point_idxs.append(curchoice_idx) # 1xN\n block_center.append((curmin[0:2] + curmax[0:2]) / 2.0)\n\n # merge small blocks\n num_blocks = len(point_sets)\n block_idx = 0\n while block_idx < num_blocks:\n if point_sets[block_idx].shape[0] > 4096:\n block_idx += 1\n continue\n\n small_block_data = point_sets[block_idx].copy()\n small_block_idxs = point_idxs[block_idx].copy()\n small_block_center = block_center[block_idx].copy()\n point_sets.pop(block_idx)\n point_idxs.pop(block_idx)\n block_center.pop(block_idx)\n nearest_block_idx = self.nearest_dist(small_block_center, block_center)\n point_sets[nearest_block_idx] = np.concatenate((point_sets[nearest_block_idx], small_block_data), axis=0)\n point_idxs[nearest_block_idx] = np.concatenate((point_idxs[nearest_block_idx], small_block_idxs), axis=0)\n num_blocks = len(point_sets)\n\n # divide large blocks\n num_blocks = len(point_sets)\n div_blocks = []\n div_blocks_idxs = []\n div_blocks_center = []\n for block_idx in range(num_blocks):\n cur_num_pts = point_sets[block_idx].shape[0]\n\n point_idx_block = np.array([x for x in range(cur_num_pts)])\n if point_idx_block.shape[0] % self.block_points != 0:\n makeup_num = 
self.block_points - point_idx_block.shape[0] % self.block_points\n np.random.shuffle(point_idx_block)\n point_idx_block = np.concatenate((point_idx_block, point_idx_block[0:makeup_num].copy()))\n\n np.random.shuffle(point_idx_block)\n\n sub_blocks = list(self.chunks(point_idx_block, self.block_points))\n\n div_blocks += self.split_data(point_sets[block_idx], sub_blocks)\n div_blocks_idxs += self.split_data(point_idxs[block_idx], sub_blocks)\n div_blocks_center += [block_center[block_idx].copy() for _ in range(len(sub_blocks))]\n div_blocks = np.concatenate(tuple(div_blocks), axis=0)\n div_blocks_idxs = np.concatenate(tuple(div_blocks_idxs), axis=0)\n if self.split != 'test':\n return div_blocks, div_blocks_idxs, point_set_ini, label\n else:\n return div_blocks, div_blocks_idxs, point_set_ini\n\n def __len__(self):\n return len(self.points_name)\n\nif __name__ == '__main__':\n dataset = SemanticKittiDatasetSlidingWindow('/home/yxu/github/data/kitti/dataset/',split='valid', with_remission=True)\n print(len(dataset))\n import time\n st = time.time()\n data = dataset[4][1]\n print(data.shape)\n print(data)\n print(time.time()-st)\n\n\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.array", "numpy.linalg.norm", "numpy.ceil", "numpy.zeros", "numpy.sum", "numpy.min", "numpy.random.shuffle", "numpy.where", "numpy.amax", "numpy.argsort", "numpy.expand_dims" ] ]
WhisperRen/Bag-of-Tricks-for-Multimodal-Classification
[ "e9b12d941391e1f42e5f265f730fea8e58f0a96b" ]
[ "single_modality_models/base_text_model.py" ]
[ "import tensorflow as tf\n\n\ndef make_text_model(tokenizer, output_dim=16, trainable=True):\n text_model = tf.keras.Sequential([\n tf.keras.layers.Embedding(len(tokenizer.index_word) + 1, 8),\n tf.keras.layers.Dropout(rate=0.5, trainable=trainable),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8)),\n tf.keras.layers.Dense(units=output_dim, activation='relu')\n ])\n return text_model\n" ]
[ [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.Dense" ] ]
AtmaHou/UserSimulator
[ "4908e6a19f23b20064bb10c504eeadb0de5c235e" ]
[ "src/deep_dialog/dialog_system/state_tracker.py" ]
[ "\"\"\"\nCreated on May 20, 2016\n\nstate tracker\n\n@author: xiul, t-zalipt\n\"\"\"\n\nfrom . import KBHelper\nimport numpy as np\nimport copy\n\n\nclass StateTracker:\n \"\"\" The state tracker maintains a record of which request slots are filled and which inform slots are filled \"\"\"\n\n def __init__(self, act_set, slot_set, movie_dictionary):\n \"\"\" constructor for statetracker takes movie knowledge base and initializes a new episode\n\n Arguments:\n act_set -- The set of all acts availavle\n slot_set -- The total set of available slots\n movie_dictionary -- A representation of all the available movies. Generally this object is accessed via the KBHelper class\n\n Class Variables:\n history_vectors -- A record of the current dialog so far in vector format (act-slot, but no values)\n history_dictionaries -- A record of the current dialog in dictionary format\n current_slots -- A dictionary that keeps a running record of which slots are filled current_slots['inform_slots'] and which are requested current_slots['request_slots'] (but not filed)\n action_dimension -- # TODO indicates the dimensionality of the vector representaiton of the action\n kb_result_dimension -- A single integer denoting the dimension of the kb_results features.\n turn_count -- A running count of which turn we are at in the present dialog\n \"\"\"\n self.movie_dictionary = movie_dictionary\n self.initialize_episode()\n self.history_vectors = None\n self.history_dictionaries = None\n self.current_slots = None\n self.action_dimension = 10 # TODO REPLACE WITH REAL VALUE\n self.kb_result_dimension = 10 # TODO REPLACE WITH REAL VALUE\n self.turn_count = 0\n self.kb_helper = KBHelper(movie_dictionary)\n \n\n def initialize_episode(self):\n \"\"\" Initialize a new episode (dialog), flush the current state and tracked slots \"\"\"\n \n self.action_dimension = 10\n self.history_vectors = np.zeros((1, self.action_dimension))\n self.history_dictionaries = []\n self.turn_count = 0\n self.current_slots = {}\n \n self.current_slots['inform_slots'] = {}\n self.current_slots['request_slots'] = {}\n self.current_slots['proposed_slots'] = {}\n self.current_slots['agent_request_slots'] = {}\n\n\n def dialog_history_vectors(self):\n \"\"\" Return the dialog history (both user and agent actions) in vector representation \"\"\"\n return self.history_vectors\n\n\n def dialog_history_dictionaries(self):\n \"\"\" Return the dictionary representation of the dialog history (includes values) \"\"\"\n return self.history_dictionaries\n\n\n def kb_results_for_state(self):\n \"\"\" Return the information about the database results based on the currently informed slots \"\"\"\n ########################################################################\n # TODO Calculate results based on current informed slots\n ########################################################################\n kb_results = self.kb_helper.database_results_for_agent(self.current_slots) # replace this with something less ridiculous\n # TODO turn results into vector (from dictionary)\n results = np.zeros((0, self.kb_result_dimension))\n return results\n \n\n def get_state_for_agent(self):\n \"\"\" Get the state representatons to send to agent \"\"\"\n #state = {'user_action': self.history_dictionaries[-1], 'current_slots': self.current_slots, 'kb_results': self.kb_results_for_state()}\n state = {'user_action': self.history_dictionaries[-1], 'current_slots': self.current_slots, #'kb_results': self.kb_results_for_state(), \n 
'kb_results_dict':self.kb_helper.database_results_for_agent(self.current_slots), 'turn': self.turn_count, 'history': self.history_dictionaries, \n 'agent_action': self.history_dictionaries[-2] if len(self.history_dictionaries) > 1 else None}\n return copy.deepcopy(state)\n \n def get_suggest_slots_values(self, request_slots):\n \"\"\" Get the suggested values for request slots \"\"\"\n \n suggest_slot_vals = {}\n if len(request_slots) > 0: \n suggest_slot_vals = self.kb_helper.suggest_slot_values(request_slots, self.current_slots)\n \n return suggest_slot_vals\n \n def get_current_kb_results(self):\n \"\"\" get the kb_results for current state \"\"\"\n kb_results = self.kb_helper.available_results_from_kb(self.current_slots)\n return kb_results\n \n \n def update(self, agent_action=None, user_action=None):\n \"\"\" Update the state based on the latest action \"\"\"\n\n ########################################################################\n # Make sure that the function was called properly\n ########################################################################\n assert(not (user_action and agent_action))\n assert(user_action or agent_action)\n\n ########################################################################\n # Update state to reflect a new action by the agent.\n ########################################################################\n if agent_action:\n \n ####################################################################\n # Handles the act_slot response (with values needing to be filled)\n ####################################################################\n if agent_action['act_slot_response']:\n response = copy.deepcopy(agent_action['act_slot_response'])\n \n inform_slots = self.kb_helper.fill_inform_slots(response['inform_slots'], self.current_slots) # TODO this doesn't actually work yet, remove this warning when kb_helper is functional\n agent_action_values = {'turn': self.turn_count, 'speaker': \"agent\", 'diaact': response['diaact'], 'inform_slots': inform_slots, 'request_slots':response['request_slots']}\n \n agent_action['act_slot_response'].update({'diaact': response['diaact'], 'inform_slots': inform_slots, 'request_slots':response['request_slots'], 'turn':self.turn_count})\n \n elif agent_action['act_slot_value_response']:\n agent_action_values = copy.deepcopy(agent_action['act_slot_value_response'])\n # print(\"Updating state based on act_slot_value action from agent\")\n agent_action_values['turn'] = self.turn_count\n agent_action_values['speaker'] = \"agent\"\n \n ####################################################################\n # This code should execute regardless of which kind of agent produced action\n ####################################################################\n for slot in agent_action_values['inform_slots'].keys():\n self.current_slots['proposed_slots'][slot] = agent_action_values['inform_slots'][slot]\n self.current_slots['inform_slots'][slot] = agent_action_values['inform_slots'][slot] # add into inform_slots\n if slot in self.current_slots['request_slots'].keys():\n del self.current_slots['request_slots'][slot]\n\n for slot in agent_action_values['request_slots'].keys():\n if slot not in self.current_slots['agent_request_slots']:\n self.current_slots['agent_request_slots'][slot] = \"UNK\"\n\n self.history_dictionaries.append(agent_action_values)\n current_agent_vector = np.ones((1, self.action_dimension))\n self.history_vectors = np.vstack([self.history_vectors, current_agent_vector])\n \n 
########################################################################\n # Update the state to reflect a new action by the user\n ########################################################################\n elif user_action:\n \n ####################################################################\n # Update the current slots\n ####################################################################\n for slot in user_action['inform_slots'].keys():\n self.current_slots['inform_slots'][slot] = user_action['inform_slots'][slot]\n if slot in self.current_slots['request_slots'].keys():\n del self.current_slots['request_slots'][slot]\n\n for slot in user_action['request_slots'].keys():\n if slot not in self.current_slots['request_slots']:\n self.current_slots['request_slots'][slot] = \"UNK\"\n \n self.history_vectors = np.vstack([self.history_vectors, np.zeros((1,self.action_dimension))])\n new_move = {'turn': self.turn_count, 'speaker': \"user\", 'request_slots': user_action['request_slots'], 'inform_slots': user_action['inform_slots'], 'diaact': user_action['diaact']}\n self.history_dictionaries.append(copy.deepcopy(new_move))\n\n ########################################################################\n # This should never happen if the asserts passed\n ########################################################################\n else:\n pass\n\n ########################################################################\n # This code should execute after update code regardless of what kind of action (agent/user)\n ########################################################################\n self.turn_count += 1" ]
[ [ "numpy.ones", "numpy.vstack", "numpy.zeros" ] ]
DELTA37/LDPC-code
[ "e055685d8dfebd50fa19a2638a8934b4a43b7ad3" ]
[ "estimate_dmin_upper_bound_qc_ldpc.py" ]
[ "import os\nimport sys\nimport argparse\nimport numpy as np\nfrom codes.qc_ldpc import QuasiCyclicLDPCCode\nfrom utils.poly_gf2 import poly1d_gf2\nimport importlib\n\n\ndef read_alist(alist):\n \"\"\"Create H matrix from a file in the .alist format\n\n See http://www.inference.org.uk/mackay/codes/alist.html for\n documentation on the .alist format\n\n alist -- string, filename of alist\n\n returns\n H -- numpy array of size MxN, H matrix\n N -- int, N parameter from alist\n M -- int, M parameter from alist\n\n \"\"\"\n\n with open(alist, 'r') as f:\n\n line = f.readline()\n N, M = line.split(' ')\n\n line = f.readline()\n max_col_weight, max_row_weight = line.split(' ')\n\n line = f.readline()\n col_weights = line.split(' ')\n col_weights.pop()\n\n line = f.readline()\n row_weights = line.split(' ')\n row_weights.pop()\n\n nlist = []\n mlist = []\n\n for i in range(int(N)):\n nlist.append(f.readline().split('\\n')[0])\n\n for i in range(int(M)):\n mlist.append(f.readline().split('\\n')[0])\n\n H = np.zeros((int(M), int(N)), dtype=bool)\n\n for i in range(int(M)):\n indices = mlist[i].split(' ')[0:int(max_row_weight)]\n indices = [int(x) - 1 for x in indices]\n\n # print indices\n for k in indices:\n if k != -1:\n H[i][k] = 1\n return H, int(N), int(M)\n\n\ndef C2H_poly_gf2(C, r):\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('pathToMat')\n args = parser.parse_args()\n\n dir_path, module_name = os.path.split(args.pathToMat)\n module_name = os.path.splitext(module_name)[0]\n sys.path.append(dir_path)\n data = importlib.import_module(module_name)\n\n r = data.r\n C = data.C\n print(C.shape)\n H_poly_gf2 = np.empty(C.shape, dtype=poly1d_gf2)\n for i in range(C.shape[0]):\n for j in range(C.shape[1]):\n if C[i, j] == -1:\n H_poly_gf2[i, j] = poly1d_gf2([0])\n else:\n H_poly_gf2[i, j] = poly1d_gf2.create_basis(C[i, j])\n\n dmin_upper_bound = QuasiCyclicLDPCCode.estimate_dmin_upper_bound(H_poly_gf2)\n print(dmin_upper_bound)\n" ]
[ [ "numpy.empty" ] ]
awesome-archive/tensorpack
[ "e5e54e07bb47f85fc7efe9c78bde3e153ef0d49b", "d7a13cb74c9066bc791d7aafc3b744b60ee79a9f", "e5e54e07bb47f85fc7efe9c78bde3e153ef0d49b", "e5e54e07bb47f85fc7efe9c78bde3e153ef0d49b" ]
[ "tensorpack/dataflow/dataset/cifar.py", "tensorpack/dataflow/imgaug/noise.py", "tensorpack/tfutils/sessinit.py", "tensorpack/tfutils/varreplace.py" ]
[ "# -*- coding: utf-8 -*-\n# File: cifar.py\n\n# Yukun Chen <cykustc@gmail.com>\n\nimport numpy as np\nimport os\nimport pickle\nimport tarfile\nimport six\nfrom six.moves import range\n\nfrom ...utils import logger\nfrom ...utils.fs import download, get_dataset_path\nfrom ..base import RNGDataFlow\n\n__all__ = ['CifarBase', 'Cifar10', 'Cifar100']\n\n\nDATA_URL_CIFAR_10 = ('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 170498071)\nDATA_URL_CIFAR_100 = ('http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz', 169001437)\n\n\ndef maybe_download_and_extract(dest_directory, cifar_classnum):\n \"\"\"Download and extract the tarball from Alex's website. Copied from tensorflow example \"\"\"\n assert cifar_classnum == 10 or cifar_classnum == 100\n if cifar_classnum == 10:\n cifar_foldername = 'cifar-10-batches-py'\n else:\n cifar_foldername = 'cifar-100-python'\n if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):\n logger.info(\"Found cifar{} data in {}.\".format(cifar_classnum, dest_directory))\n return\n else:\n DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100\n filename = DATA_URL[0].split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef read_cifar(filenames, cifar_classnum):\n assert cifar_classnum == 10 or cifar_classnum == 100\n ret = []\n for fname in filenames:\n fo = open(fname, 'rb')\n if six.PY3:\n dic = pickle.load(fo, encoding='bytes')\n else:\n dic = pickle.load(fo)\n data = dic[b'data']\n if cifar_classnum == 10:\n label = dic[b'labels']\n IMG_NUM = 10000 # cifar10 data are split into blocks of 10000\n else:\n label = dic[b'fine_labels']\n IMG_NUM = 50000 if 'train' in fname else 10000\n fo.close()\n for k in range(IMG_NUM):\n img = data[k].reshape(3, 32, 32)\n img = np.transpose(img, [1, 2, 0])\n ret.append([img, label[k]])\n return ret\n\n\ndef get_filenames(dir, cifar_classnum):\n assert cifar_classnum == 10 or cifar_classnum == 100\n if cifar_classnum == 10:\n train_files = [os.path.join(\n dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]\n test_files = [os.path.join(\n dir, 'cifar-10-batches-py', 'test_batch')]\n meta_file = os.path.join(dir, 'cifar-10-batches-py', 'batches.meta')\n elif cifar_classnum == 100:\n train_files = [os.path.join(dir, 'cifar-100-python', 'train')]\n test_files = [os.path.join(dir, 'cifar-100-python', 'test')]\n meta_file = os.path.join(dir, 'cifar-100-python', 'meta')\n return train_files, test_files, meta_file\n\n\ndef _parse_meta(filename, cifar_classnum):\n with open(filename, 'rb') as f:\n obj = pickle.load(f)\n return obj['label_names' if cifar_classnum == 10 else 'fine_label_names']\n\n\nclass CifarBase(RNGDataFlow):\n \"\"\"\n Produces [image, label] in Cifar10/100 dataset,\n image is 32x32x3 in the range [0,255].\n label is an int.\n \"\"\"\n def __init__(self, train_or_test, shuffle=None, dir=None, cifar_classnum=10):\n \"\"\"\n Args:\n train_or_test (str): 'train' or 'test'\n shuffle (bool): defaults to True for training set.\n dir (str): path to the dataset directory\n cifar_classnum (int): 10 or 100\n \"\"\"\n assert train_or_test in ['train', 'test']\n assert cifar_classnum == 10 or cifar_classnum == 100\n self.cifar_classnum = cifar_classnum\n if dir is None:\n dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))\n maybe_download_and_extract(dir, self.cifar_classnum)\n train_files, test_files, meta_file = 
get_filenames(dir, cifar_classnum)\n if train_or_test == 'train':\n self.fs = train_files\n else:\n self.fs = test_files\n for f in self.fs:\n if not os.path.isfile(f):\n raise ValueError('Failed to find file: ' + f)\n self._label_names = _parse_meta(meta_file, cifar_classnum)\n self.train_or_test = train_or_test\n self.data = read_cifar(self.fs, cifar_classnum)\n self.dir = dir\n\n if shuffle is None:\n shuffle = train_or_test == 'train'\n self.shuffle = shuffle\n\n def __len__(self):\n return 50000 if self.train_or_test == 'train' else 10000\n\n def __iter__(self):\n idxs = np.arange(len(self.data))\n if self.shuffle:\n self.rng.shuffle(idxs)\n for k in idxs:\n # since cifar is quite small, just do it for safety\n yield self.data[k]\n\n def get_per_pixel_mean(self):\n \"\"\"\n Returns:\n a mean image of all (train and test) images of size 32x32x3\n \"\"\"\n train_files, test_files, _ = get_filenames(self.dir, self.cifar_classnum)\n all_imgs = [x[0] for x in read_cifar(train_files + test_files, self.cifar_classnum)]\n arr = np.array(all_imgs, dtype='float32')\n mean = np.mean(arr, axis=0)\n return mean\n\n def get_label_names(self):\n \"\"\"\n Returns:\n [str]: name of each class.\n \"\"\"\n return self._label_names\n\n def get_per_channel_mean(self):\n \"\"\"\n return three values as mean of each channel\n \"\"\"\n mean = self.get_per_pixel_mean()\n return np.mean(mean, axis=(0, 1))\n\n\nclass Cifar10(CifarBase):\n \"\"\"\n Produces [image, label] in Cifar10 dataset,\n image is 32x32x3 in the range [0,255].\n label is an int.\n \"\"\"\n def __init__(self, train_or_test, shuffle=None, dir=None):\n \"\"\"\n Args:\n train_or_test (str): either 'train' or 'test'.\n shuffle (bool): shuffle the dataset, default to shuffle in training\n \"\"\"\n super(Cifar10, self).__init__(train_or_test, shuffle, dir, 10)\n\n\nclass Cifar100(CifarBase):\n \"\"\" Similar to Cifar10\"\"\"\n def __init__(self, train_or_test, shuffle=None, dir=None):\n super(Cifar100, self).__init__(train_or_test, shuffle, dir, 100)\n\n\nif __name__ == '__main__':\n ds = Cifar10('train')\n mean = ds.get_per_channel_mean()\n print(mean)\n\n import cv2\n ds.reset_state()\n for i, dp in enumerate(ds):\n if i == 100:\n break\n img = dp[0]\n cv2.imwrite(\"{:04d}.jpg\".format(i), img)\n", "# -*- coding: utf-8 -*-\n# File: noise.py\n\n\nimport numpy as np\nimport cv2\n\nfrom .base import ImageAugmentor\n\n__all__ = ['JpegNoise', 'GaussianNoise', 'SaltPepperNoise']\n\n\nclass JpegNoise(ImageAugmentor):\n \"\"\" Random JPEG noise. 
\"\"\"\n\n def __init__(self, quality_range=(40, 100)):\n \"\"\"\n Args:\n quality_range (tuple): range to sample JPEG quality\n \"\"\"\n super(JpegNoise, self).__init__()\n self._init(locals())\n\n def _get_augment_params(self, img):\n return self.rng.randint(*self.quality_range)\n\n def _augment(self, img, q):\n enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]\n return cv2.imdecode(enc, 1).astype(img.dtype)\n\n\nclass GaussianNoise(ImageAugmentor):\n \"\"\"\n Add random Gaussian noise N(0, sigma^2) of the same shape to img.\n \"\"\"\n def __init__(self, sigma=1, clip=True):\n \"\"\"\n Args:\n sigma (float): stddev of the Gaussian distribution.\n clip (bool): clip the result to [0,255] in the end.\n \"\"\"\n super(GaussianNoise, self).__init__()\n self._init(locals())\n\n def _get_augment_params(self, img):\n return self.rng.randn(*img.shape)\n\n def _augment(self, img, noise):\n old_dtype = img.dtype\n ret = img + noise * self.sigma\n if self.clip or old_dtype == np.uint8:\n ret = np.clip(ret, 0, 255)\n return ret.astype(old_dtype)\n\n\nclass SaltPepperNoise(ImageAugmentor):\n \"\"\" Salt and pepper noise.\n Randomly set some elements in image to 0 or 255, regardless of its channels.\n \"\"\"\n\n def __init__(self, white_prob=0.05, black_prob=0.05):\n \"\"\"\n Args:\n white_prob (float), black_prob (float): probabilities setting an element to 255 or 0.\n \"\"\"\n assert white_prob + black_prob <= 1, \"Sum of probabilities cannot be greater than 1\"\n super(SaltPepperNoise, self).__init__()\n self._init(locals())\n\n def _get_augment_params(self, img):\n return self.rng.uniform(low=0, high=1, size=img.shape)\n\n def _augment(self, img, param):\n img[param > (1 - self.white_prob)] = 255\n img[param < self.black_prob] = 0\n return img\n", "# -*- coding: utf-8 -*-\n# File: sessinit.py\n\n\nimport numpy as np\nimport six\nimport tensorflow as tf\n\nfrom ..utils import logger\nfrom .common import get_op_tensor_name\nfrom .varmanip import SessionUpdate, get_checkpoint_path, get_savename_from_varname, is_training_name\n\n__all__ = ['SessionInit', 'ChainInit',\n 'SaverRestore', 'SaverRestoreRelaxed', 'DictRestore',\n 'JustCurrentSession', 'get_model_loader']\n\n\nclass SessionInit(object):\n \"\"\" Base class for utilities to load variables to a (existing) session. 
\"\"\"\n def init(self, sess):\n \"\"\"\n Initialize a session\n\n Args:\n sess (tf.Session): the session\n \"\"\"\n self._setup_graph()\n self._run_init(sess)\n\n def _setup_graph(self):\n pass\n\n def _run_init(self, sess):\n pass\n\n\nclass JustCurrentSession(SessionInit):\n \"\"\" This is a no-op placeholder\"\"\"\n pass\n\n\nclass CheckpointReaderAdapter(object):\n \"\"\"\n An adapter to work around old checkpoint format, where the keys are op\n names instead of tensor names (with :0).\n \"\"\"\n def __init__(self, reader):\n self._reader = reader\n m = self._reader.get_variable_to_shape_map()\n self._map = {k if k.endswith(':0') else k + ':0': v\n for k, v in six.iteritems(m)}\n\n def get_variable_to_shape_map(self):\n return self._map\n\n def get_tensor(self, name):\n if self._reader.has_tensor(name):\n return self._reader.get_tensor(name)\n if name in self._map:\n assert name.endswith(':0'), name\n name = name[:-2]\n return self._reader.get_tensor(name)\n\n def has_tensor(self, name):\n return name in self._map\n\n # some checkpoint might not have ':0'\n def get_real_name(self, name):\n if self._reader.has_tensor(name):\n return name\n assert self.has_tensor(name)\n return name[:-2]\n\n\nclass MismatchLogger(object):\n def __init__(self, exists, nonexists):\n self._exists = exists\n self._nonexists = nonexists\n self._names = []\n\n def add(self, name):\n self._names.append(get_op_tensor_name(name)[0])\n\n def log(self):\n if len(self._names):\n logger.warn(\"The following variables are in the {}, but not found in the {}: {}\".format(\n self._exists, self._nonexists, ', '.join(self._names)))\n\n\nclass SaverRestore(SessionInit):\n \"\"\"\n Restore a tensorflow checkpoint saved by :class:`tf.train.Saver` or :class:`ModelSaver`.\n \"\"\"\n def __init__(self, model_path, prefix=None, ignore=[]):\n \"\"\"\n Args:\n model_path (str): a model name (model-xxxx) or a ``checkpoint`` file.\n prefix (str): during restore, add a ``prefix/`` for every variable in this checkpoint.\n ignore (list[str]): list of tensor names that should be ignored during loading, e.g. 
learning-rate\n \"\"\"\n if model_path.endswith('.npy') or model_path.endswith('.npz'):\n logger.warn(\"SaverRestore expect a TF checkpoint, but got a model path '{}'.\".format(model_path) +\n \" To load from a dict, use 'DictRestore'.\")\n model_path = get_checkpoint_path(model_path)\n self.path = model_path # attribute used by AutoResumeTrainConfig!\n self.prefix = prefix\n self.ignore = [i if i.endswith(':0') else i + ':0' for i in ignore]\n\n def _setup_graph(self):\n dic = self._get_restore_dict()\n self.saver = tf.train.Saver(var_list=dic, name=str(id(dic)))\n\n def _run_init(self, sess):\n logger.info(\"Restoring checkpoint from {} ...\".format(self.path))\n self.saver.restore(sess, self.path)\n\n @staticmethod\n def _read_checkpoint_vars(model_path):\n \"\"\" return a set of strings \"\"\"\n reader = tf.train.NewCheckpointReader(model_path)\n reader = CheckpointReaderAdapter(reader) # use an adapter to standardize the name\n ckpt_vars = reader.get_variable_to_shape_map().keys()\n return reader, set(ckpt_vars)\n\n def _match_vars(self, func):\n reader, chkpt_vars = SaverRestore._read_checkpoint_vars(self.path)\n graph_vars = tf.global_variables()\n chkpt_vars_used = set()\n\n mismatch = MismatchLogger('graph', 'checkpoint')\n for v in graph_vars:\n name = get_savename_from_varname(v.name, varname_prefix=self.prefix)\n if name in self.ignore and reader.has_tensor(name):\n logger.info(\"Variable {} in the graph will not be loaded from the checkpoint!\".format(name))\n else:\n if reader.has_tensor(name):\n func(reader, name, v)\n chkpt_vars_used.add(name)\n else:\n # use tensor name (instead of op name) for logging, to be consistent with the reverse case\n if not is_training_name(v.name):\n mismatch.add(v.name)\n mismatch.log()\n mismatch = MismatchLogger('checkpoint', 'graph')\n if len(chkpt_vars_used) < len(chkpt_vars):\n unused = chkpt_vars - chkpt_vars_used\n for name in sorted(unused):\n if not is_training_name(name):\n mismatch.add(name)\n mismatch.log()\n\n def _get_restore_dict(self):\n var_dict = {}\n\n def f(reader, name, v):\n name = reader.get_real_name(name)\n assert name not in var_dict, \"Restore conflict: {} and {}\".format(v.name, var_dict[name].name)\n var_dict[name] = v\n self._match_vars(f)\n return var_dict\n\n\nclass SaverRestoreRelaxed(SaverRestore):\n \"\"\" Same as :class:`SaverRestore`, but has more relaxed constraints.\n\n It allows upcasting certain variables, or reshape certain\n variables when there is a mismatch that can be fixed.\n Another advantage is that it doesn't add any new ops to the graph.\n But it is also slower than :class:`SaverRestore`.\n \"\"\"\n def _run_init(self, sess):\n logger.info(\n \"Restoring checkpoint from {} ...\".format(self.path))\n\n def f(reader, name, v):\n val = reader.get_tensor(name)\n SessionUpdate.load_value_to_var(v, val)\n with sess.as_default():\n self._match_vars(f)\n\n\nclass DictRestore(SessionInit):\n \"\"\"\n Restore variables from a dictionary.\n \"\"\"\n\n def __init__(self, variable_dict):\n \"\"\"\n Args:\n variable_dict (dict): a dict of {name: value}\n \"\"\"\n assert isinstance(variable_dict, dict), type(variable_dict)\n # use varname (with :0) for consistency\n self._prms = {get_op_tensor_name(n)[1]: v for n, v in six.iteritems(variable_dict)}\n\n def _run_init(self, sess):\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n variable_names = set([k.name for k in variables])\n param_names = set(six.iterkeys(self._prms))\n\n intersect = variable_names & param_names\n\n 
logger.info(\"Variables to restore from dict: {}\".format(', '.join(map(str, intersect))))\n\n mismatch = MismatchLogger('graph', 'dict')\n for k in sorted(variable_names - param_names):\n if not is_training_name(k):\n mismatch.add(k)\n mismatch.log()\n mismatch = MismatchLogger('dict', 'graph')\n for k in sorted(param_names - variable_names):\n mismatch.add(k)\n mismatch.log()\n\n upd = SessionUpdate(sess, [v for v in variables if v.name in intersect])\n logger.info(\"Restoring {} variables from dict ...\".format(len(intersect)))\n upd.update({name: value for name, value in six.iteritems(self._prms) if name in intersect})\n\n\nclass ChainInit(SessionInit):\n \"\"\"\n Initialize a session by a list of :class:`SessionInit` instance, executed one by one.\n This can be useful for, e.g., loading several models from different files\n to form a composition of models.\n \"\"\"\n\n def __init__(self, sess_inits):\n \"\"\"\n Args:\n sess_inits (list[SessionInit]): list of :class:`SessionInit` instances.\n \"\"\"\n self.inits = sess_inits\n\n def _setup_graph(self):\n for i in self.inits:\n i._setup_graph()\n\n def _run_init(self, sess):\n for i in self.inits:\n i._run_init(sess)\n\n\ndef get_model_loader(filename):\n \"\"\"\n Get a corresponding model loader by looking at the file name.\n\n Returns:\n SessInit: either a :class:`DictRestore` (if name ends with 'npy/npz') or\n :class:`SaverRestore` (otherwise).\n \"\"\"\n assert isinstance(filename, six.string_types), filename\n if filename.endswith('.npy'):\n assert tf.gfile.Exists(filename), filename\n return DictRestore(np.load(filename, encoding='latin1').item())\n elif filename.endswith('.npz'):\n assert tf.gfile.Exists(filename), filename\n obj = np.load(filename)\n return DictRestore(dict(obj))\n else:\n return SaverRestore(filename)\n", "# -*- coding: utf-8 -*-\n# File: varreplace.py\n# Credit: Qinyao He\n\nfrom contextlib import contextmanager\nimport tensorflow as tf\n\nfrom .common import get_tf_version_tuple\n\n__all__ = ['custom_getter_scope', 'freeze_variables', 'remap_variables']\n\n\n@contextmanager\ndef custom_getter_scope(custom_getter):\n \"\"\"\n Args:\n custom_getter: the same as in :func:`tf.get_variable`\n\n Returns:\n The current variable scope with a custom_getter.\n \"\"\"\n scope = tf.get_variable_scope()\n if get_tf_version_tuple() >= (1, 5):\n with tf.variable_scope(\n scope, custom_getter=custom_getter,\n auxiliary_name_scope=False):\n yield\n else:\n ns = tf.get_default_graph().get_name_scope()\n with tf.variable_scope(\n scope, custom_getter=custom_getter):\n with tf.name_scope(ns + '/' if ns else ''):\n yield\n\n\ndef remap_variables(fn):\n \"\"\"\n Use fn to map the output of any variable getter.\n\n Args:\n fn (tf.Variable -> tf.Tensor)\n\n Returns:\n The current variable scope with a custom_getter that maps\n all the variables by fn.\n\n Example:\n .. code-block:: python\n\n with varreplace.remap_variables(lambda var: quantize(var)):\n x = FullyConnected('fc', x, 1000) # fc/{W,b} will be quantized\n \"\"\"\n def custom_getter(getter, *args, **kwargs):\n v = getter(*args, **kwargs)\n return fn(v)\n return custom_getter_scope(custom_getter)\n\n\ndef freeze_variables(stop_gradient=True, skip_collection=False):\n \"\"\"\n Return a context to freeze variables,\n by wrapping ``tf.get_variable`` with a custom getter.\n It works by either applying ``tf.stop_gradient`` on the variables,\n or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or\n both.\n\n Example:\n .. 
code-block:: python\n\n with varreplace.freeze_variable(stop_gradient=False, skip_collection=True):\n x = FullyConnected('fc', x, 1000) # fc/* will not be trained\n\n Args:\n stop_gradient (bool): if True, variables returned from `get_variable`\n will be wrapped with `tf.stop_gradient` and therefore has no\n gradient when used later.\n Note that the created variables may still have gradient when accessed\n by other approaches (e.g. by name, or by collection).\n Also note that this makes `tf.get_variable` returns a Tensor instead of a Variable,\n which may break existing code.\n Therefore, it's recommended to use the `skip_collection` option instead.\n skip_collection (bool): if True, do not add the variable to\n ``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``\n collection. As a result they will not be trained by default.\n \"\"\"\n def custom_getter(getter, *args, **kwargs):\n trainable = kwargs.get('trainable', True)\n name = args[0] if len(args) else kwargs.get('name')\n if skip_collection:\n kwargs['trainable'] = False\n v = getter(*args, **kwargs)\n if skip_collection:\n tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)\n if trainable and stop_gradient:\n v = tf.stop_gradient(v, name='freezed_' + name)\n return v\n return custom_getter_scope(custom_getter)\n" ]
[ [ "numpy.array", "numpy.transpose", "numpy.mean" ], [ "numpy.clip" ], [ "tensorflow.gfile.Exists", "numpy.load", "tensorflow.global_variables", "tensorflow.train.NewCheckpointReader", "tensorflow.get_collection" ], [ "tensorflow.get_default_graph", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "tensorflow.name_scope", "tensorflow.stop_gradient", "tensorflow.add_to_collection" ] ]
nikwitt/FLEX_IR
[ "900221ea4078cc79fa101dff9fa9921194f18bc7" ]
[ "single_orbital/parameters.py" ]
[ "# -*- encoding: latin-1 -*-\r\n\r\nimport numpy as np\r\nimport h5py\r\nimport os\r\n\r\nclass parameters:\r\n \"\"\"\r\n Setting parameters for current calculation.\r\n As no extra file is generated, one should make thorough notes on what \r\n parameters are set in each calculation!\r\n \"\"\"\r\n def __init__(self,T, n, tpr, round_it, T_load = 0.1, tpr_load = 0.0): \r\n ### Calculation parameters\r\n # General settings\r\n self.mode = 'FLEX'\r\n self.mix = 0.2 # Value of how much of the new G is to be used!\r\n self.round_it = round_it\r\n \r\n # SC calculation options\r\n self.SC_type = 'd' #'s' or 'p' or 'd'\r\n \r\n # Cutoffs/accuracy\r\n self.IR_tol = 1e-15\r\n self.g_sfc_tol = 1e-5\r\n self.SC_sfc_tol = 1e-4\r\n \r\n # Physical quantities\r\n self.nk1, self.nk2, self.nk3 = 64, 64, 1\r\n self.T = T\r\n self.beta = 1/self.T\r\n self.Lambda = 10**4\r\n self.n_fill = n\r\n self.nspin = 1\r\n self.norb = 1 \r\n self.nwan = self.nspin * self.norb #spin * orbital\r\n \r\n # Interaction (for t=1)\r\n self.u0 = 4\r\n\r\n\r\n ### Setting up k-mesh\r\n self.nk = self.nk1 *self.nk2 * self.nk3\r\n self.dk1, self.dk2, self.dk3 = 1./self.nk1, 1./self.nk2, 1./self.nk3\r\n k1, k2, k3 = np.meshgrid(np.arange(self.nk1)*self.dk1,\\\r\n np.arange(self.nk2)*self.dk2,\\\r\n np.arange(self.nk3)*self.dk3)\r\n self.k1, self.k2, self.k3 = k1.flatten(), k2.flatten(), k3.flatten()\r\n\r\n\r\n ### Energy dispersion (mutli_orb -> hamiltonian.py)\r\n self.t = 1\r\n self.t_prime = tpr*self.t\r\n \r\n self.ek = 2*self.t * (np.cos(2*np.pi*self.k1) + np.cos(2*np.pi*self.k2)) \\\r\n + 4*self.t_prime * np.cos(2*np.pi*self.k1) * np.cos(2*np.pi*self.k2)\r\n\r\n \r\n ### Setting Log options\r\n Log_name = 'Log_n_{}'.format(self.n_fill)\r\n self.Logstr = Log_name + \".dat\"\r\n self.Logerrstr = Log_name + \"_err.dat\"\r\n self.err_str_begin = (\"System T = {} | n = {} | U = {} | tpr = {} : \").format(self.T,self.n_fill,self.u0,self.t_prime)\r\n \r\n ### Seting saving options\r\n self.sp_dir = \"Odata_n_{}\".format(self.n_fill) + \"/\"\r\n self.sp_name = \"calculation_data_T_{}_U_{}_tpr_{}_n_{}.h5\"\r\n \r\n #formatting middle string\r\n self.sp_name_save = self.sp_name.format(self.T,self.u0,self.t_prime,self.n_fill)\r\n self.sp_name_load = self.sp_name.format(T_load,self.u0,tpr_load,self.n_fill)\r\n \r\n #generating full string \r\n self.savepath = self.sp_dir + self.sp_name_save\r\n self.loadpath = self.sp_dir + self.sp_name_load\r\n \r\n #eigenvalue strings\r\n self.BSE_EV_path = \"BSE_kernel_EV/max_spin_charge_ev_n_{}_tpr_{}_U_{}.dat\".format(self.n_fill,self.t_prime,self.u0)\r\n self.SC_EV_path = \"SC_EV/{}w_lam_n_{}_tpr_{}_U_{}.dat\".format(self.SC_type,self.n_fill,self.t_prime,self.u0)\r\n self.SC_EV_path_neg = \"SC_EV/{}w_lam_n_{}_tpr_{}_U_{}.dat\".format(self.SC_type,self.n_fill,self.t_prime,str(self.u0)+\"_first_negative\")\r\n\r\n \r\n #generate hdf5 file if it does not exist\r\n os.makedirs(\"SC_EV\", exist_ok=True) \r\n os.makedirs(\"BSE_kernel_EV\", exist_ok=True)\r\n os.makedirs(self.sp_dir, exist_ok=True)\r\n \r\n if not os.path.exists(self.savepath):\r\n with h5py.File(self.savepath,'w') as file: \r\n metadata = {'System name' : 'Hubbard Square lattice',\r\n 'N_k1' : self.nk1,\r\n 'N_k2' : self.nk2,\r\n 'N_k3' : self.nk3,\r\n 'Lambda_IR' : self.Lambda,\r\n 'IR_tol' : self.IR_tol,\r\n 'g_sfc_tol' : self.g_sfc_tol,\r\n 'SC_sfc_tol' : self.SC_sfc_tol,\r\n 'n_fill' : self.n_fill,\r\n 'T' : self.T,\r\n 'U' : self.u0,\r\n 't_prime' : self.t_prime,}\r\n \r\n file.attrs.update(metadata)\r\n" ]
[ [ "numpy.arange", "numpy.cos" ] ]
zh217/infirunner
[ "ceb4b7b3a7ca9c080d537f2e601d1be197e27395" ]
[ "infirunner/hyperband.py" ]
[ "import abc\nimport importlib\nimport json\nimport math\nimport os\nimport random\nimport sys\nimport time\nimport datetime\n\nimport colorama\nimport click\nimport infirunner.capsule\nimport numpy as np\nimport scipy.stats as sps\n\nfrom colorama import Style, Fore\nfrom statsmodels.nonparametric.api import KDEMultivariate\n\nfrom infirunner.util import make_trial_id, UniformBernoulli\nfrom infirunner.generator import Generator\nfrom infirunner.watch import ExperimentWatcher, FastTSVTail\n\ncolorama.init()\n\n\ndef int_ceil(x):\n return int(math.ceil(x))\n\n\ndef int_floor(x):\n return int(math.floor(x))\n\n\ndef log_print(*args):\n print(Fore.LIGHTBLACK_EX + f'[{datetime.datetime.now()}]' + Style.RESET_ALL, *args, file=sys.stderr)\n\n\n# Note that the metric is always assumed to be optimized towards minimum.\nclass BracketElement:\n def __init__(self, bracket, round, budget, metric, trial, active, promoted):\n self.bracket = bracket\n self.round = round\n self.budget = budget\n self.metric = metric\n self.trial = trial\n self.active = active\n self.promoted = promoted\n\n def __eq__(self, other):\n return self is other or (self.bracket == other.bracket and\n self.round == other.round and\n self.budget == other.budget and\n self.metric == other.metric and\n self.trial == other.trial and\n self.active == other.active and\n self.promoted == other.promoted)\n\n def __repr__(self):\n return (f'<BracketElement bracket={self.bracket} round={self.round} budget={self.budget} '\n f'metric={self.metric} trial={self.trial} active={self.active} promoted={self.promoted}>')\n\n def serialize(self):\n return {\n 'bracket': self.bracket,\n 'round': self.round,\n 'budget': self.budget,\n 'trial': self.trial,\n 'active': self.active,\n 'promoted': self.promoted\n }\n\n @staticmethod\n def deserialize(data):\n return BracketElement(bracket=data['bracket'],\n round=data['round'],\n budget=data['budget'],\n metric=data['metric'],\n trial=data['trial'],\n active=data['active'],\n promoted=data['promoted'])\n\n\nclass ParamGen(abc.ABC):\n def __init__(self, module, experiment_dir):\n importlib.import_module(module)\n self.capsule = infirunner.capsule.active_capsule\n self.experiment_dir = experiment_dir\n\n def get_next_parameter(self):\n return self.capsule.gen_params(use_default=False, skip_const=True)\n\n\nclass RandomParamGen(ParamGen):\n pass\n\n\nclass BOHBParamGen(ParamGen):\n def __init__(self, module, experiment_dir,\n random_ratio, random_sample_size,\n guided_ratio, guided_sample_size,\n result_size_threshold=None, good_ratio=0.15, early_stop_ratio=1 / math.e,\n model_cache_time=30, mode='minimize',\n min_bandwidth=1e-3, bandwidth_estimation='normal_reference',\n bandwidth_factor=3.):\n super().__init__(module, experiment_dir)\n\n assert 0 <= random_ratio <= 1\n assert 0 <= guided_ratio <= 1\n assert 0 <= random_ratio + guided_ratio <= 1\n\n sample_params = super().get_next_parameter()\n if result_size_threshold is None:\n result_size_threshold = len(sample_params) + 1\n else:\n result_size_threshold = max(len(sample_params) + 1, result_size_threshold)\n log_print(Fore.LIGHTBLACK_EX + 'model-based threshold is', result_size_threshold)\n\n self.random_dice = UniformBernoulli(random_ratio)\n self.guided_dice = UniformBernoulli(guided_ratio / (1 - random_ratio))\n self.good_ratio = good_ratio\n self.early_stop_ratio = early_stop_ratio\n self.random_sample_size = random_sample_size\n self.guided_sample_size = guided_sample_size\n self.result_size_threshold = result_size_threshold\n 
self.model_cache_time = model_cache_time\n self.last_stats_collect_time = 0.\n self.last_stats = (None, None)\n self.last_models = (None, None)\n self.is_maximize = mode == 'maximize'\n self.min_bandwidth = min_bandwidth\n self.bandwidth_estimation = bandwidth_estimation\n self.bandwidth_factor = bandwidth_factor\n self.kde_vartypes, self.kde_data_encoder, self.kde_data_decoder, self.kde_data_bounds = self.make_kde_helpers()\n\n def make_kde_helpers(self):\n param_keys = list(super().get_next_parameter().keys())\n param_keys.sort()\n param_gen = self.capsule.param_gen\n\n var_types = ''.join(param_gen[key].var_type for key in param_keys)\n\n data_bounds = [param_gen[key].encoded_bounds() for key in param_keys]\n\n def data_encoder(data):\n return [param_gen[key].encode_as_numerical(data[key]) for key in param_keys]\n\n def data_decoder(data, old_params):\n ret = {}\n for idx, key in enumerate(param_keys):\n decoded = param_gen[key].decode_from_numerical(data[idx])\n if var_types[idx] == 'u' and decoded != old_params[key]:\n while True:\n decoded = param_gen[key].get_next_value()\n if decoded != old_params[key]:\n break\n ret[key] = decoded\n return ret\n\n return var_types, data_encoder, data_decoder, data_bounds\n\n def get_trial_params(self, trial):\n with open(os.path.join(self.experiment_dir, trial, f'last_state.json'), 'r', encoding='utf-8') as f:\n old_params = json.load(f)['params']\n return old_params\n\n def guided_modify_parameter(self, trial, model):\n old_params = self.get_trial_params(trial)\n old_params_encoded = self.kde_data_encoder(old_params)\n new_params_encoded = []\n for old_param, bw_, (lbound, ubound), vartype in zip(old_params_encoded, model.bw, self.kde_data_bounds,\n self.kde_vartypes):\n bw = bw_ * self.bandwidth_factor\n if lbound is None or ubound is None:\n new_params = np.random.normal(loc=old_param, scale=bw)\n else:\n new_params = sps.truncnorm.rvs((lbound - old_param) / bw, (ubound - old_param) / bw,\n loc=old_param, scale=bw)\n new_params_encoded.append(new_params)\n\n return self.kde_data_decoder(new_params_encoded, old_params)\n\n def get_suggested_next_parameter(self, goods, bads):\n good_model, bad_model = self.last_models\n if good_model is None or bad_model is None:\n good_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in goods],\n var_type=self.kde_vartypes,\n bw=self.bandwidth_estimation)\n bad_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in bads],\n var_type=self.kde_vartypes,\n bw=self.bandwidth_estimation)\n good_model.bw = np.clip(good_model.bw, self.min_bandwidth, None)\n bad_model.bw = np.clip(bad_model.bw, self.min_bandwidth, None)\n self.last_models = good_model, bad_model\n\n best_score = float('-inf')\n best_candidate = None\n use_guided = self.guided_dice()\n for _ in range(self.guided_sample_size if use_guided else self.random_sample_size):\n if use_guided:\n next_param = self.guided_modify_parameter(random.choice(goods)[0], good_model)\n else:\n next_param = super().get_next_parameter()\n good_score = np.log(np.clip(good_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None))\n bad_score = np.log(np.clip(bad_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None))\n score = good_score - bad_score\n if score > best_score:\n best_score = score\n best_candidate = next_param\n log_print(Fore.LIGHTBLACK_EX + 'proposing', 'guided' if use_guided else 'sieved', 'parameter with score',\n best_score)\n return best_candidate\n\n def collect_stats(self):\n 
now = time.time()\n if now - self.last_stats_collect_time > self.model_cache_time:\n metrics = list(self.get_all_budget_metrics().items())\n metrics.sort(key=lambda x: x[0], reverse=True)\n goods = None\n bads = None\n\n for budget, trial_data in metrics:\n bads_ = [(trial, metric) for trial, metric in trial_data if\n metric is None or not math.isfinite(metric)]\n goods_ = [(trial, metric) for trial, metric in trial_data if\n metric is not None and math.isfinite(metric)]\n if len(goods_) >= self.result_size_threshold and len(goods_) + len(bads_) > self.result_size_threshold:\n goods_.sort(key=lambda x: x[1], reverse=self.is_maximize)\n good_size = int_ceil(len(goods_) * self.good_ratio)\n bads_ = list(reversed(goods_))[\n :max(len(goods_) - good_size, self.result_size_threshold - len(bads_))] + bads_\n goods_ = goods_[:max(good_size, self.result_size_threshold)]\n log_print(Fore.LIGHTBLACK_EX + f'collected stats for budget {budget} with {len(goods_)} goods, '\n f'{len(bads_)} bads')\n log_print(Fore.LIGHTBLACK_EX + f'best good: {goods_[0][1]:10.4f}, best bad: {bads_[0][1]:10.4f}')\n goods = goods_\n bads = bads_\n break\n if self.last_stats != (goods, bads):\n self.last_stats = (goods, bads)\n self.last_models = (None, None)\n self.last_stats_collect_time = now\n return goods, bads\n else:\n return self.last_stats\n\n def get_all_budget_metrics(self):\n active_dirs = []\n for parent, dirs, files in os.walk(self.experiment_dir):\n active_dirs = dirs\n break\n\n metrics = {}\n\n nan_metrics = []\n\n for dir in active_dirs:\n try:\n with open(os.path.join(self.experiment_dir, dir, 'metric.tsv'), 'rb') as f:\n for l in f:\n budget, metric_time, metric_res = l.split(b'\\t')\n budget = int(budget)\n budget_metric = metrics.setdefault(budget, [])\n metric = float(metric_res)\n budget_metric.append((dir, metric))\n if not math.isfinite(metric):\n nan_metrics.append((budget, dir))\n except FileNotFoundError:\n continue\n for budget in metrics.keys():\n for trial_budget, dir in nan_metrics:\n if budget > trial_budget:\n metrics[budget].append((dir, float('nan')))\n return metrics\n\n def should_terminate_trials(self, trials):\n if not trials:\n return []\n trials = set(trials)\n should_terminate = set()\n metrics = self.get_all_budget_metrics()\n good_metrics_threshold = {}\n for budget, trials_data in metrics.items():\n budget_data = []\n for _, metric in trials_data:\n if math.isfinite(metric):\n budget_data.append(metric)\n if len(budget_data) > self.result_size_threshold:\n budget_data.sort() # ascending sort\n budget_data = budget_data[:int_ceil(len(budget_data) * self.early_stop_ratio)]\n good_metrics_threshold[budget] = budget_data[-1]\n\n for budget, trials_data in sorted(metrics.items(), key=lambda x: x[0], reverse=True):\n if budget == 0:\n continue\n prev_threshold = good_metrics_threshold.get(budget - 1)\n if prev_threshold is None:\n continue\n for trial, metric in trials_data:\n if trial not in trials:\n continue\n if metric < prev_threshold:\n continue\n log_print(Fore.LIGHTBLACK_EX + 'adding trial', trial, 'to termination list.',\n 'budget', budget, 'metric', metric, 'threshold', prev_threshold)\n should_terminate.add(trial)\n\n return list(should_terminate)\n\n def get_next_parameter(self):\n if self.random_dice():\n log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because dice says so')\n return super().get_next_parameter()\n else:\n goods, bads = self.collect_stats()\n if goods and bads:\n return self.get_suggested_next_parameter(goods, bads)\n else:\n 
log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because not enough samples')\n return super().get_next_parameter()\n\n\nclass Hyperband:\n def __init__(self, min_budget, max_budget, reset_nan_trial=True, reduction_ratio=math.e):\n self.min_budget = min_budget\n self.max_budget = max_budget\n self.reduction_ratio = reduction_ratio\n self.bracket_max = int_floor(math.log(max_budget / min_budget, reduction_ratio))\n self.brackets = self.make_brackets()\n self.cur_bracket_idx = 0\n self.cur_round_idx = 0\n self.reset_nan_trial = reset_nan_trial\n\n def serialize(self):\n return {\n 'min_budget': self.min_budget,\n 'max_budget': self.max_budget,\n 'reduction_ratio': self.reduction_ratio,\n 'bracket_max': self.bracket_max,\n 'brackets': [[[btrial.serialize() for btrial in round] for round in bracket] for bracket in self.brackets],\n 'cur_bracket_idx': self.cur_bracket_idx,\n 'cur_round_idx': self.cur_round_idx,\n 'reset_nan_trial': self.reset_nan_trial\n }\n\n @staticmethod\n def deserialize(data):\n self = Hyperband(min_budget=data['min_budget'],\n max_budget=data['max_budget'],\n reduction_ratio=data['reduction_ratio'],\n reset_nan_trial=data['reset_nan_trial'])\n self.brackets = [[[BracketElement.deserialize(btrial)\n for btrial in round]\n for round in bracket]\n for bracket in data['brackets']]\n self.cur_round_idx = data['cur_round_idx']\n self.cur_bracket_idx = data['cur_bracket_idx']\n self.bracket_max = data['bracket_max']\n return self\n\n def pprint_brackets(self):\n for bracket_idx, bracket in enumerate(self.brackets):\n log_print(Fore.LIGHTBLACK_EX + f'bracket {bracket_idx}:')\n for round_idx, round in enumerate(bracket):\n actives = [e for e in round if e.active]\n dones = [e for e in round if e.metric is not None]\n good_dones = [e for e in dones if math.isfinite(e.metric)]\n if not round:\n continue\n budget = round[0].budget\n to_print = (f'\\tround {round_idx:1}: {len(round):3} trials with {budget:3} budgets, ' +\n f'{len(actives):3} active, {len(dones):3} complete')\n if good_dones:\n best_metric = min(e.metric for e in good_dones)\n best_trial = [e.trial for e in dones if e.metric == best_metric][0]\n to_print += f', {best_metric:10.4f} best {best_trial}'\n else:\n to_print = Fore.LIGHTBLACK_EX + to_print\n log_print(to_print)\n\n def make_brackets(self):\n brackets = []\n for bracket_idx in range(self.bracket_max, -1, -1):\n bracket = []\n init_n_trials = (self.bracket_max + 1) / (bracket_idx + 1) * (self.reduction_ratio ** bracket_idx)\n init_budget = self.max_budget / (self.reduction_ratio ** bracket_idx)\n\n for i in range(bracket_idx + 1):\n n_trials = int_ceil(init_n_trials / (self.reduction_ratio ** i))\n if i == bracket_idx:\n budget = self.max_budget\n elif bracket_idx == self.bracket_max and i == 0:\n budget = self.min_budget\n n_trials = int_ceil(self.max_budget / self.min_budget)\n else:\n budget = int_ceil(init_budget * (self.reduction_ratio ** i))\n bracket_trials = []\n for _ in range(n_trials):\n bracket_trials.append(BracketElement(bracket=self.bracket_max - bracket_idx,\n round=i,\n budget=budget,\n metric=None,\n trial=None,\n active=False,\n promoted=False))\n bracket.append(bracket_trials)\n brackets.append(bracket)\n return brackets\n\n def is_round_complete(self, bracket_id, round_id):\n return all(e.trial is not None and not e.active for e in self.brackets[bracket_id][round_id])\n\n def is_complete(self):\n return all(e.trial is not None and not e.active for e in self.brackets[-1][-1])\n\n def request_trial(self):\n # if all brackets are 
complete, raise StopIteration\n # if caller should wait, return None\n # return: BracketElement, note that if el.trial is empty the caller is responsible for filling it\n if self.cur_bracket_idx > self.bracket_max:\n self.mark_all_brackets()\n log_print(Fore.LIGHTGREEN_EX + 'All brackets complete')\n raise StopIteration\n cur_bracket = self.brackets[self.cur_bracket_idx]\n cur_round = cur_bracket[self.cur_round_idx]\n\n inactive_without_trial = [e for e in cur_round if e.trial is None and not e.active]\n if inactive_without_trial:\n ret = inactive_without_trial[0]\n\n if self.cur_round_idx == 0:\n ret.active = True\n return ret\n else:\n last_round_completed_trials = [e for e in cur_bracket[self.cur_round_idx - 1]\n if e.metric is not None and math.isfinite(e.metric)\n and not e.promoted]\n if last_round_completed_trials:\n last_round_completed_trials.sort(key=lambda e: e.metric)\n best_available_trial = last_round_completed_trials[0]\n best_available_trial.promoted = True\n log_print(Fore.LIGHTBLACK_EX + 'promote best available trial', best_available_trial.trial,\n best_available_trial.metric,\n '(worst is', last_round_completed_trials[-1].metric, ')')\n ret.trial = best_available_trial.trial\n ret.active = True\n # if no trial is present, the caller is responsible for filling it it\n return ret\n elif not self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx - 1):\n return None\n else:\n log_print(Fore.LIGHTRED_EX + 'Insufficient previous rounders to continue', id(self))\n self.mark_all_brackets()\n raise StopIteration\n\n if self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx):\n if self.cur_round_idx == len(cur_bracket) - 1:\n self.cur_round_idx = 0\n self.cur_bracket_idx += 1\n else:\n self.cur_round_idx += 1\n log_print(Fore.LIGHTBLACK_EX + str(id(self)), 'proceed to bracket', self.cur_bracket_idx, 'round',\n self.cur_round_idx)\n return self.request_trial()\n else:\n return None\n\n def mark_all_brackets(self):\n self.brackets = [self.mark_bracket_failed(bracket) for bracket in self.brackets]\n\n def mark_bracket_failed(self, bracket):\n cleaned_bracket = [self.mark_round_failed(round) for round in bracket]\n return cleaned_bracket\n\n def mark_round_failed(self, round):\n return [t for t in round if t.trial is not None]\n\n def report_trial(self, bracket_idx, round_idx, trial, metric):\n # mark inactive, set metric\n # make verdict for all completed rounds\n requested_round = self.brackets[bracket_idx][round_idx]\n requested_element = None\n for el in requested_round:\n if el.trial == trial:\n requested_element = el\n assert requested_element\n requested_element.metric = metric\n requested_element.active = False\n if math.isfinite(metric):\n log_print('hyperband received report', bracket_idx, round_idx, trial, metric)\n else:\n log_print(Fore.LIGHTRED_EX + 'hyperband received report', bracket_idx, round_idx, trial, metric)\n # reset first rounder null results\n if self.reset_nan_trial:\n log_print(Fore.LIGHTRED_EX + 'nan trial')\n requested_element.trial = None\n requested_element.metric = None\n\n\nclass HyperbandDriver:\n def __init__(self, experiment_dir, trial_generator, param_generator, min_budget, max_budget,\n reduction_ratio, sleep_interval, max_hyperbands, mode, reset_nan_trial,\n early_stop_min_budget, early_stop_threshold):\n self.experiment_dir = experiment_dir\n self.min_budget = min_budget\n self.max_budget = max_budget\n self.reduction_ratio = reduction_ratio\n self.sleep_interval = sleep_interval\n self.watcher = 
ExperimentWatcher(experiment_dir)\n self.hyperbands = []\n self.watch_active_trials = []\n self.trial_generator = trial_generator\n self.param_generator = param_generator\n self.max_hyperbands = max_hyperbands\n self.is_maximize = mode == 'maximize'\n self.reset_nan_trial = reset_nan_trial\n self.early_stop_min_budget = early_stop_min_budget\n self.early_stop_threshold = ((-early_stop_threshold if self.is_maximize else early_stop_threshold)\n if early_stop_threshold is not None else float('inf'))\n\n def generate_new_trial(self, end_budget, n_gpu=1):\n params = self.param_generator.get_next_parameter()\n new_id = make_trial_id()\n log_print(Fore.LIGHTGREEN_EX + f'generate new trial {new_id} with budget 0 -> {end_budget}')\n self.trial_generator.change_capsule_trial_id(new_id)\n self.trial_generator.save_start_state(start_budget=0,\n end_budget=end_budget,\n n_gpu=n_gpu,\n params=params)\n return new_id\n\n def amend_trial(self, old_trial, end_budget, n_gpu=1):\n self.trial_generator.change_capsule_trial_id(old_trial)\n new_state = self.trial_generator.amend_start_state(end_budget=end_budget, n_gpu=n_gpu)\n log_print(\n Fore.LIGHTBLUE_EX + f'amended trial {old_trial} with budget '\n f'{new_state[\"start_budget\"]} -> {new_state[\"end_budget\"]}, params',\n new_state['params'])\n\n def get_next_hyperband_trial(self):\n for hyperband_idx, hyperband in enumerate(self.hyperbands):\n try:\n new_trial = hyperband.request_trial()\n except StopIteration:\n continue\n if new_trial is not None:\n if new_trial.trial is None:\n new_trial.trial = self.generate_new_trial(new_trial.budget)\n else:\n self.amend_trial(new_trial.trial, new_trial.budget)\n return hyperband_idx, new_trial\n if len(self.hyperbands) < self.max_hyperbands:\n self.hyperbands.append(Hyperband(min_budget=self.min_budget,\n max_budget=self.max_budget,\n reduction_ratio=self.reduction_ratio,\n reset_nan_trial=self.reset_nan_trial))\n return self.get_next_hyperband_trial()\n return None, None\n\n def get_available_slots(self):\n slot_files = []\n for parent, dirs, files in os.walk(self.experiment_dir):\n slot_files = [f for f in files if f.startswith('slots_')]\n break\n total_slots = 0\n for slot_file in slot_files:\n with open(os.path.join(self.experiment_dir, slot_file), 'rb') as f:\n total_slots += int(f.read().strip())\n return total_slots\n\n def check_for_completed_trials(self):\n completed_trials = []\n watcher_result = {k: v for k, v in\n self.watcher.poll(slots=False, only=[t.trial for _, t in self.watch_active_trials],\n fields=False)['trials'] if not v['active']}\n for hyperband_idx, trial in self.watch_active_trials:\n if trial.trial in watcher_result:\n completed_trials.append(trial)\n trial_result = watcher_result[trial.trial]\n log_print(Fore.LIGHTBLACK_EX + f'obtained watcher result for {trial.trial}')\n if trial_result['budget'] != trial.budget:\n trial_result['metric'] = float('nan')\n if trial_result['metric'] is None:\n trial_result['metric'] = float('nan')\n metric = trial_result['metric']\n if self.is_maximize:\n metric = -metric\n self.hyperbands[hyperband_idx].report_trial(trial.bracket, trial.round, trial.trial, metric)\n self.watch_active_trials = [t for t in self.watch_active_trials if t[1] not in completed_trials]\n\n def early_stop_trials(self):\n if self.early_stop_min_budget is None:\n return\n for _, trial_info in self.watch_active_trials:\n try:\n with FastTSVTail(os.path.join(self.experiment_dir, trial_info.trial, 'metric.tsv')) as f:\n budget, _, metric_res = f.tail()\n budget = int(budget)\n metric 
= float(metric_res)\n if self.is_maximize:\n metric = -metric\n if budget >= self.early_stop_min_budget and metric > self.early_stop_threshold:\n log_print(Fore.RED + 'requesting early stopping of trial', trial_info.trial,\n 'budget', budget, 'metric', metric)\n open(os.path.join(self.experiment_dir, trial_info.trial, 'terminate'), 'ab').close()\n except FileNotFoundError:\n continue\n\n def start_trials(self):\n n_slots = self.get_available_slots()\n for _ in range(n_slots):\n hyperband_idx, new_trial = self.get_next_hyperband_trial()\n if new_trial is not None:\n # launch new trial\n # add to trials being watched\n log_print(Fore.LIGHTBLACK_EX + f'watching trial {new_trial.trial} of band {hyperband_idx}')\n self.watch_active_trials.append((hyperband_idx, new_trial))\n\n def save_hyberband_data(self):\n with open(os.path.join(self.experiment_dir, 'hyperbands.json'), 'w', encoding='utf-8') as f:\n h_data = {\n 'active': [(idx, trial.serialize()) for idx, trial in self.watch_active_trials],\n 'hyberbands': [hyperband.serialize() for hyperband in self.hyperbands]\n }\n json.dump(h_data, f, ensure_ascii=False, allow_nan=True, indent=2)\n\n def load_hyperband_data(self, path):\n log_print('loading hyperbands data from', path)\n with open(path, 'r', encoding='utf_8') as f:\n data = json.load(f)\n self.watch_active_trials = [BracketElement.deserialize(trial) for trial in data]\n self.hyperbands = [(idx, Hyperband.deserialize(hb)) for idx, hb in data['hyperbands']]\n\n def start(self):\n last_watching = set()\n while True:\n self.early_stop_trials()\n self.check_for_completed_trials()\n self.start_trials()\n cur_watching = set(t.trial for _, t in self.watch_active_trials)\n if last_watching != cur_watching:\n for idx, hb in enumerate(self.hyperbands):\n log_print(Fore.LIGHTBLACK_EX + '----- Hyperband', idx, id(hb), '-----')\n hb.pprint_brackets()\n self.save_hyberband_data()\n last_watching = cur_watching\n if len(self.hyperbands) == self.max_hyperbands and all(hb.is_complete() for hb in self.hyperbands):\n break\n time.sleep(self.sleep_interval)\n\n\n@click.command()\n@click.option('--exp-path', required=True)\n@click.option('--module', required=True)\n@click.option('--min-budget', type=int, default=1)\n@click.option('--max-budget', type=int, required=True)\n@click.option('--sleep-interval', type=float, default=20.)\n@click.option('--max-hyperbands', type=int, default=5)\n@click.option('--reduction-ratio', type=float, default=math.e)\n@click.option('--mode', type=click.Choice(['maximize', 'minimize']), default='minimize')\n@click.option('--bohb-random-ratio', type=float, default=0.2)\n@click.option('--bohb-guided-ratio', type=float, default=0.6)\n@click.option('--bohb-random-size', type=int, default=64)\n@click.option('--bohb-guided-size', type=int, default=64)\n@click.option('--bohb-result-size-threshold', type=int)\n@click.option('--bohb-good-ratio', type=float, default=0.30)\n@click.option('--bohb-model-cache-time', type=float, default=900.)\n@click.option('--bohb-min-bandwidth', type=float, default=1e-3)\n@click.option('--bohb-bandwidth-estimation', default='normal_reference')\n@click.option('--bohb-bandwidth-factor', type=float, default=3)\n@click.option('--reset-nan-trial/--no-reset-nan-trial', default=True)\n@click.option('--early-stop-min-budget', type=int)\n@click.option('--early-stop-threshold', type=float)\n@click.option('--load')\ndef run(module, exp_path, min_budget, max_budget, reduction_ratio, max_hyperbands, sleep_interval, mode,\n bohb_random_ratio, bohb_guided_ratio, 
bohb_random_size, bohb_guided_size,\n bohb_result_size_threshold, bohb_good_ratio, bohb_model_cache_time,\n bohb_min_bandwidth, bohb_bandwidth_estimation, bohb_bandwidth_factor, reset_nan_trial, load,\n early_stop_min_budget, early_stop_threshold):\n exp_path = os.path.abspath(exp_path)\n trial_gen = Generator(module, exp_path)\n param_gen = BOHBParamGen(module, exp_path,\n random_ratio=bohb_random_ratio,\n random_sample_size=bohb_random_size,\n guided_ratio=bohb_guided_ratio,\n guided_sample_size=bohb_guided_size,\n result_size_threshold=bohb_result_size_threshold,\n good_ratio=bohb_good_ratio,\n model_cache_time=bohb_model_cache_time,\n mode=mode,\n min_bandwidth=bohb_min_bandwidth,\n bandwidth_estimation=bohb_bandwidth_estimation,\n bandwidth_factor=bohb_bandwidth_factor,\n early_stop_ratio=1. / reduction_ratio)\n driver = HyperbandDriver(experiment_dir=exp_path,\n trial_generator=trial_gen,\n param_generator=param_gen,\n min_budget=min_budget,\n max_budget=max_budget,\n reduction_ratio=reduction_ratio,\n sleep_interval=sleep_interval,\n max_hyperbands=max_hyperbands,\n mode=mode,\n reset_nan_trial=reset_nan_trial,\n early_stop_min_budget=early_stop_min_budget,\n early_stop_threshold=early_stop_threshold)\n if load is not None:\n driver.load_hyperband_data(load)\n driver.start()\n\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "numpy.random.normal", "scipy.stats.truncnorm.rvs", "numpy.clip" ] ]
brmprnk/jointomicscomp
[ "2074025f6b9847698c21f4c45cdb76cb6c583f4e" ]
[ "src/MVIB/training/vae.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.distributions import Normal, Independent\n\nfrom src.MVIB.utils.modules import Decoder\nfrom src.MVIB.training.base import RepresentationTrainer\nfrom src.MVIB.utils.schedulers import ExponentialScheduler\n\n\n###############\n# VAE Trainer #\n###############\nclass VAETrainer(RepresentationTrainer):\n def __init__(self, decoder_lr=1e-4, beta_start_value=1e-3, beta_end_value=1,\n beta_n_iterations=100000, beta_start_iteration=50000, **params):\n super(VAETrainer, self).__init__(**params)\n\n # Intialization of the decoder\n self.decoder = Decoder(self.z_dim)\n\n # Adding the parameters of the estimator to the optimizer\n self.opt.add_param_group(\n {'params': self.decoder.parameters(), 'lr': decoder_lr}\n )\n\n # Defining the prior distribution as a factorized normal distribution\n self.mu = nn.Parameter(torch.zeros(self.z_dim), requires_grad=False)\n self.sigma = nn.Parameter(torch.ones(self.z_dim), requires_grad=False)\n self.prior = Normal(loc=self.mu, scale=self.sigma)\n self.prior = Independent(self.prior, 1)\n\n self.beta_scheduler = ExponentialScheduler(start_value=beta_start_value, end_value=beta_end_value,\n n_iterations=beta_n_iterations, start_iteration=beta_start_iteration)\n\n def _get_items_to_store(self):\n items_to_store = super(VAETrainer, self)._get_items_to_store()\n\n # Add the mutual information estimator parameters to items_to_store\n items_to_store['decoder'] = self.decoder.state_dict()\n return items_to_store\n\n def _compute_loss(self, data):\n # Ignore the second view and the label\n x, _, _ = data\n\n # Encode a batch of data\n p_z_given_v1 = self.encoder(x)\n\n # Sample from the posteriors with reparametrization\n z = p_z_given_v1.rsample()\n\n # Rate\n rate = p_z_given_v1.log_prob(z) - self.prior.log_prob(z)\n\n # Distortion\n prob_x_given_z = self.decoder(z)\n distortion = -prob_x_given_z.log_prob(x.view(x.shape[0], -1))\n\n # Average across the batch\n rate = rate.mean()\n distortion = distortion.mean()\n\n # Update the value of beta according to the policy\n beta = self.beta_scheduler(self.iterations)\n\n # Logging the components\n self._add_loss_item('loss/distortion', distortion.item())\n self._add_loss_item('loss/rate', rate.item())\n self._add_loss_item('loss/beta', beta)\n\n # Computing the loss function\n loss = distortion + beta * rate\n\n return loss\n\n\n" ]
[ [ "torch.zeros", "torch.distributions.Normal", "torch.distributions.Independent", "torch.ones" ] ]
dogeplusplus/torch-actor-critic
[ "c12f5a6845fe27cf0f2b231a88e7b6d9f5985960" ]
[ "buffer/replay_buffer.py" ]
[ "import numpy as np\n\nfrom random import sample\nfrom torch import FloatTensor\nfrom dataclasses import dataclass\n\n\n@dataclass(frozen=True)\nclass Batch:\n states: FloatTensor\n actions: FloatTensor\n rewards: FloatTensor\n next_states: FloatTensor\n done: FloatTensor\n\n\nclass ReplayBuffer:\n def __init__(self, size: int, obs_dim: int, act_dim: int):\n self.state = np.zeros((size, obs_dim), dtype=np.float32)\n self.actions = np.zeros((size, act_dim), dtype=np.float32)\n self.rewards = np.zeros(size, dtype=np.float32)\n self.next_state = np.zeros((size, obs_dim), dtype=np.float32)\n self.done = np.zeros(size, dtype=np.bool)\n\n self.ptr = 0\n self.size = 0\n self.max_size = size\n\n def store(\n self,\n obs: np.ndarray,\n act: np.ndarray,\n rew: np.ndarray,\n next_obs: np.ndarray,\n done: np.ndarray\n ):\n self.state[self.ptr] = obs\n self.actions[self.ptr] = act\n self.rewards[self.ptr] = rew\n self.next_state[self.ptr] = next_obs\n self.done[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample(self, batch_size) -> Batch:\n idx = sample(range(self.size), batch_size)\n\n return Batch(\n FloatTensor(self.state[idx]),\n FloatTensor(self.actions[idx]),\n FloatTensor(self.rewards[idx]),\n FloatTensor(self.next_state[idx]),\n FloatTensor(self.done[idx]),\n )\n" ]
[ [ "torch.FloatTensor", "numpy.zeros" ] ]
moebg/retailbox
[ "95541b654201e3b8108ec1a0132a1543283a0c41" ]
[ "retailbox/recommender.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nimport time\nimport pandas as pd\nimport pickle\nimport numpy as np\nimport os.path\nimport scipy.sparse as sparse\nfrom scipy.sparse.linalg import spsolve\nimport random\nimport warnings\nimport implicit\n\nfrom scipy.sparse.linalg import spsolve\nfrom output import printGreen, printRed, printYellow, display_customer_information, display_recommender_items\nfrom implicit_als import implicit_weighted_als\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom data import preprocess_data_rec_engine, split_data_mask, validate_input, get_items_purchased, rec_items, list_rec, lookup_customer_id\nfrom data import validate_customer_id, search_customer\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef recommender(customer_id, status):\n # Start time\n start = time.time()\n if status:\n printGreen('✔ RetailBox started..\\t\\t{0:.1f}s'.format(time.time() - start))\n start = time.time()\n\n # Validate User Input\n validate_customer_id(customer_id)\n\n # Load Dataframe and create item_table, purchase matrix, etc.\n data = preprocess_data_rec_engine(status=True)\n\n item_table = data[0]\n purchase_sparse_matrix = data[1]\n customers = data[2]\n products = data[3]\n quantity = data[4]\n\n if status:\n printGreen('✔ Processed Data..\\t\\t{0:.1f}s'.format(time.time() - start))\n start = time.time()\n\n # Split Data (Training/Test Split)\n training_test_split_data = split_data_mask(purchase_sparse_matrix, pct_test = 0.2)\n \n product_training_set = training_test_split_data[0]\n product_test_set = training_test_split_data[1]\n product_user_altered = training_test_split_data[2]\n\n if status:\n printGreen('✔ Split Data into Training and Test Sets..\\t\\t{0:.1f}s'.format(time.time() - start))\n start = time.time()\n \n # Train Recommendation Engine on given algorithm\n alpha = 15\n recommender_vecs = implicit.alternating_least_squares((product_training_set * alpha).astype('double'),\n factors = 20,\n regularization = 0.1,\n iterations = 50)\n\n user_vecs = recommender_vecs[0]\n item_vecs = recommender_vecs[1]\n \n customers_arr = np.array(customers)\n products_arr = np.array(products)\n\n if status:\n printGreen('✔ Recommender System Training Done..\\t\\t{0:.1f}s'.format(time.time() - start))\n start = time.time()\n\n # Lookup customer id\n cid = lookup_customer_id(customer_id)\n\n # Generate Recommendations for Customer\n rec_output = rec_items(cid, product_training_set, user_vecs,\n item_vecs, customers_arr, products_arr,\n item_table)\n\n # Display Customer\n df = pd.read_pickle('../data/final/df_final.pkl')\n table_pickle_file = open('../data/final/df_customer_table.pkl', \"rb\")\n customer_table = pickle.load(table_pickle_file)\n table_pickle_file.close() \n search_customer(customer_id, df, customer_table)\n\n # Display Item Recommendations\n recommended_items_list = list_rec(rec_output)\n display_recommender_items(recommended_items_list)\n \n\ndef main():\n recommender(customer_id=5,\n status=True)\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_pickle", "numpy.array" ] ]
AliOsm/semantic-question-similarity
[ "35fa6e3da5178ed1942437233abf878c317eae36" ]
[ "5_infer.py" ]
[ "import csv\nimport argparse\n\nimport numpy as np\nimport pickle as pkl\n\nfrom os import remove\nfrom os.path import join\nfrom keras.models import load_model\nfrom keras_self_attention import SeqWeightedAttention\nfrom keras_ordered_neurons import ONLSTM\n\nfrom helpers import load_embeddings_dict\nfrom helpers import map_sentence, f1\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-dir', default='data_dir')\n parser.add_argument('--embeddings-type', default='elmo', choices=['elmo', 'bert'])\n parser.add_argument('--model-path', default='checkpoints/epoch100.h5')\n parser.add_argument('--threshold', default=0.5, type=float)\n args = parser.parse_args()\n\n embeddings_dict = load_embeddings_dict(join(args.data_dir, '%s_dict.pkl' % args.embeddings_type))\n\n model = load_model(\n filepath=args.model_path,\n custom_objects={\n 'f1': f1,\n 'SeqWeightedAttention': SeqWeightedAttention,\n 'ONLSTM': ONLSTM\n }\n )\n\n data = list()\n with open(join(args.data_dir, 'test_processed.csv'), 'r') as file:\n reader = csv.reader(file)\n for idx, row in enumerate(reader):\n print('Prepare Data: %s' % (idx + 1), end='\\r')\n data.append((\n map_sentence(row[0], embeddings_dict),\n map_sentence(row[1], embeddings_dict),\n int(row[2])\n ))\n\n try:\n remove(join(args.data_dir, 'submit.csv'))\n except:\n pass\n\n with open(join(args.data_dir, 'submit.csv'), 'w') as file:\n writer = csv.writer(file)\n writer.writerow(['QuestionPairID', 'Prediction'])\n\n for idx, example in enumerate(data):\n print('Predicting Example: %s' % (idx + 1), end='\\r')\n prediction = model.predict([[np.array(example[0])], [np.array(example[1])]]).squeeze()\n if prediction >= args.threshold:\n writer.writerow([example[2], 1])\n else:\n writer.writerow([example[2], 0])\n" ]
[ [ "numpy.array" ] ]
CASIA-IVA-Lab/DCFST
[ "ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b", "ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b" ]
[ "ltr/dataset/tracking_net.py", "pytracking/tracker/sbdt50/sbdt.py" ]
[ "import torch\nimport os\nimport os.path\nimport numpy as np\nimport pandas\nfrom collections import OrderedDict\n\nfrom ltr.data.image_loader import default_image_loader\nfrom .base_dataset import BaseDataset\nfrom ltr.admin.environment import env_settings\n\n\ndef list_sequences(root, set_ids):\n \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n args:\n root: Root directory to TrackingNet\n set_ids: Sets (0-11) which are to be used\n\n returns:\n list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n \"\"\"\n sequence_list = []\n\n for s in set_ids:\n anno_dir = os.path.join(root, \"TRAIN_\" + str(s), \"anno\")\n\n sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n sequence_list += sequences_cur_set\n\n return sequence_list\n\n\nclass TrackingNet(BaseDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=default_image_loader, set_ids=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__(root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n def get_name(self):\n return 'trackingnet'\n\n def _read_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n anno = self._read_anno(seq_id)\n target_visible = (anno[:,2]>10) & (anno[:,3]>10) & (anno[:,2]/anno[:,3]<5) & (anno[:,3]/anno[:,2]<5)\n return anno, target_visible\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self._read_anno(seq_id)\n\n # Return as list of tensors\n anno_frames = [anno[f_id, :] for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n", "import os\nimport cv2\nimport sys\nimport pdb\nimport math\nimport time\nimport random\nimport shutil\nimport numpy as np\n\nimport torch\nimport torch.nn\nimport torch.nn.functional as F\n\nfrom pytracking import TensorList\nfrom pytracking.features import augmentation\nfrom pytracking.tracker.base import BaseTracker\nfrom pytracking.utils.plotting import show_tensor\nfrom pytracking.features.preprocessing import sample_patch\nfrom pytracking.features.preprocessing import numpy_to_torch\n\nclass SBDT(BaseTracker):\n\n # ------ MAIN INITIALIZE ------#\n def initialize(self, image, state, *args, **kwargs):\n self.frame_num = 1\n\n if self.params.output_image:\n # Make\n if not os.path.exists(self.params.output_image_path):\n os.mkdir(self.params.output_image_path)\n\n NUM = len(os.listdir(self.params.output_image_path))\n self.params.output_image_path = os.path.join(self.params.output_image_path, \"%d\"%(NUM+1))\n os.mkdir(self.params.output_image_path)\n\n # For debugging and display only\n image_show = image.copy() \n\n # For debugging\n torch.set_printoptions(threshold=20000) \n \n # Fix random seed\n np.random.seed(1024)\n torch.manual_seed(1024)\n torch.cuda.manual_seed_all(1024)\n\n # HEIGHT and WIDTH\n self.IMG_HEIGHT, self.IMG_WIDTH = image.shape[0], image.shape[1]\n\n # Initialize tracking model\n self.params.model.initialize()\n\n # Get target position and target size (y, x, h, w) (state = [xt, yt, w, h])\n self.target_pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n self.target_sz = torch.Tensor([state[3], state[2]])\n self.initial_target_sz = self.target_sz.clone()\n\n # Set sample size and target area of search region (N)\n self.img_sample_sz = torch.Tensor([math.sqrt(self.params.img_sample_area)]) * torch.ones(2)\n self.target_sample_area = self.params.img_sample_area / self.params.search_padding**2\n\n # Get sampling area, sampling ratio and target size\n self.search_area = torch.prod(self.target_sz * self.params.search_padding)\n 
self.sample_scale = torch.sqrt(self.search_area / self.params.img_sample_area)\n self.target_sample_sz = self.target_sz / self.sample_scale\n\n # Initialize centers of proposals for locator (N)\n self.locator_proposals_xc, self.locator_proposals_yc, self.locator_labels = self.init_locator_proposals_center_function()\n self.locator_proposals = torch.zeros(1, self.locator_labels.shape[0], 4, device=self.params.device)\n assert(self.locator_labels.max().item()==1.0)\n\n # Creat output score window (N)\n self.output_window = None\n if getattr(self.params, 'window_output', True):\n self.output_window = self.init_output_window_function()\n\n # Extract transform samples\n im_tensor = numpy_to_torch(image)\n train_samples = self.generate_init_samples(im_tensor, self.target_pos, self.sample_scale)\n train_samples = train_samples.cuda()\n\n # Setup scale bounds\n self.image_sz = torch.Tensor([self.IMG_HEIGHT, self.IMG_WIDTH])\n self.min_scale_factor = torch.max(10 / self.target_sz)\n self.max_scale_factor = torch.min(self.image_sz / self.target_sz)\n\n # Generate locator proposals\n batch_size = train_samples.shape[0]\n locator_proposals = self.get_locator_proposals(self.target_sample_sz)\n locator_proposals = locator_proposals.repeat(batch_size,1,1)\n\n # Extract backbone features\n backbone_features = self.params.model.extract_backbone_features(train_samples)\n\n # Extract target iounet features \n self.target_iou_feat = self.init_iou_net(self.target_pos, self.target_sz, self.sample_scale, backbone_features)\n\n # Extract locator features and train locator model\n train_locator_features = self.params.model.extract_locator_features(backbone_features, locator_proposals)\n self.locator_XTX = torch.matmul(train_locator_features.permute(0,2,1), train_locator_features).mean(dim=0)\n self.locator_XTY = torch.matmul(train_locator_features.permute(0,2,1), self.locator_labels).mean(dim=0)\n self.locator_regularization = self.params.regularization * torch.eye(self.locator_XTX.shape[1], device=self.params.device)\n self.locator_model = self.train_locator_model(self.locator_XTX+self.locator_regularization, self.locator_XTY)\n\n # Save initial locator feature model\n self.locator_XTX_initial = self.locator_XTX.clone()\n self.locator_XTY_initial = self.locator_XTY.clone()\n\n # Initialize the detect region of hard negative samples\n self.hard_negative_region_mask = self.init_hard_negative_region_function()\n\n # Initialize the weight of first frame\n self.current_initial_frame_weight = 1.0\n\n # Output result image\n if self.params.output_image:\n self.output_result_image(image_show, state)\n\n # ------ MAIN TRACK ------#\n def track(self, image):\n self.frame_num += 1\n\n # For debugging and display only\n if self.params.output_image:\n image_show = image.copy()\n\n # Initialization\n hard_flag = False\n \n # Conver to tensor and GPU\n image_cuda = self.numpy_to_tensor_gpu(image)\n\n # ------- LOCALIZATION ------- #\n sample_pos = self.target_pos.clone()\n sample_scale = self.sample_scale.clone()\n target_sample_sz = self.target_sample_sz.clone()\n\n # Sample and extract backbone features\n test_sample = sample_patch(image_cuda, sample_pos, sample_scale*self.img_sample_sz, self.img_sample_sz)\n test_backbone_features = self.params.model.extract_backbone_features(test_sample)\n\n # Extract locator features and calcualte the localization score\n test_locator_proposals = self.get_locator_proposals(target_sample_sz)\n test_locator_features = self.params.model.extract_locator_features(test_backbone_features, 
test_locator_proposals).squeeze()\n test_locator_score = torch.mm(test_locator_features, self.locator_model)\n\n # Window output and find argmax\n if getattr(self.params, 'window_output', False):\n test_locator_score = test_locator_score * self.output_window\n max_score, max_id = torch.max(test_locator_score, dim=0)\n max_score, max_id = max_score.item(), max_id.item()\n\n # When target is found\n if max_score > self.params.target_not_found:\n # Update target position\n self.target_pos[1] += (self.locator_proposals_xc[max_id].item() - self.img_sample_sz[1]*0.5) * sample_scale # x\n self.target_pos[0] += (self.locator_proposals_yc[max_id].item() - self.img_sample_sz[0]*0.5) * sample_scale # y\n\n # ------- REFINEMENT ------- # \n # Extract iou backbone features and refine target box\n test_iou_backbone_features = self.params.model.extract_iou_features(test_backbone_features) \n new_target_box = self.refine_target_box(self.target_pos, self.target_sz, sample_pos, sample_scale, test_iou_backbone_features)\n\n # Update target box\n if new_target_box is not None:\n self.target_pos = sample_pos + (new_target_box[:2] + new_target_box[2:]/2 - (self.img_sample_sz - 1) / 2).flip((0,)) * sample_scale\n self.target_sz = self.params.scale_damp * self.target_sz + (1 - self.params.scale_damp) * new_target_box[2:].flip((0,)) * sample_scale\n\n self.target_sz = torch.min(self.target_sz, self.initial_target_sz*self.max_scale_factor)\n self.target_sz = torch.max(self.target_sz, self.initial_target_sz*self.min_scale_factor)\n\n # Update the sampling message\n self.search_area = torch.prod(self.target_sz * self.params.search_padding)\n self.sample_scale = torch.sqrt(self.search_area / self.params.img_sample_area)\n self.target_sample_sz = self.target_sz / self.sample_scale\n\n # ------- UPDAT FEATURE MODEL------- #\n train_sample = sample_patch(image_cuda, self.target_pos, self.sample_scale*self.img_sample_sz, self.img_sample_sz)\n train_backbone_features = self.params.model.extract_backbone_features(train_sample)\n\n # Extract locator features\n train_locator_proposals = self.get_locator_proposals(self.target_sample_sz)\n train_locator_features = self.params.model.extract_locator_features(train_backbone_features, train_locator_proposals).squeeze()\n\n # Hard negtive mining and Adaptive learning rate\n if self.params.hard_negative_mining:\n train_locator_score = torch.mm(train_locator_features, self.locator_model)\n max_score = train_locator_score.max()\n train_locator_score = train_locator_score * self.hard_negative_region_mask\n if (train_locator_score.max() > self.params.hard_negative_threshold*max_score) and (train_locator_score.max() > self.params.target_not_found):\n hard_flag = True\n learning_rate = self.params.hard_negative_learning_rate\n else:\n learning_rate = self.params.learning_rate\n\n # Update locator model\n self.locator_XTX = (1 - learning_rate) * self.locator_XTX + learning_rate * torch.mm(train_locator_features.t(), train_locator_features)\n self.locator_XTY = (1 - learning_rate) * self.locator_XTY + learning_rate * torch.mm(train_locator_features.t(), self.locator_labels)\n\n # Adjust weight of initial frame\n self.current_initial_frame_weight = (1 - learning_rate) * self.current_initial_frame_weight\n if self.current_initial_frame_weight < self.params.init_samples_minimum_weight:\n diff = self.params.init_samples_minimum_weight - self.current_initial_frame_weight\n coff = diff / (1 - self.current_initial_frame_weight)\n self.locator_XTX = (1 - coff) * self.locator_XTX + coff * 
self.locator_XTX_initial\n self.locator_XTY = (1 - coff) * self.locator_XTY + coff * self.locator_XTY_initial\n self.current_initial_frame_weight = self.params.init_samples_minimum_weight\n\n # ------- TRAIN ------- #\n if (self.frame_num % self.params.train_skipping == 0) or (hard_flag):\n self.locator_model = self.train_locator_model(self.locator_XTX+self.locator_regularization, self.locator_XTY, self.locator_model)\n\n # ------- RETURN ------- #\n # Return new state\n new_state = torch.cat((self.target_pos[[1,0]] - self.target_sz[[1,0]]*0.5, self.target_sz[[1,0]]))\n new_state[0], new_state[1] = new_state[0].clamp(0), new_state[1].clamp(0)\n new_state[2] = new_state[2].clamp(0, self.IMG_WIDTH -new_state[0])\n new_state[3] = new_state[3].clamp(0, self.IMG_HEIGHT-new_state[1])\n\n # Output result image\n if self.params.output_image:\n self.output_result_image(image_show, new_state)\n\n return new_state.tolist()\n\n def numpy_to_tensor_gpu(self, image):\n image = torch.from_numpy(image)\n image = image.cuda()\n image = image.permute(2,0,1).unsqueeze(0).to(torch.float32)\n return image\n\n def init_locator_proposals_center_function(self):\n search_padding = self.params.search_padding\n target_sample_area = self.target_sample_area\n sigma_factor = self.params.output_sigma_factor\n proposals_num = self.params.proposals_num\n WIDTH, HEIGHT = self.img_sample_sz[1], self.img_sample_sz[0]\n\n ## uniform proposals\n proposals_sqrt = int(math.sqrt(proposals_num))\n x_step = ((WIDTH - WIDTH/search_padding) / (proposals_sqrt-1))\n y_step = ((HEIGHT - HEIGHT/search_padding) / (proposals_sqrt-1))\n\n proposals_xc = torch.arange(proposals_sqrt).repeat(proposals_sqrt).type(torch.float32) * x_step + WIDTH/(search_padding*2)\n proposals_yc = torch.arange(proposals_sqrt).repeat(proposals_sqrt,1).t().reshape(-1).type(torch.float32) * y_step + HEIGHT/(search_padding*2)\n\n ## creat label\n x_dist = proposals_xc - WIDTH*0.5\n y_dist = proposals_yc - HEIGHT*0.5\n\n sigma = sigma_factor * math.sqrt(target_sample_area)\n proposals_label = torch.exp(-0.5 * (x_dist.pow(2)+y_dist.pow(2)) / sigma**2)\n proposals_label = proposals_label.view(-1,1)\n\n proposals_xc = proposals_xc.to(self.params.device)\n proposals_yc = proposals_yc.to(self.params.device)\n proposals_label = proposals_label.to(self.params.device)\n return proposals_xc, proposals_yc, proposals_label\n\n def init_labels_function(self):\n proposals_xc = self.proposals_xc\n proposals_yc = self.proposals_yc\n sigma_factor = self.params.output_sigma_factor\n target_sample_area = self.target_sample_area\n\n WIDTH, HEIGHT = self.img_sample_sz[0], self.img_sample_sz[1]\n x_dist = proposals_xc - (WIDTH * 0.5).item()\n y_dist = proposals_yc - (HEIGHT * 0.5).item()\n\n sigma = sigma_factor * math.sqrt(target_sample_area)\n labels = torch.exp(-0.5 * (x_dist.pow(2)+y_dist.pow(2))/sigma**2)\n labels = labels.to(self.params.device).reshape(-1,1)\n return labels\n\n def init_output_window_function(self):\n proposals_xc = self.locator_proposals_xc\n proposals_yc = self.locator_proposals_yc\n target_sample_area = self.target_sample_area\n sigma_factor = self.params.window_sigma_factor\n window_min_value = self.params.window_min_value\n WIDTH, HEIGHT = self.img_sample_sz[1], self.img_sample_sz[0]\n\n x_dist = proposals_xc - 0.5*WIDTH.item()\n y_dist = proposals_yc - 0.5*HEIGHT.item()\n sigma = sigma_factor * math.sqrt(target_sample_area)\n output_window = torch.exp(-0.5 * (x_dist.pow(2)+y_dist.pow(2)) / sigma**2)\n output_window = output_window.clamp(window_min_value)\n 
output_window = output_window.view(-1,1)\n return output_window\n\n def init_hard_negative_region_function(self):\n proposals_xc = self.locator_proposals_xc\n proposals_yc = self.locator_proposals_yc\n target_sample_area = self.target_sample_area\n distance_ratio = self.params.hard_negative_distance_ratio\n WIDTH, HEIGHT = self.img_sample_sz[1], self.img_sample_sz[0]\n\n x_dist = proposals_xc - 0.5*WIDTH.item()\n y_dist = proposals_yc - 0.5*HEIGHT.item()\n distance = torch.sqrt(x_dist.pow(2)+y_dist.pow(2))\n distance_threshold = math.sqrt(target_sample_area*distance_ratio**2)\n\n mask = torch.zeros(proposals_xc.shape, device=self.params.device)\n mask[distance>distance_threshold] = 1.0\n mask = mask.view(-1,1)\n return mask\n\n def generate_init_samples(self, im: torch.Tensor, target_pos, sample_scale) -> TensorList:\n # Compute augmentation size\n aug_expansion_factor = getattr(self.params, 'augmentation_expansion_factor', None)\n aug_expansion_sz = self.img_sample_sz.clone()\n aug_output_sz = None\n if aug_expansion_factor is not None and aug_expansion_factor != 1:\n aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n aug_expansion_sz = aug_expansion_sz.float()\n aug_output_sz = self.img_sample_sz.long().tolist()\n\n # Random shift operator\n get_rand_shift = lambda: None\n\n # Create transofmations\n self.transforms = [augmentation.Identity(aug_output_sz)]\n if 'shift' in self.params.augmentation_method:\n self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation_method['shift']])\n if 'relativeshift' in self.params.augmentation_method:\n get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation_method['relativeshift']])\n if 'fliplr' in self.params.augmentation_method and self.params.augmentation_method['fliplr']:\n self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n if 'blur' in self.params.augmentation_method:\n self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation_method['blur']])\n if 'scale' in self.params.augmentation_method:\n self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation_method['scale']])\n if 'rotate' in self.params.augmentation_method:\n self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in self.params.augmentation_method['rotate']])\n\n init_sample = sample_patch(im, target_pos, sample_scale*aug_expansion_sz, aug_expansion_sz)\n init_samples = torch.cat([T(init_sample) for T in self.transforms])\n if not self.params.augmentation:\n init_samples = init_samples[0:1,...]\n return init_samples\n\n def get_iounet_box(self, target_pos, target_sz, sample_pos, sample_scale):\n \"\"\"All inputs in original image coordinates\"\"\"\n box_center = (target_pos - sample_pos) / sample_scale + (self.img_sample_sz - 1) / 2\n box_sz = target_sz / sample_scale\n target_ul = box_center - (box_sz - 1) / 2\n return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])\n\n def init_iou_net(self, target_pos, target_sz, sample_scale, init_backbone_features):\n # Setup IoU net\n for p in self.params.model.iou_predictor.parameters():\n p.requires_grad = False\n\n # Get target 
boxes and convert\n target_boxes = self.get_iounet_box(target_pos, target_sz, target_pos.round(), sample_scale)\n target_boxes = target_boxes.unsqueeze(0).to(self.params.device)\n\n # Remove other augmentations such as rotation\n iou_backbone_features = TensorList([x[:target_boxes.shape[0],...] for x in init_backbone_features])\n\n # Extract target IoU feat\n with torch.no_grad():\n target_iou_feat = self.params.model.iou_predictor.get_filter(iou_backbone_features, target_boxes)\n target_iou_feat = TensorList([x.detach().mean(0) for x in target_iou_feat])\n return target_iou_feat\n\n def get_locator_proposals(self, target_sample_sz):\n proposals = self.locator_proposals\n proposals_xc = self.locator_proposals_xc\n proposals_yc = self.locator_proposals_yc\n\n proposals[0, :, 0] = proposals_xc - 0.5*target_sample_sz[1].item()\n proposals[0, :, 1] = proposals_yc - 0.5*target_sample_sz[0].item()\n proposals[0, :, 2] = target_sample_sz[1].item()\n proposals[0, :, 3] = target_sample_sz[0].item()\n return proposals\n\n def train_locator_model(self, model_XTX, model_XTY, model=None):\n if model is None:\n model = torch.potrs(model_XTY, torch.potrf(model_XTX))\n else:\n for _ in range(30):\n model, _ = torch.trtrs(model_XTY - torch.mm(torch.triu(model_XTX, diagonal=1), model), torch.tril(model_XTX, diagonal=0), upper=False)\n return model\n\n def refine_target_box(self, target_pos, target_sz, sample_pos, sample_scale, iou_backbone_bone_features):\n top_k = self.params.iounet_k\n jitter_sz = self.params.box_jitter_sz\n jitter_pos = self.params.box_jitter_pos\n refine_num = self.params.box_refinement_iter\n max_aspect_ratio = self.params.maximal_aspect_ratio\n num_init_random_boxes = self.params.num_init_random_boxes\n\n # Initial boxes for refinement\n init_box = self.get_iounet_box(target_pos, target_sz, sample_pos, sample_scale)\n init_boxes = init_box.view(1,4).clone()\n if num_init_random_boxes > 0:\n # Get random initial boxes\n square_box_sz = init_box[2:].prod().sqrt()\n rand_factor = square_box_sz * torch.cat([jitter_pos * torch.ones(2), jitter_sz * torch.ones(2)])\n minimal_edge_size = init_box[2:].min()/3\n rand_bb = (torch.rand(num_init_random_boxes, 4) - 0.5) * rand_factor\n new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)\n new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]\n init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)\n init_boxes = torch.cat([init_box.view(1,4), init_boxes])\n\n # Refine boxes by maximizing iou\n output_boxes, output_iou = self.optimize_boxes(iou_backbone_bone_features, init_boxes)\n\n # Remove weird boxes with extreme aspect ratios\n output_boxes[:, 2:].clamp_(1)\n aspect_ratio = output_boxes[:,2] / output_boxes[:,3]\n keep_ind = (aspect_ratio < max_aspect_ratio) * (aspect_ratio > 1/max_aspect_ratio)\n output_boxes = output_boxes[keep_ind,:]\n output_iou = output_iou[keep_ind]\n\n # If no box found\n if output_boxes.shape[0] == 0:\n return None\n\n # Take average of top k boxes\n top_k = min(top_k, output_boxes.shape[0])\n _, inds = torch.topk(output_iou, top_k)\n output_boxes = output_boxes[inds, :].mean(0).cpu()\n return output_boxes \n\n def optimize_boxes(self, iou_features, init_boxes):\n # Optimize iounet boxes\n output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n\n for i_ in range(self.params.box_refinement_iter):\n # forward pass\n bb_init = output_boxes.clone().detach()\n bb_init.requires_grad = True\n\n outputs = self.params.model.iou_predictor.predict_iou(self.target_iou_feat, iou_features, 
bb_init)\n\n if isinstance(outputs, (list, tuple)):\n outputs = outputs[0]\n\n outputs.backward(gradient = torch.ones_like(outputs))\n\n # Update proposal\n output_boxes = bb_init + (bb_init.grad*100).round()/100 * bb_init[:, :, 2:].repeat(1, 1, 2)\n output_boxes.detach_()\n\n return output_boxes.view(-1,4), outputs.detach().view(-1)\n\n def output_result_image(self, image, state):\n if self.params.output_image:\n if not os.path.exists(self.params.output_image_path):\n os.mkdir(self.params.output_image_path)\n cv2.rectangle(image, (int(state[0]),int(state[1])),(int(state[0]+state[2]),int(state[1]+state[3])), (255,0,0), 3)\n cv2.imwrite(os.path.join(self.params.output_image_path,'{}.jpg'.format(self.frame_num)), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n'''\nimport cv2\nfor i in range(train_samples.shape[0]):\n output_dir = '/home/zhenglinyu2/SBDT_tracking/debug/transform_image/'\n count = len(os.listdir(output_dir))\n transform_image = train_samples[i,...].permute(1,2,0)\n transform_image = transform_image.data.numpy()\n cv2.imwrite(os.path.join(output_dir,'{}.jpg'.format(count+1)),transform_image))\n'''\n\n'''\ntorch.cuda.synchronize()\nstart = time.time()\ntorch.cuda.synchronize()\nprint(time.time() - start)\n'''\n" ]
[ [ "pandas.read_csv", "torch.tensor" ], [ "torch.cat", "torch.prod", "torch.ones", "torch.set_printoptions", "torch.eye", "torch.topk", "torch.sqrt", "torch.tril", "torch.manual_seed", "torch.Tensor", "torch.zeros", "torch.potrf", "torch.cuda.manual_seed_all", "torch.min", "torch.max", "torch.mm", "torch.rand", "torch.arange", "numpy.random.seed", "torch.no_grad", "torch.from_numpy", "torch.ones_like", "torch.triu" ] ]
tuantranf/doctr
[ "69f9ced542999ef5fa35f3153d7a06661d3c4d3f" ]
[ "doctr/models/detection/linknet/tensorflow.py" ]
[ "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\n# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization\n\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import Model, Sequential, layers\n\nfrom doctr.models.classification import resnet18\nfrom doctr.models.utils import IntermediateLayerGetter, conv_sequence, load_pretrained_params\nfrom doctr.utils.repr import NestedObject\n\nfrom .base import LinkNetPostProcessor, _LinkNet\n\n__all__ = ['LinkNet', 'linknet_resnet18']\n\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'linknet_resnet18': {\n 'mean': (0.798, 0.785, 0.772),\n 'std': (0.264, 0.2749, 0.287),\n 'input_shape': (1024, 1024, 3),\n 'url': None,\n },\n}\n\n\ndef decoder_block(in_chan: int, out_chan: int, stride: int, **kwargs: Any) -> Sequential:\n \"\"\"Creates a LinkNet decoder block\"\"\"\n\n return Sequential([\n *conv_sequence(in_chan // 4, 'relu', True, kernel_size=1, **kwargs),\n layers.Conv2DTranspose(\n filters=in_chan // 4,\n kernel_size=3,\n strides=stride,\n padding=\"same\",\n use_bias=False,\n kernel_initializer='he_normal'\n ),\n layers.BatchNormalization(),\n layers.Activation('relu'),\n *conv_sequence(out_chan, 'relu', True, kernel_size=1),\n ])\n\n\nclass LinkNetFPN(Model, NestedObject):\n \"\"\"LinkNet Decoder module\"\"\"\n\n def __init__(\n self,\n out_chans: int,\n in_shapes: List[Tuple[int, ...]],\n ) -> None:\n\n super().__init__()\n self.out_chans = out_chans\n strides = [2] * (len(in_shapes) - 1) + [1]\n i_chans = [s[-1] for s in in_shapes[::-1]]\n o_chans = i_chans[1:] + [out_chans]\n self.decoders = [\n decoder_block(in_chan, out_chan, s, input_shape=in_shape)\n for in_chan, out_chan, s, in_shape in zip(i_chans, o_chans, strides, in_shapes[::-1])\n ]\n\n def call(\n self,\n x: List[tf.Tensor]\n ) -> tf.Tensor:\n out = 0\n for decoder, fmap in zip(self.decoders, x[::-1]):\n out = decoder(out + fmap)\n return out\n\n def extra_repr(self) -> str:\n return f\"out_chans={self.out_chans}\"\n\n\nclass LinkNet(_LinkNet, keras.Model):\n \"\"\"LinkNet as described in `\"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation\"\n <https://arxiv.org/pdf/1707.03718.pdf>`_.\n\n Args:\n num_classes: number of channels for the output\n \"\"\"\n\n _children_names: List[str] = ['feat_extractor', 'fpn', 'classifier', 'postprocessor']\n\n def __init__(\n self,\n feat_extractor: IntermediateLayerGetter,\n fpn_channels: int = 64,\n num_classes: int = 1,\n assume_straight_pages: bool = True,\n cfg: Optional[Dict[str, Any]] = None,\n ) -> None:\n super().__init__(cfg=cfg)\n\n self.assume_straight_pages = assume_straight_pages\n\n self.feat_extractor = feat_extractor\n\n self.fpn = LinkNetFPN(fpn_channels, [_shape[1:] for _shape in self.feat_extractor.output_shape])\n self.fpn.build(self.feat_extractor.output_shape)\n\n self.classifier = Sequential([\n layers.Conv2DTranspose(\n filters=32,\n kernel_size=3,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer='he_normal',\n input_shape=self.fpn.decoders[-1].output_shape[1:],\n ),\n layers.BatchNormalization(),\n layers.Activation('relu'),\n *conv_sequence(32, 'relu', True, kernel_size=3, strides=1),\n layers.Conv2DTranspose(\n filters=num_classes,\n 
kernel_size=2,\n strides=2,\n padding=\"same\",\n use_bias=True,\n kernel_initializer='he_normal'\n ),\n ])\n\n self.postprocessor = LinkNetPostProcessor(assume_straight_pages=assume_straight_pages)\n\n def compute_loss(\n self,\n out_map: tf.Tensor,\n target: List[np.ndarray],\n edge_factor: float = 2.,\n ) -> tf.Tensor:\n \"\"\"Compute linknet loss, BCE with boosted box edges or focal loss. Focal loss implementation based on\n <https://github.com/tensorflow/addons/>`_.\n\n Args:\n out_map: output feature map of the model of shape N x H x W x 1\n target: list of dictionary where each dict has a `boxes` and a `flags` entry\n edge_factor: boost factor for box edges (in case of BCE)\n\n Returns:\n A loss tensor\n \"\"\"\n seg_target, seg_mask, edge_mask = self.build_target(target, out_map.shape[1:3])\n\n seg_target = tf.convert_to_tensor(seg_target, dtype=out_map.dtype)\n seg_mask = tf.convert_to_tensor(seg_mask, dtype=tf.bool)\n if edge_factor > 0:\n edge_mask = tf.convert_to_tensor(edge_mask, dtype=tf.bool)\n\n # Get the cross_entropy for each entry\n loss = tf.keras.losses.binary_crossentropy(seg_target, out_map, from_logits=True)[..., None]\n\n # Compute BCE loss with highlighted edges\n if edge_factor > 0:\n loss = tf.math.multiply(\n 1 + (edge_factor - 1) * tf.cast(edge_mask, out_map.dtype),\n loss\n )\n\n return tf.reduce_mean(loss[seg_mask])\n\n def call(\n self,\n x: tf.Tensor,\n target: Optional[List[np.ndarray]] = None,\n return_model_output: bool = False,\n return_preds: bool = False,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n\n feat_maps = self.feat_extractor(x, **kwargs)\n logits = self.fpn(feat_maps, **kwargs)\n logits = self.classifier(logits, **kwargs)\n\n out: Dict[str, tf.Tensor] = {}\n if return_model_output or target is None or return_preds:\n prob_map = tf.math.sigmoid(logits)\n if return_model_output:\n out[\"out_map\"] = prob_map\n\n if target is None or return_preds:\n # Post-process boxes\n out[\"preds\"] = [preds[0] for preds in self.postprocessor(prob_map.numpy())]\n\n if target is not None:\n loss = self.compute_loss(logits, target)\n out['loss'] = loss\n\n return out\n\n\ndef _linknet(\n arch: str,\n pretrained: bool,\n backbone_fn,\n fpn_layers: List[str],\n pretrained_backbone: bool = True,\n input_shape: Optional[Tuple[int, int, int]] = None,\n **kwargs: Any\n) -> LinkNet:\n\n pretrained_backbone = pretrained_backbone and not pretrained\n\n # Patch the config\n _cfg = deepcopy(default_cfgs[arch])\n _cfg['input_shape'] = input_shape or default_cfgs[arch]['input_shape']\n\n # Feature extractor\n feat_extractor = IntermediateLayerGetter(\n backbone_fn(\n pretrained=pretrained_backbone,\n include_top=False,\n input_shape=_cfg['input_shape'],\n ),\n fpn_layers,\n )\n\n # Build the model\n model = LinkNet(feat_extractor, cfg=_cfg, **kwargs)\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, _cfg['url'])\n\n return model\n\n\ndef linknet_resnet18(pretrained: bool = False, **kwargs: Any) -> LinkNet:\n \"\"\"LinkNet as described in `\"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation\"\n <https://arxiv.org/pdf/1707.03718.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import linknet_resnet18\n >>> model = linknet_resnet18(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text detection dataset\n\n Returns:\n text 
detection architecture\n \"\"\"\n\n return _linknet(\n 'linknet_resnet18',\n pretrained,\n resnet18,\n ['resnet_block_1', 'resnet_block_3', 'resnet_block_5', 'resnet_block_7'],\n **kwargs,\n )\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.math.sigmoid", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.reduce_mean", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.losses.binary_crossentropy", "tensorflow.cast" ] ]
Nguyendat-bit/U-net
[ "0ee5f0beb2513fa6031947e42a7ad398ad3c8a28" ]
[ "GeneratorCustom.py" ]
[ "import numpy as np\nfrom tensorflow.keras.utils import Sequence\nimport cv2\nimport tensorflow as tf \nclass DataGenerator(Sequence):\n def __init__(self, all_filenames, input_size = (256, 256), batch_size = 32, shuffle = True, seed = 123, encode: dict = None, encode_with_kmean = None, color_mode = 'hsv', function = None) -> None:\n super(DataGenerator, self).__init__()\n assert (encode != None and encode_with_kmean == None) or (encode == None and encode_with_kmean != None), 'Not empty !'\n assert color_mode == 'hsv' or color_mode == 'rgb' or color_mode == 'gray'\n self.all_filenames = all_filenames\n self.input_size = input_size\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.color_mode = color_mode\n self.encode = encode\n self.function = function\n self.kmean = encode_with_kmean\n np.random.seed(seed)\n self.on_epoch_end()\n def processing(self, mask):\n d = list(map(lambda x: self.encode[tuple(x)], mask.reshape(-1,3)))\n return np.array(d).reshape(*self.input_size, 1)\n def __len__(self):\n return int(np.floor(len(self.all_filenames) / self.batch_size))\n def __getitem__(self, index):\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n all_filenames_temp = [self.all_filenames[k] for k in indexes]\n X, Y = self.__data_generation(all_filenames_temp)\n return X, Y\n def on_epoch_end(self):\n self.indexes = np.arange(len(self.all_filenames))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n def __data_generation(self, all_filenames_temp):\n batch = len(all_filenames_temp)\n if self.color_mode == 'gray':\n X = np.empty(shape=(batch, *self.input_size, 1))\n else:\n X = np.empty(shape=(batch, *self.input_size,3))\n Y = np.empty(shape=(batch, *self.input_size, 1))\n for i, (fn, label_fn) in enumerate(all_filenames_temp):\n # img\n img = cv2.imread(fn)\n if self.color_mode == 'hsv':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n elif self.color_mode == 'rgb':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n elif self.color_mode == 'gray':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = tf.expand_dims(img, axis = 2)\n img = tf.image.resize(img, self.input_size, method = 'nearest')\n img = tf.cast(img, tf.float32)\n img /= 255.\n \n #mask\n mask = cv2.imread(label_fn, 0)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n mask = tf.image.resize(mask, self.input_size, method= 'nearest')\n mask = np.array(mask)\n if self.function:\n mask = self.function(mask)\n if self.encode:\n mask = self.processing(mask)\n if self.kmean:\n mask = self.kmean.predict(mask.reshape(-1,3)).reshape(*self.input_size, 1)\n mask = tf.cast(mask, tf.float32)\n X[i,] = img\n Y[i,] = mask\n return X, Y" ]
[ [ "numpy.array", "numpy.empty", "tensorflow.expand_dims", "numpy.random.seed", "numpy.random.shuffle", "tensorflow.image.resize", "tensorflow.cast" ] ]
bqhuyy/SlowFast-clean
[ "3dc000dc9fe1951ab70cb835bfb91b71a07d8f63" ]
[ "slowfast/utils/checkpoint.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"Functions that handle saving and loading of checkpoints.\"\"\"\n\nimport copy\nimport numpy as np\nimport os\nimport pickle\nfrom collections import OrderedDict\nimport torch\nfrom fvcore.common.file_io import PathManager\n\nimport slowfast.utils.distributed as du\nimport slowfast.utils.logging as logging\nfrom slowfast.utils.c2_model_loading import get_name_convert_func\n\nlogger = logging.get_logger(__name__)\n\n\ndef make_checkpoint_dir(path_to_job):\n \"\"\"\n Creates the checkpoint directory (if not present already).\n Args:\n path_to_job (string): the path to the folder of the current job.\n \"\"\"\n checkpoint_dir = os.path.join(path_to_job, \"checkpoints\")\n # Create the checkpoint dir from the master process\n if du.is_master_proc() and not PathManager.exists(checkpoint_dir):\n try:\n PathManager.mkdirs(checkpoint_dir)\n except Exception:\n pass\n return checkpoint_dir\n\n\ndef get_checkpoint_dir(path_to_job):\n \"\"\"\n Get path for storing checkpoints.\n Args:\n path_to_job (string): the path to the folder of the current job.\n \"\"\"\n return os.path.join(path_to_job, \"checkpoints\")\n\n\ndef get_path_to_checkpoint(path_to_job, epoch):\n \"\"\"\n Get the full path to a checkpoint file.\n Args:\n path_to_job (string): the path to the folder of the current job.\n epoch (int): the number of epoch for the checkpoint.\n \"\"\"\n name = \"checkpoint_epoch_{:05d}.pyth\".format(epoch)\n return os.path.join(get_checkpoint_dir(path_to_job), name)\n\n\ndef get_last_checkpoint(path_to_job):\n \"\"\"\n Get the last checkpoint from the checkpointing folder.\n Args:\n path_to_job (string): the path to the folder of the current job.\n \"\"\"\n\n d = get_checkpoint_dir(path_to_job)\n names = PathManager.ls(d) if PathManager.exists(d) else []\n names = [f for f in names if \"checkpoint\" in f]\n assert len(names), \"No checkpoints found in '{}'.\".format(d)\n # Sort the checkpoints by epoch.\n name = sorted(names)[-1]\n return os.path.join(d, name)\n\n\ndef has_checkpoint(path_to_job):\n \"\"\"\n Determines if the given directory contains a checkpoint.\n Args:\n path_to_job (string): the path to the folder of the current job.\n \"\"\"\n d = get_checkpoint_dir(path_to_job)\n files = PathManager.ls(d) if PathManager.exists(d) else []\n return any(\"checkpoint\" in f for f in files)\n\n\ndef is_checkpoint_epoch(cfg, cur_epoch, multigrid_schedule=None):\n \"\"\"\n Determine if a checkpoint should be saved on current epoch.\n Args:\n cfg (CfgNode): configs to save.\n cur_epoch (int): current number of epoch of the model.\n multigrid_schedule (List): schedule for multigrid training.\n \"\"\"\n if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH:\n return True\n if multigrid_schedule is not None:\n prev_epoch = 0\n for s in multigrid_schedule:\n if cur_epoch < s[-1]:\n period = max(\n (s[-1] - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1\n )\n return (s[-1] - 1 - cur_epoch) % period == 0\n prev_epoch = s[-1]\n\n return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0\n\n\ndef save_checkpoint(path_to_job, model, optimizer, epoch, cfg, best_top1_err, best_epoch, is_best=False):\n \"\"\"\n Save a checkpoint.\n Args:\n model (model): model to save the weight to the checkpoint.\n optimizer (optim): optimizer to save the historical state.\n epoch (int): current number of epoch of the model.\n cfg (CfgNode): configs to save.\n \"\"\"\n # Save checkpoints only from the master process.\n if not 
du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):\n return\n # Ensure that the checkpoint dir exists.\n PathManager.mkdirs(get_checkpoint_dir(path_to_job))\n # Omit the DDP wrapper in the multi-gpu setting.\n sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()\n normalized_sd = sub_to_normal_bn(sd)\n\n # Record the state.\n checkpoint = {\n \"epoch\": epoch,\n \"model_state\": normalized_sd,\n \"optimizer_state\": optimizer.state_dict(),\n \"cfg\": cfg.dump(),\n \"best_top1_err\": best_top1_err,\n \"best_epoch\": best_epoch,\n }\n # Write the checkpoint.\n if not is_best:\n path_to_checkpoint = get_path_to_checkpoint(path_to_job, epoch + 1)\n else:\n path_to_checkpoint = os.path.join(path_to_job, \"slowfast_best.pyth\")\n with PathManager.open(path_to_checkpoint, \"wb\") as f:\n torch.save(checkpoint, f)\n return path_to_checkpoint\n\n\ndef inflate_weight(state_dict_2d, state_dict_3d):\n \"\"\"\n Inflate 2D model weights in state_dict_2d to the 3D model weights in\n state_dict_3d. The details can be found in:\n Joao Carreira, and Andrew Zisserman.\n \"Quo vadis, action recognition? a new model and the kinetics dataset.\"\n Args:\n state_dict_2d (OrderedDict): a dict of parameters from a 2D model.\n state_dict_3d (OrderedDict): a dict of parameters from a 3D model.\n Returns:\n state_dict_inflated (OrderedDict): a dict of inflated parameters.\n \"\"\"\n state_dict_inflated = OrderedDict()\n for k, v2d in state_dict_2d.items():\n assert k in state_dict_3d.keys()\n v3d = state_dict_3d[k]\n # Inflate the weight of 2D conv to 3D conv.\n if len(v2d.shape) == 4 and len(v3d.shape) == 5:\n logger.info(\n \"Inflate {}: {} -> {}: {}\".format(k, v2d.shape, k, v3d.shape)\n )\n # Dimension need to be match.\n assert v2d.shape[-2:] == v3d.shape[-2:]\n assert v2d.shape[:2] == v3d.shape[:2]\n v3d = (\n v2d.unsqueeze(2).repeat(1, 1, v3d.shape[2], 1, 1) / v3d.shape[2]\n )\n elif v2d.shape == v3d.shape:\n v3d = v2d\n else:\n logger.info(\n \"Unexpected {}: {} -|> {}: {}\".format(\n k, v2d.shape, k, v3d.shape\n )\n )\n state_dict_inflated[k] = v3d.clone()\n return state_dict_inflated\n\n\ndef load_checkpoint(\n path_to_checkpoint,\n model,\n data_parallel=True,\n optimizer=None,\n inflation=False,\n convert_from_caffe2=False,\n):\n \"\"\"\n Load the checkpoint from the given file. 
If inflation is True, inflate the\n 2D Conv weights from the checkpoint to 3D Conv.\n Args:\n path_to_checkpoint (string): path to the checkpoint to load.\n model (model): model to load the weights from the checkpoint.\n data_parallel (bool): if true, model is wrapped by\n torch.nn.parallel.DistributedDataParallel.\n optimizer (optim): optimizer to load the historical state.\n inflation (bool): if True, inflate the weights from the checkpoint.\n convert_from_caffe2 (bool): if True, load the model from caffe2 and\n convert it to pytorch.\n Returns:\n (int): the number of training epoch of the checkpoint.\n \"\"\"\n assert PathManager.exists(\n path_to_checkpoint\n ), \"Checkpoint '{}' not found\".format(path_to_checkpoint)\n # Account for the DDP wrapper in the multi-gpu setting.\n ms = model.module if data_parallel else model\n if convert_from_caffe2:\n with PathManager.open(path_to_checkpoint, \"rb\") as f:\n caffe2_checkpoint = pickle.load(f, encoding=\"latin1\")\n state_dict = OrderedDict()\n name_convert_func = get_name_convert_func()\n for key in caffe2_checkpoint[\"blobs\"].keys():\n converted_key = name_convert_func(key)\n converted_key = c2_normal_to_sub_bn(converted_key, ms.state_dict())\n if converted_key in ms.state_dict():\n c2_blob_shape = caffe2_checkpoint[\"blobs\"][key].shape\n model_blob_shape = ms.state_dict()[converted_key].shape\n # Load BN stats to Sub-BN.\n if (\n len(model_blob_shape) == 1\n and len(c2_blob_shape) == 1\n and model_blob_shape[0] > c2_blob_shape[0]\n and model_blob_shape[0] % c2_blob_shape[0] == 0\n ):\n caffe2_checkpoint[\"blobs\"][key] = np.concatenate(\n [caffe2_checkpoint[\"blobs\"][key]]\n * (model_blob_shape[0] // c2_blob_shape[0])\n )\n c2_blob_shape = caffe2_checkpoint[\"blobs\"][key].shape\n\n if c2_blob_shape == tuple(model_blob_shape):\n state_dict[converted_key] = torch.tensor(\n caffe2_checkpoint[\"blobs\"][key]\n ).clone()\n logger.info(\n \"{}: {} => {}: {}\".format(\n key,\n c2_blob_shape,\n converted_key,\n tuple(model_blob_shape),\n )\n )\n else:\n logger.warn(\n \"!! {}: {} does not match {}: {}\".format(\n key,\n c2_blob_shape,\n converted_key,\n tuple(model_blob_shape),\n )\n )\n else:\n if not any(\n prefix in key for prefix in [\"momentum\", \"lr\", \"model_iter\"]\n ):\n logger.warn(\n \"!! 
{}: can not be converted, got {}\".format(\n key, converted_key\n )\n )\n ms.load_state_dict(state_dict, strict=False)\n epoch = -1\n best_top1_err = -1\n best_epoch = -1\n else:\n # Load the checkpoint on CPU to avoid GPU mem spike.\n with PathManager.open(path_to_checkpoint, \"rb\") as f:\n checkpoint = torch.load(f, map_location=\"cpu\")\n model_state_dict_3d = (\n model.module.state_dict() if data_parallel else model.state_dict()\n )\n checkpoint[\"model_state\"] = normal_to_sub_bn(\n checkpoint[\"model_state\"], model_state_dict_3d\n )\n if inflation:\n # Try to inflate the model.\n inflated_model_dict = inflate_weight(\n checkpoint[\"model_state\"], model_state_dict_3d\n )\n ms.load_state_dict(inflated_model_dict, strict=False)\n else:\n ms.load_state_dict(checkpoint[\"model_state\"])\n # Load the optimizer state (commonly not done when fine-tuning)\n if optimizer:\n optimizer.load_state_dict(checkpoint[\"optimizer_state\"])\n if \"epoch\" in checkpoint.keys():\n epoch = checkpoint[\"epoch\"]\n else:\n epoch = -1\n if \"best_top1_err\" in checkpoint.keys():\n best_top1_err = checkpoint[\"best_top1_err\"]\n else:\n best_top1_err = -1\n if \"best_epoch\" in checkpoint.keys():\n best_epoch = checkpoint[\"best_epoch\"]\n else:\n best_epoch = -1\n return epoch, best_top1_err, best_epoch\n\n\ndef sub_to_normal_bn(sd):\n \"\"\"\n Convert the Sub-BN paprameters to normal BN parameters in a state dict.\n There are two copies of BN layers in a Sub-BN implementation: `bn.bn` and\n `bn.split_bn`. `bn.split_bn` is used during training and\n \"compute_precise_bn\". Before saving or evaluation, its stats are copied to\n `bn.bn`. We rename `bn.bn` to `bn` and store it to be consistent with normal\n BN layers.\n Args:\n sd (OrderedDict): a dict of parameters whitch might contain Sub-BN\n parameters.\n Returns:\n new_sd (OrderedDict): a dict with Sub-BN parameters reshaped to\n normal parameters.\n \"\"\"\n new_sd = copy.deepcopy(sd)\n modifications = [\n (\"bn.bn.running_mean\", \"bn.running_mean\"),\n (\"bn.bn.running_var\", \"bn.running_var\"),\n (\"bn.split_bn.num_batches_tracked\", \"bn.num_batches_tracked\"),\n ]\n to_remove = [\"bn.bn.\", \".split_bn.\"]\n for key in sd:\n for before, after in modifications:\n if key.endswith(before):\n new_key = key.split(before)[0] + after\n new_sd[new_key] = new_sd.pop(key)\n\n for rm in to_remove:\n if rm in key and key in new_sd:\n del new_sd[key]\n\n for key in new_sd:\n if key.endswith(\"bn.weight\") or key.endswith(\"bn.bias\"):\n if len(new_sd[key].size()) == 4:\n assert all(d == 1 for d in new_sd[key].size()[1:])\n new_sd[key] = new_sd[key][:, 0, 0, 0]\n\n return new_sd\n\n\ndef c2_normal_to_sub_bn(key, model_keys):\n \"\"\"\n Convert BN parameters to Sub-BN parameters if model contains Sub-BNs.\n Args:\n key (OrderedDict): source dict of parameters.\n mdoel_key (OrderedDict): target dict of parameters.\n Returns:\n new_sd (OrderedDict): converted dict of parameters.\n \"\"\"\n if \"bn.running_\" in key:\n if key in model_keys:\n return key\n\n new_key = key.replace(\"bn.running_\", \"bn.split_bn.running_\")\n if new_key in model_keys:\n return new_key\n else:\n return key\n\n\ndef normal_to_sub_bn(checkpoint_sd, model_sd):\n \"\"\"\n Convert BN parameters to Sub-BN parameters if model contains Sub-BNs.\n Args:\n checkpoint_sd (OrderedDict): source dict of parameters.\n model_sd (OrderedDict): target dict of parameters.\n Returns:\n new_sd (OrderedDict): converted dict of parameters.\n \"\"\"\n for key in model_sd:\n if key not in checkpoint_sd:\n if 
\"bn.split_bn.\" in key:\n load_key = key.replace(\"bn.split_bn.\", \"bn.\")\n bn_key = key.replace(\"bn.split_bn.\", \"bn.bn.\")\n checkpoint_sd[key] = checkpoint_sd.pop(load_key)\n checkpoint_sd[bn_key] = checkpoint_sd[key]\n\n for key in model_sd:\n if key in checkpoint_sd:\n model_blob_shape = model_sd[key].shape\n c2_blob_shape = checkpoint_sd[key].shape\n\n if (\n len(model_blob_shape) == 1\n and len(c2_blob_shape) == 1\n and model_blob_shape[0] > c2_blob_shape[0]\n and model_blob_shape[0] % c2_blob_shape[0] == 0\n ):\n before_shape = checkpoint_sd[key].shape\n checkpoint_sd[key] = torch.cat(\n [checkpoint_sd[key]]\n * (model_blob_shape[0] // c2_blob_shape[0])\n )\n logger.info(\n \"{} {} -> {}\".format(\n key, before_shape, checkpoint_sd[key].shape\n )\n )\n return checkpoint_sd\n\n\ndef load_test_checkpoint(cfg, model):\n \"\"\"\n Loading checkpoint logic for testing.\n \"\"\"\n # Load a checkpoint to test if applicable.\n if cfg.TEST.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in MODEL_VIS.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TEST.CHECKPOINT_FILE_PATH and test it.\n checkpoint_epoch, best_top1_err, best_epoch = load_checkpoint(\n cfg.TEST.CHECKPOINT_FILE_PATH,\n model,\n cfg.NUM_GPUS > 1,\n None,\n inflation=False,\n convert_from_caffe2=cfg.TEST.CHECKPOINT_TYPE == \"caffe2\",\n )\n elif has_checkpoint(cfg.OUTPUT_DIR):\n last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR)\n checkpoint_epoch, best_top1_err, best_epoch = load_checkpoint(last_checkpoint, model, cfg.NUM_GPUS > 1)\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TRAIN.CHECKPOINT_FILE_PATH and test it.\n checkpoint_epoch, best_top1_err, best_epoch = load_checkpoint(\n cfg.TRAIN.CHECKPOINT_FILE_PATH,\n model,\n cfg.NUM_GPUS > 1,\n None,\n inflation=False,\n convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == \"caffe2\",\n )\n else:\n logger.info(\n \"Unknown way of loading checkpoint. Using with random initialization, only for debugging.\"\n )\n checkpoint_epoch = -1\n best_top1_err = -1\n best_epoch = -1\n \n return checkpoint_epoch, best_top1_err, best_epoch\n\n\ndef load_train_checkpoint(cfg, model, optimizer):\n \"\"\"\n Loading checkpoint logic for training.\n \"\"\"\n if cfg.TRAIN.AUTO_RESUME and has_checkpoint(cfg.OUTPUT_DIR):\n last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR)\n logger.info(\"Load from last checkpoint, {}.\".format(last_checkpoint))\n checkpoint_epoch, best_top1_err, best_epoch = load_checkpoint(\n last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer\n )\n start_epoch = checkpoint_epoch + 1\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n logger.info(\"Load from given checkpoint file.\")\n checkpoint_epoch, best_top1_err, best_epoch = load_checkpoint(\n cfg.TRAIN.CHECKPOINT_FILE_PATH,\n model,\n cfg.NUM_GPUS > 1,\n optimizer,\n inflation=cfg.TRAIN.CHECKPOINT_INFLATE,\n convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == \"caffe2\",\n )\n start_epoch = checkpoint_epoch + 1\n else:\n start_epoch = 0\n best_top1_err = -1\n best_epoch = -1\n\n return start_epoch, best_top1_err, best_epoch\n" ]
[ [ "numpy.concatenate", "torch.cat", "torch.save", "torch.tensor", "torch.load" ] ]
alvaropp/AdventOfCode2017
[ "2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad" ]
[ "2019/day9-1.py" ]
[ "from itertools import permutations\n\nimport numpy as np\n\n\nclass Processor:\n def __init__(self, data: np.array, inputs: list):\n self.data = np.append(data, np.zeros(200000, dtype=int))\n self.inputs = inputs\n self.outputs = []\n self.pos = 0\n self.base = 0\n self.halted = 0\n\n @staticmethod\n def process_opcode_and_param(number):\n param = str(number)\n opcode = int(param[-2:])\n mode = param[:-2]\n mode = \"0\" * (3 - len(mode)) + mode\n return opcode, mode[::-1]\n\n def get_index(self, mode, index):\n if mode == \"0\":\n return self.data[index]\n elif mode == \"1\":\n return index\n elif mode == \"2\":\n return self.data[index] + self.base\n\n def get_value(self, mode, index):\n return self.data[self.get_index(mode, index)]\n\n def go(self):\n opcode, mode = self.process_opcode_and_param(self.data[self.pos])\n\n if opcode == 1:\n self.data[self.get_index(mode[2], self.pos + 3)] = self.get_value(\n mode[0], self.pos + 1\n ) + self.get_value(mode[1], self.pos + 2)\n\n self.pos += 4\n\n elif opcode == 2:\n self.data[self.get_index(mode[2], self.pos + 3)] = self.get_value(\n mode[0], self.pos + 1\n ) * self.get_value(mode[1], self.pos + 2)\n self.pos += 4\n\n elif opcode == 3:\n if len(self.inputs) == 0:\n raise ValueError(\"No more inputs!\")\n next_input = self.inputs.pop()\n self.data[self.get_index(mode[0], self.pos + 1)] = next_input\n self.pos += 2\n\n elif opcode == 4:\n self.outputs.append(self.get_value(mode[0], self.pos + 1))\n self.pos += 2\n\n elif opcode == 5:\n if self.get_value(mode[0], self.pos + 1) != 0:\n self.pos = self.get_value(mode[1], self.pos + 2)\n else:\n self.pos += 3\n\n elif opcode == 6:\n if self.get_value(mode[0], self.pos + 1) == 0:\n self.pos = self.get_value(mode[1], self.pos + 2)\n else:\n self.pos += 3\n\n elif opcode == 7:\n value = (\n 1\n if self.get_value(mode[0], self.pos + 1)\n < self.get_value(mode[1], self.pos + 2)\n else 0\n )\n self.data[self.get_index(mode[2], self.pos + 3)] = value\n self.pos += 4\n\n elif opcode == 8:\n value = (\n 1\n if self.get_value(mode[0], self.pos + 1)\n == self.get_value(mode[1], self.pos + 2)\n else 0\n )\n self.data[self.get_index(mode[2], self.pos + 3)] = value\n self.pos += 4\n\n elif opcode == 9:\n self.base += self.get_value(mode[0], self.pos + 1)\n self.pos += 2\n\n elif opcode == 99:\n self.halted = 1\n\n else:\n print(f\"opcode: {opcode}, mode: {mode}\")\n raise ValueError\n\n\ndef solve_amplifier(data, single_input):\n amplifier = Processor(data, single_input)\n while not amplifier.halted:\n amplifier.go()\n return amplifier\n\n\nif __name__ == \"__main__\":\n data = np.genfromtxt(\"day9.txt\", delimiter=\",\", dtype=int)\n inputs = [1]\n\n amplifier = solve_amplifier(data, inputs)\n print(amplifier.outputs[0])\n" ]
[ [ "numpy.genfromtxt", "numpy.zeros" ] ]
spmvg/evt
[ "bbba18007fce387bf877802d6512a66ff5dfd425", "bbba18007fce387bf877802d6512a66ff5dfd425" ]
[ "tests/test_dataset.py", "src/evt/estimators/gevmle.py" ]
[ "import hashlib\nimport io\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom evt.dataset import Dataset\n\n\nclass TestDataset(unittest.TestCase):\n def setUp(self) -> None:\n pass\n\n def test_series(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n self.assertTrue(np.all(np.isclose(\n [1, 2, 3],\n dataset.series\n )))\n\n def test_nans(self):\n series = pd.Series(data=[1, 2, np.nan, 3])\n with self.assertRaises(ValueError):\n Dataset(series)\n\n def test_inf(self):\n series = pd.Series(data=[1, 2, np.inf, 3])\n with self.assertRaises(ValueError):\n Dataset(series)\n\n def test_deepcopy(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n self.assertNotEqual(id(series), id(dataset.series))\n\n def test_duplicated(self):\n series = pd.Series(data=[1, 2, 3, 4], index=[1, 1, 1, 2])\n with self.assertRaises(ValueError):\n Dataset(series)\n\n def test_plot_dataset(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n fig = plt.figure(figsize=(8, 6))\n ax = plt.gca()\n\n out_file = io.BytesIO()\n dataset.plot_dataset(ax)\n fig.savefig(out_file, format='raw')\n out_file.seek(0)\n hashed = hashlib.md5(out_file.read()).digest()\n\n self.assertEqual(\n b'\\xc5\\xb5\\x90\\r\\x88\\xe3\\x96\\xe1\\xa2\\x1c\\x9eg\\xcf\\xbc\\xd2\\xd9',\n hashed\n )\n\n def test_plot_boxplot(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n fig = plt.figure(figsize=(8, 6))\n ax = plt.gca()\n\n out_file = io.BytesIO()\n dataset.plot_boxplot(ax)\n fig.savefig(out_file, format='raw')\n out_file.seek(0)\n hashed = hashlib.md5(out_file.read()).digest()\n\n self.assertEqual(\n b',;\\x01\\xe9\\x95\\xb8\\xb8\\xeb\\xc2V\\xb4\\n\\xf3\\xc5\\x9f\\x90',\n hashed\n )\n\n def test_plot_mean_excess(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n fig = plt.figure(figsize=(8, 6))\n ax = plt.gca()\n\n out_file = io.BytesIO()\n dataset.plot_mean_excess(ax)\n fig.savefig(out_file, format='raw')\n out_file.seek(0)\n hashed = hashlib.md5(out_file.read()).digest()\n\n self.assertEqual(\n b'\\xe2\\x11\\x0bS\\xc5\\x11^\\xb2\\x84\\x0f\\x87\\x9d\\x9c\\xfc\\xfb\\x89',\n hashed\n )\n\n def test_plot_maximum_to_sum(self):\n series = pd.Series(data=[1, 2, 3])\n dataset = Dataset(series)\n fig = plt.figure(figsize=(8, 6))\n ax = plt.gca()\n\n out_file = io.BytesIO()\n dataset.plot_maximum_to_sum(ax)\n fig.savefig(out_file, format='raw')\n out_file.seek(0)\n hashed = hashlib.md5(out_file.read()).digest()\n\n self.assertEqual(\n b'\\xec\\xac\\x9f\\x1cl\\xbdB\\xf5d\\xf2\\xb2;\\x9a\\x05\\xc7\\x99',\n hashed\n )\n", "from typing import List\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom evt import utils\nfrom evt._compiled_expressions.compiled_expressions import gevmle_fisher_information\nfrom evt.estimators.estimator_abc import Estimator, Estimate\nfrom evt.methods.block_maxima import BlockMaxima\nfrom scipy.stats import genextreme\n\n\nclass GEVMLE(Estimator):\n r\"\"\"\n Maximum likelihood estimator for the generalized extreme value distribution in the block maxima approach with\n distribution\n\n .. 
math::\n \\exp \\bigg[\n -(1+ \\gamma (x - \\mu) / \\sigma) ^{-1/\\gamma}\n \\bigg]\n\n where\n\n * ``self.tail_index`` corresponds to the tail index :math:`\\gamma`,\n * ``self.loc`` corresponds to the location parameter :math:`\\mu`,\n * ``self.scale`` corresponds to the scale parameter :math:`\\sigma`.\n\n Confidence intervals are estimated using the observed Fisher information.\n \"\"\"\n def __init__(\n self,\n block_maxima: BlockMaxima\n ):\n super().__init__()\n\n self.block_maxima = block_maxima\n self.tail_index, self.loc, self.scale = None, None, None\n\n def estimate(self) -> List[Estimate]:\n \"\"\"\n Returns maximum likelihood estimates including confidence intervals for the tail index, location parameter\n and scale of the generalized extreme value distribution.\n\n Estimates for the confidence intervals are based on asymptotic behaviour of the observed Fisher information\n for the generalized extreme value distribution.\n\n The returned estimate might be only locally optimal or fail altogether.\n Moreover, if the confidence intervals are unable to be determined numerically, the ``.ci_lower`` and\n ``.ci_upper`` of the estimate will be ``nan``.\n\n :return: maximum likelihood ``Estimate`` including confidence intervals for the tail index,\n location parameter and scale of the generalized extreme value distribution.\n \"\"\"\n tail_index, self.loc, self.scale = genextreme.fit(self.block_maxima.block_maxima)\n self.tail_index = -tail_index # scipy uses opposite sign for tail index\n\n std_tail_index, std_loc, std_scale = np.sqrt(np.diag(np.linalg.inv(gevmle_fisher_information(\n self.block_maxima.block_maxima.to_numpy(),\n self.tail_index,\n self.loc,\n self.scale\n )))) / np.sqrt(len(self.block_maxima.block_maxima))\n std_factor = utils.confidence_interval_to_std(Estimate.confidence_level)\n return [\n Estimate(\n self.tail_index,\n self.tail_index - std_factor * std_tail_index,\n self.tail_index + std_factor * std_tail_index,\n ),\n Estimate(\n self.loc,\n self.loc - std_factor * std_loc,\n self.loc + std_factor * std_loc,\n ),\n Estimate(\n self.scale,\n self.scale - std_factor * std_scale,\n self.scale + std_factor * std_scale,\n ),\n ]\n\n def plot_qq_gev(\n self,\n ax: plt.Axes\n ):\n \"\"\"\n Quantile-quantile plot of the empirical survival function of the block maxima against the fitted generalized\n extreme value distribution.\n The ``.estimate`` method must be called before this function.\n \"\"\"\n if self.tail_index is None:\n raise RuntimeError('The .estimate method must be called before plotting is possible.')\n\n empirical_survival = 1 - utils.empirical_cdf(self.block_maxima.block_maxima)\n survival_function = genextreme.sf(\n empirical_survival.index,\n -self.tail_index,\n loc=self.loc,\n scale=self.scale\n )\n ax.loglog(\n survival_function,\n empirical_survival,\n 'xk',\n alpha=.8\n )\n ax.plot(\n survival_function,\n survival_function,\n 'r--',\n alpha=.8,\n label='Diagonal'\n )\n ax.invert_xaxis()\n ax.invert_yaxis()\n ax.set_xlabel('GEV survival function')\n ax.set_ylabel('Empirical survival function')\n ax.legend(loc='upper left')\n ax.set_title('Q–Q plot against generalized extreme value distribution')\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.isclose", "matplotlib.pyplot.figure", "pandas.Series" ], [ "scipy.stats.genextreme.fit", "scipy.stats.genextreme.sf" ] ]
UtahDave/great_expectations
[ "99a54370f7ebeea5d95bca726200db01c7326d68" ]
[ "tests/expectations/metrics/test_core.py" ]
[ "import copy\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch import Batch\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n SqlAlchemyBatchData,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.registry import get_metric_provider\nfrom great_expectations.self_check.util import (\n build_pandas_engine,\n build_sa_engine,\n build_spark_engine,\n)\nfrom great_expectations.validator.validation_graph import MetricConfiguration\nfrom tests.expectations.test_util import get_table_columns_metric\n\n\ndef test_metric_loads_pd():\n assert get_metric_provider(\"column.max\", PandasExecutionEngine()) is not None\n\n\ndef test_basic_metric_pd():\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None]})\n batch = Batch(data=df)\n engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 3}\n\n\ndef test_mean_metric_pd():\n engine = build_pandas_engine(pd.DataFrame({\"a\": [1, 2, 3, None]}))\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.mean\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 2}\n\n\ndef test_stdev_metric_pd():\n engine = build_pandas_engine(pd.DataFrame({\"a\": [1, 2, 3, None]}))\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.standard_deviation\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 1}\n\n\ndef test_max_metric_column_exists_pd():\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None]})\n batch = Batch(data=df)\n engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n 
metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 3}\n\n\ndef test_max_metric_column_does_not_exist_pd():\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None]})\n batch = Batch(data=df)\n engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert (\n str(eee.value)\n == 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n )\n\n\ndef test_max_metric_column_exists_sa(sa):\n engine = build_sa_engine(pd.DataFrame({\"a\": [1, 2, 1, None]}), sa)\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n partial_metric = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(partial_metric,), metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": partial_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 2}\n\n\ndef test_max_metric_column_does_not_exist_sa(sa):\n engine = build_sa_engine(pd.DataFrame({\"a\": [1, 2, 1, None]}), sa)\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n partial_metric = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n results = engine.resolve_metrics(\n metrics_to_resolve=(partial_metric,), metrics=metrics\n )\n metrics.update(results)\n assert (\n 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n in str(eee.value)\n )\n\n\ndef test_max_metric_column_exists_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame({\"a\": [1, 2, 1]}),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n 
metrics.update(results)\n\n partial_metric = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(partial_metric,), metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": partial_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 2}\n\n\ndef test_max_metric_column_does_not_exist_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame({\"a\": [1, 2, 1]}),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n partial_metric = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n results = engine.resolve_metrics(\n metrics_to_resolve=(partial_metric,), metrics=metrics\n )\n metrics.update(results)\n assert (\n str(eee.value)\n == 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n )\n\n\ndef test_map_value_set_sa(sa):\n engine = build_sa_engine(pd.DataFrame({\"a\": [1, 2, 3, 3, None]}), sa)\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n metrics = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n\n # Note: metric_dependencies is optional here in the config when called from a validator.\n aggregate_partial = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"unexpected_condition\": desired_metric},\n )\n\n metrics = engine.resolve_metrics(\n metrics_to_resolve=(aggregate_partial,), metrics=metrics\n )\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"metric_partial_fn\": aggregate_partial},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results == {desired_metric.id: 0}\n\n\ndef test_map_of_type_sa(sa):\n eng = sa.create_engine(\"sqlite://\")\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None]})\n df.to_sql(name=\"test\", con=eng, index=False)\n batch_data = SqlAlchemyBatchData(\n execution_engine=eng, 
table_name=\"test\", source_table_name=\"test\"\n )\n engine = SqlAlchemyExecutionEngine(\n engine=eng, batch_data_dict={\"my_id\": batch_data}\n )\n desired_metric = MetricConfiguration(\n metric_name=\"table.column_types\",\n metric_domain_kwargs={},\n metric_value_kwargs=None,\n )\n\n results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))\n assert results[desired_metric.id][0][\"name\"] == \"a\"\n assert isinstance(results[desired_metric.id][0][\"type\"], sa.FLOAT)\n\n\ndef test_map_value_set_spark(spark_session, basic_spark_df_execution_engine):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 3, None]},\n ),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Note: metric_dependencies is optional here in the config when called from a validator.\n aggregate_partial = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"unexpected_condition\": condition_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(aggregate_partial,), metrics=metrics\n )\n metrics.update(results)\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"metric_partial_fn\": aggregate_partial},\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 0}\n\n # We run the same computation again, this time with None being replaced by nan instead of NULL\n # to demonstrate this behavior\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None]})\n df = spark_session.createDataFrame(df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"my_id\", batch_data=df)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Note: metric_dependencies is optional here in the config when called from a validator.\n aggregate_partial = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"unexpected_condition\": condition_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(aggregate_partial,), metrics=metrics\n )\n metrics.update(results)\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.in_set.unexpected_count\",\n 
metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"value_set\": [1, 2, 3]},\n metric_dependencies={\"metric_partial_fn\": aggregate_partial},\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: 1}\n\n\ndef test_map_column_value_lengths_between_pd():\n engine = build_pandas_engine(\n pd.DataFrame({\"a\": [\"a\", \"aaa\", \"bcbc\", \"defgh\", None]})\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.value_length.map\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n ser_expected_lengths = pd.Series([1, 3, 4, 5])\n result_series, _, _ = results[desired_metric.id]\n assert ser_expected_lengths.equals(result_series)\n\n\ndef test_map_unique_column_exists_pd():\n engine = build_pandas_engine(pd.DataFrame({\"a\": [1, 2, 3, 3, 4, None]}))\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert list(metrics[condition_metric.id][0]) == [False, False, True, True, False]\n assert metrics[unexpected_count_metric.id] == 2\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_rows\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 1}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id][\"a\"].index == [2]\n assert metrics[unexpected_rows_metric.id][\"a\"].values == [3]\n\n\ndef test_map_unique_column_does_not_exist_pd():\n engine = build_pandas_engine(pd.DataFrame({\"a\": [1, 2, 3, 3, None]}))\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n 
metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert (\n str(eee.value)\n == 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n )\n\n\ndef test_map_unique_column_exists_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n {\"a\": [1, 2, 3, 3, None], \"b\": [\"foo\", \"bar\", \"baz\", \"qux\", \"fish\"]}\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # This is no longer a MAP_CONDITION because mssql does not support it. Instead, it is a WINDOW_CONDITION\n #\n # aggregate_fn = MetricConfiguration(\n # metric_name=\"column_values.unique.unexpected_count.aggregate_fn\",\n # metric_domain_kwargs={\"column\": \"a\"},\n # metric_value_kwargs=None,\n # metric_dependencies={\"unexpected_condition\": condition_metric},\n # )\n # aggregate_fn_metrics = engine.resolve_metrics(\n # metrics_to_resolve=(aggregate_fn,), metrics=metrics\n # )\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n # metric_dependencies={\"metric_partial_fn\": aggregate_fn},\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,),\n metrics=metrics, # metrics=aggregate_fn_metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == 2\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_values\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == [3, 3]\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_value_counts\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results[desired_metric.id] == [(3, 2)]\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_rows\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n 
metric_dependencies={\"unexpected_condition\": condition_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == [(3, \"baz\"), (3, \"qux\")]\n\n\ndef test_map_unique_column_does_not_exist_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n {\"a\": [1, 2, 3, 3, None], \"b\": [\"foo\", \"bar\", \"baz\", \"qux\", \"fish\"]}\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n metrics = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n assert (\n 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n in str(eee.value)\n )\n\n\ndef test_map_unique_column_exists_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\n \"a\": [1, 2, 3, 3, 4, None],\n \"b\": [None, \"foo\", \"bar\", \"baz\", \"qux\", \"fish\"],\n }\n ),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # unique is a *window* function so does not use the aggregate_fn version of unexpected count\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == 2\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_values\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == [3, 3]\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_value_counts\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = 
engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == [(3, 2)]\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.unique.unexpected_rows\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n metric_dependencies={\"unexpected_condition\": condition_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results[desired_metric.id] == [(3, \"bar\"), (3, \"baz\")]\n\n\ndef test_map_unique_column_does_not_exist_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\n \"a\": [1, 2, 3, 3, 4, None],\n \"b\": [None, \"foo\", \"bar\", \"baz\", \"qux\", \"fish\"],\n }\n ),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_values.unique.condition\",\n metric_domain_kwargs={\"column\": \"non_existent_column\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:\n # noinspection PyUnusedLocal\n metrics = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,), metrics=metrics\n )\n assert (\n str(eee.value)\n == 'Error: The column \"non_existent_column\" in BatchData does not exist.'\n )\n\n\ndef test_z_score_under_threshold_pd():\n df = pd.DataFrame({\"a\": [1, 2, 3, None]})\n engine = PandasExecutionEngine(batch_data_dict={\"my_id\": df})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n mean = MetricConfiguration(\n metric_name=\"column.mean\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n stdev = MetricConfiguration(\n metric_name=\"column.standard_deviation\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metrics = (mean, stdev)\n results = engine.resolve_metrics(\n metrics_to_resolve=desired_metrics, metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.map\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"column.standard_deviation\": stdev,\n \"column.mean\": mean,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.under_threshold.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"double_sided\": True, \"threshold\": 2},\n metric_dependencies={\n \"column_values.z_score.map\": desired_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert 
list(results[desired_metric.id][0]) == [False, False, False]\n metrics.update(results)\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.under_threshold.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"double_sided\": True, \"threshold\": 2},\n metric_dependencies={\"unexpected_condition\": desired_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results[desired_metric.id] == 0\n\n\ndef test_z_score_under_threshold_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 3, None]},\n ),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n mean = MetricConfiguration(\n metric_name=\"column.mean.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n stdev = MetricConfiguration(\n metric_name=\"column.standard_deviation.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metrics = (mean, stdev)\n results = engine.resolve_metrics(\n metrics_to_resolve=desired_metrics, metrics=metrics\n )\n metrics.update(results)\n\n mean = MetricConfiguration(\n metric_name=\"column.mean\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": mean},\n )\n stdev = MetricConfiguration(\n metric_name=\"column.standard_deviation\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": stdev,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metrics = (mean, stdev)\n results = engine.resolve_metrics(\n metrics_to_resolve=desired_metrics, metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.map\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"column.standard_deviation\": stdev,\n \"column.mean\": mean,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.under_threshold.condition\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"double_sided\": True, \"threshold\": 2},\n metric_dependencies={\n \"column_values.z_score.map\": desired_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column_values.z_score.under_threshold.unexpected_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"double_sided\": True, \"threshold\": 2},\n metric_dependencies={\"unexpected_condition\": desired_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n 
metric_name=\"column_values.z_score.under_threshold.unexpected_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"double_sided\": True, \"threshold\": 2},\n metric_dependencies={\"metric_partial_fn\": desired_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results[desired_metric.id] == 0\n\n\ndef test_table_metric_pd(caplog):\n df = pd.DataFrame({\"a\": [1, 2, 3, 3, None], \"b\": [1, 2, 3, 3, None]})\n engine = PandasExecutionEngine(batch_data_dict={\"my_id\": df})\n desired_metric = MetricConfiguration(\n metric_name=\"table.row_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))\n assert results == {desired_metric.id: 5}\n assert (\n 'Unexpected key(s) \"column\" found in domain_kwargs for domain type \"table\"'\n in caplog.text\n )\n\n\ndef test_map_column_pairs_equal_metric_pd():\n engine = build_pandas_engine(\n pd.DataFrame(\n data={\n \"a\": [0, 1, 9, 2],\n \"b\": [5, 4, 3, 6],\n \"c\": [5, 4, 3, 6],\n \"d\": [7, 8, 9, 0],\n }\n )\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n \"\"\"\n Two tests:\n 1. Pass -- no unexpected rows.\n 2. Fail -- three unexpected rows.\n \"\"\"\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"column_pair_values.equal\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n # First, assert Pass (no unexpected results).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [False, False, False, False]\n assert metrics[unexpected_count_metric.id] == 0\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert 
metrics[unexpected_rows_metric.id].empty\n assert len(metrics[unexpected_rows_metric.id].columns) == 4\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n # Second, assert Fail (one unexpected result).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [True, True, False, True]\n assert metrics[unexpected_count_metric.id] == 3\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].equals(\n pd.DataFrame(\n data={\"a\": [0, 1, 2], \"b\": [5, 4, 6], \"c\": [5, 4, 6], \"d\": [7, 8, 0]},\n index=pd.Index([0, 1, 3]),\n )\n )\n assert len(metrics[unexpected_rows_metric.id].columns) == 4\n pd.testing.assert_index_equal(\n metrics[unexpected_rows_metric.id].index, pd.Index([0, 1, 3])\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 3\n assert metrics[unexpected_values_metric.id] == [(0, 7), (1, 8), (2, 0)]\n\n\ndef 
test_map_column_pairs_equal_metric_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n data={\n \"a\": [0, 1, 9, 2],\n \"b\": [5, 4, 3, 6],\n \"c\": [5, 4, 3, 6],\n \"d\": [7, 8, 9, 0],\n }\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n \"\"\"\n Two tests:\n 1. Pass -- no unexpected rows.\n 2. Fail -- three unexpected rows.\n \"\"\"\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"column_pair_values.equal\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n # First, assert Pass (no unexpected results).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n assert metrics[unexpected_count_metric.id] == 0\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_rows_metric.id]) == 0\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n # Second, assert Fail (one unexpected result).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n 
metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n assert metrics[unexpected_count_metric.id] == 3\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id] == [\n (0, 5, 5, 7),\n (1, 4, 4, 8),\n (2, 6, 6, 0),\n ]\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"d\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 3\n assert metrics[unexpected_values_metric.id] == [(0, 7), (1, 8), (2, 0)]\n\n\ndef test_map_column_pairs_greater_metric_pd():\n df = pd.DataFrame({\"a\": [2, 3, 4, None, 3, None], \"b\": [1, 2, 3, None, 3, 5]})\n engine = PandasExecutionEngine(batch_data_dict={\"my_id\": df})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_pair_values.a_greater_than_b.condition\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"or_equal\": True,\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert (\n results[condition_metric.id][0]\n .reset_index(drop=True)\n .equals(pd.Series([False, False, False, False]))\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=\"column_pair_values.a_greater_than_b.unexpected_values\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": 
\"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"or_equal\": True,\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n\ndef test_map_column_pairs_greater_metric_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n data={\n \"a\": [2, 3, 4, None, 3, None],\n \"b\": [1, 2, 3, None, 3, 5],\n }\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_pair_values.a_greater_than_b.condition\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"or_equal\": True,\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=\"column_pair_values.a_greater_than_b.unexpected_values\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"or_equal\": True,\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n\ndef test_map_column_pairs_in_set_metric_pd():\n df = pd.DataFrame({\"a\": [10, 3, 4, None, 3, None], \"b\": [1, 2, 3, None, 3, 5]})\n engine = PandasExecutionEngine(batch_data_dict={\"my_id\": df})\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_pair_values.in_set.condition\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"value_pairs_set\": [(2, 1), (3, 2), (4, 3), (3, 3)],\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert (\n results[condition_metric.id][0]\n .reset_index(drop=True)\n .equals(pd.Series([True, False, False, False]))\n )\n\n\ndef test_map_column_pairs_in_set_metric_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n {\"a\": [10, 9, 3, 4, None, 3, None], 
\"b\": [1, 4, 2, 3, None, 3, 5]}\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_pair_values.in_set.condition\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"value_pairs_set\": [(2, 1), (3, 2), (4, 3), (3, 3)],\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=\"column_pair_values.in_set.unexpected_values\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"value_pairs_set\": [(2, 1), (3, 2), (4, 3), (3, 3)],\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert results[unexpected_values_metric.id] == [(10, 1), (9, 4)]\n\n condition_metric = MetricConfiguration(\n metric_name=\"column_pair_values.in_set.condition\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"both_values_are_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=\"column_pair_values.in_set.unexpected_values\",\n metric_domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"ignore_row_if\": \"either_value_is_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\n \"result_format\": \"SUMMARY\",\n \"partial_unexpected_count\": 6,\n },\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n assert results[unexpected_values_metric.id] == []\n\n\ndef test_table_metric_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1]},\n ),\n batch_id=\"my_id\",\n )\n\n desired_metric = MetricConfiguration(\n metric_name=\"table.row_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))\n\n desired_metric = MetricConfiguration(\n metric_name=\"table.row_count\",\n metric_domain_kwargs={\"column\": \"a\"},\n 
metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=results\n )\n\n assert results == {desired_metric.id: 3}\n\n\ndef test_median_metric_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3]},\n ),\n batch_id=\"my_id\",\n )\n\n desired_metric = MetricConfiguration(\n metric_name=\"table.row_count.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n metrics = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))\n\n row_count = MetricConfiguration(\n metric_name=\"table.row_count\",\n metric_domain_kwargs={},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric},\n )\n metrics = engine.resolve_metrics(metrics_to_resolve=(row_count,), metrics=metrics)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.median\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"table.row_count\": row_count},\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results == {desired_metric.id: 2}\n\n\ndef test_distinct_metric_spark(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3, None]},\n ),\n batch_id=\"my_id\",\n )\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.value_counts\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"sort\": \"value\", \"collate\": None},\n )\n\n metrics = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))\n assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"column.value_counts\": desired_metric},\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n assert results == {desired_metric.id: {1, 2, 3}}\n\n\ndef test_distinct_metric_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame({\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]}), sa\n )\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.value_counts\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"sort\": \"value\", \"collate\": None},\n )\n desired_metric_b = MetricConfiguration(\n metric_name=\"column.value_counts\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs={\"sort\": \"value\", \"collate\": None},\n )\n\n metrics = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric, desired_metric_b)\n )\n assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])\n assert pd.Series(index=[4], data=[6]).equals(metrics[desired_metric_b.id])\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"column.value_counts\": desired_metric},\n )\n desired_metric_b = MetricConfiguration(\n metric_name=\"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\"column.value_counts\": desired_metric_b},\n )\n results = engine.resolve_metrics(\n 
metrics_to_resolve=(desired_metric, desired_metric_b), metrics=metrics\n )\n assert results[desired_metric.id] == {1, 2, 3}\n assert results[desired_metric_b.id] == {4}\n\n\ndef test_distinct_metric_pd():\n engine = build_pandas_engine(pd.DataFrame({\"a\": [1, 2, 1, 2, 3, 3]}))\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.value_counts\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs={\"sort\": \"value\", \"collate\": None},\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])\n\n desired_metric = MetricConfiguration(\n metric_name=\"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"column.value_counts\": desired_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n\n results = engine.resolve_metrics(\n metrics_to_resolve=(desired_metric,), metrics=metrics\n )\n metrics.update(results)\n assert results == {desired_metric.id: {1, 2, 3}}\n\n\ndef test_batch_aggregate_metrics_sa(caplog, sa):\n import datetime\n\n engine = build_sa_engine(\n pd.DataFrame({\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]}), sa\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_1,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_2,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n 
metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_3,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_4,\n \"table.columns\": table_columns_metric,\n },\n )\n caplog.clear()\n caplog.set_level(logging.DEBUG, logger=\"great_expectations\")\n start = datetime.datetime.now()\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n end = datetime.datetime.now()\n print(\"t1\")\n print(end - start)\n assert results[desired_metric_1.id] == 3\n assert results[desired_metric_2.id] == 1\n assert results[desired_metric_3.id] == 4\n assert results[desired_metric_4.id] == 4\n\n # Check that all four of these metrics were computed on a single domain\n found_message = False\n for record in caplog.records:\n if (\n record.message\n == \"SqlAlchemyExecutionEngine computed 4 metrics on domain_id ()\"\n ):\n found_message = True\n assert found_message\n\n\ndef test_batch_aggregate_metrics_spark(caplog, spark_session):\n import datetime\n\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]},\n ),\n batch_id=\"my_id\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric_1},\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric_2},\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric_3},\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min\",\n 
metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\"metric_partial_fn\": desired_metric_4},\n )\n start = datetime.datetime.now()\n caplog.clear()\n caplog.set_level(logging.DEBUG, logger=\"great_expectations\")\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n end = datetime.datetime.now()\n print(end - start)\n assert results[desired_metric_1.id] == 3\n assert results[desired_metric_2.id] == 1\n assert results[desired_metric_3.id] == 4\n assert results[desired_metric_4.id] == 4\n\n # Check that all four of these metrics were computed on a single domain\n found_message = False\n for record in caplog.records:\n if (\n record.message\n == \"SparkDFExecutionEngine computed 4 metrics on domain_id ()\"\n ):\n found_message = True\n assert found_message\n\n\ndef test_map_multicolumn_sum_equal_pd():\n engine = build_pandas_engine(\n pd.DataFrame(\n data={\"a\": [0, 1, 2], \"b\": [5, 4, 3], \"c\": [0, 0, 1], \"d\": [7, 8, 9]}\n )\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n \"\"\"\n Two tests:\n 1. Pass -- no unexpected rows.\n 2. Fail -- one unexpected row.\n \"\"\"\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"multicolumn_sum.equal\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n # First, assert Pass (no unexpected results).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"sum_total\": 5,\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [False, False, False]\n assert metrics[unexpected_count_metric.id] == 0\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].empty\n assert 
len(metrics[unexpected_rows_metric.id].columns) == 4\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n # Second, assert Fail (one unexpected result).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"sum_total\": 5,\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [False, False, True]\n assert metrics[unexpected_count_metric.id] == 1\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].equals(\n pd.DataFrame(data={\"a\": [2], \"b\": [3], \"c\": [1], \"d\": [9]}, index=[2])\n )\n assert len(metrics[unexpected_rows_metric.id].columns) == 4\n pd.testing.assert_index_equal(\n metrics[unexpected_rows_metric.id].index, pd.Index([2])\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 1\n assert metrics[unexpected_values_metric.id] == [{\"a\": 2, \"b\": 3, \"c\": 1}]\n\n\ndef test_map_multicolumn_sum_equal_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n data={\"a\": [0, 1, 2], \"b\": 
[5, 4, 3], \"c\": [0, 0, 1], \"d\": [7, 8, 9]}\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n \"\"\"\n Two tests:\n 1. Pass -- no unexpected rows.\n 2. Fail -- one unexpected row.\n \"\"\"\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"multicolumn_sum.equal\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n # First, assert Pass (no unexpected results).\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"sum_total\": 5,\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n assert metrics[unexpected_count_metric.id] == 0\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_rows_metric.id]) == 0\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n # Second, assert Fail (one unexpected result).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"sum_total\": 5,\n },\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n 
)\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)\n assert metrics[unexpected_count_metric.id] == 1\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id] == [(2, 3, 1, 9)]\n assert len(metrics[unexpected_rows_metric.id][0]) == 4\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 1\n assert metrics[unexpected_values_metric.id] == [{\"a\": 2, \"b\": 3, \"c\": 1}]\n\n\ndef test_map_compound_columns_unique_pd():\n engine = build_pandas_engine(\n pd.DataFrame(data={\"a\": [0, 1, 1], \"b\": [1, 2, 3], \"c\": [0, 2, 2]})\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n \"\"\"\n Two tests:\n 1. Pass -- no duplicated compound column keys.\n 2. 
Fail -- two duplicated compound column keys.\n \"\"\"\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"compound_columns.unique\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n # First, assert Pass (no unexpected results).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [False, False, False]\n assert metrics[unexpected_count_metric.id] == 0\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].empty\n assert len(metrics[unexpected_rows_metric.id].columns) == 3\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 0\n assert metrics[unexpected_values_metric.id] == []\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n # Second, assert Fail (one unexpected result).\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n },\n 
metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [False, True, True]\n assert metrics[unexpected_count_metric.id] == 2\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].equals(\n pd.DataFrame(data={\"a\": [1, 1], \"b\": [2, 3], \"c\": [2, 2]}, index=[1, 2])\n )\n assert len(metrics[unexpected_rows_metric.id].columns) == 3\n pd.testing.assert_index_equal(\n metrics[unexpected_rows_metric.id].index, pd.Index([1, 2])\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 2\n assert metrics[unexpected_values_metric.id] == [{\"a\": 1, \"c\": 2}, {\"a\": 1, \"c\": 2}]\n\n\ndef test_map_select_column_values_unique_within_record_pd():\n engine = build_pandas_engine(\n pd.DataFrame(\n data={\n \"a\": [1, 1, 8, 1, 4, None, None, 7],\n \"b\": [1, 2, 2, 2, 4, None, None, 1],\n \"c\": [2, 3, 7, 3, 4, None, 9, 0],\n }\n )\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"select_column_values.unique.within_record\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs=None,\n 
metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [\n True,\n False,\n False,\n False,\n True,\n True,\n False,\n ]\n assert metrics[unexpected_count_metric.id] == 3\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 8}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].equals(\n pd.DataFrame(\n data={\"a\": [1.0, 4.0, None], \"b\": [1.0, 4.0, None], \"c\": [2.0, 4.0, 9.0]},\n index=[0, 4, 6],\n )\n )\n assert len(metrics[unexpected_rows_metric.id].columns) == 3\n pd.testing.assert_index_equal(\n metrics[unexpected_rows_metric.id].index, pd.Index([0, 4, 6])\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 3\n\n unexpected_values = []\n for unexpected_value_dict in metrics[unexpected_values_metric.id]:\n updated_unexpected_value_dict = {\n key: \"NULL\" if np.isnan(value) else value\n for key, value in unexpected_value_dict.items()\n }\n unexpected_values.append(updated_unexpected_value_dict)\n\n assert unexpected_values == [\n {\"a\": 1.0, \"b\": 1.0, \"c\": 2.0},\n {\"a\": 4.0, \"b\": 4.0, \"c\": 4.0},\n {\"a\": \"NULL\", \"b\": \"NULL\", \"c\": 9.0},\n ]\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n 
metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert list(metrics[condition_metric.id][0]) == [\n True,\n False,\n False,\n False,\n True,\n False,\n ]\n assert metrics[unexpected_count_metric.id] == 2\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id].equals(\n pd.DataFrame(\n data={\"a\": [1.0, 4.0], \"b\": [1.0, 4.0], \"c\": [2.0, 4.0]}, index=[0, 4]\n )\n )\n assert len(metrics[unexpected_rows_metric.id].columns) == 3\n pd.testing.assert_index_equal(\n metrics[unexpected_rows_metric.id].index, pd.Index([0, 4])\n )\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 2\n assert metrics[unexpected_values_metric.id] == [\n {\"a\": 1.0, \"b\": 1.0, \"c\": 2.0},\n {\"a\": 4.0, \"b\": 4.0, \"c\": 4.0},\n ]\n\n\ndef test_map_select_column_values_unique_within_record_sa(sa):\n engine = build_sa_engine(\n pd.DataFrame(\n data={\n \"a\": [1, 1, 8, 1, 4, None, None, 7],\n \"b\": [1, 2, 2, 2, 4, None, None, 1],\n \"c\": [2, 3, 7, 3, 4, None, 9, 0],\n }\n ),\n sa,\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n metrics.update(results)\n\n # Save original metrics for testing unexpected results.\n metrics_save: dict = copy.deepcopy(metrics)\n\n metric_name: str = \"select_column_values.unique.within_record\"\n condition_metric_name: str = f\"{metric_name}.condition\"\n unexpected_count_metric_name: str = f\"{metric_name}.unexpected_count\"\n unexpected_rows_metric_name: str = f\"{metric_name}.unexpected_rows\"\n unexpected_values_metric_name: str = f\"{metric_name}.unexpected_values\"\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": 
condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert metrics[unexpected_count_metric.id] == 3\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 8}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id] == [\n (1.0, 1.0, 2.0),\n (4.0, 4.0, 4.0),\n (None, None, 9.0),\n ]\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"all_values_are_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 3\n\n assert metrics[unexpected_values_metric.id] == [\n {\"a\": 1.0, \"b\": 1.0, \"c\": 2.0},\n {\"a\": 4.0, \"b\": 4.0, \"c\": 4.0},\n {\"a\": None, \"b\": None, \"c\": 9.0},\n ]\n\n # Restore from saved original metrics in order to start fresh on testing for unexpected results.\n metrics = copy.deepcopy(metrics_save)\n\n condition_metric = MetricConfiguration(\n metric_name=condition_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(condition_metric,),\n metrics=metrics,\n )\n metrics.update(results)\n\n unexpected_count_metric = MetricConfiguration(\n metric_name=unexpected_count_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs=None,\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_count_metric,), metrics=metrics\n )\n metrics.update(results)\n\n # Condition metrics return \"negative logic\" series.\n assert metrics[unexpected_count_metric.id] == 2\n\n unexpected_rows_metric = MetricConfiguration(\n metric_name=unexpected_rows_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_rows_metric,), 
metrics=metrics\n )\n metrics.update(results)\n\n assert metrics[unexpected_rows_metric.id] == [(1.0, 1.0, 2.0), (4.0, 4.0, 4.0)]\n\n unexpected_values_metric = MetricConfiguration(\n metric_name=unexpected_values_metric_name,\n metric_domain_kwargs={\n \"column_list\": [\"a\", \"b\", \"c\"],\n \"ignore_row_if\": \"any_value_is_missing\",\n },\n metric_value_kwargs={\n \"result_format\": {\"result_format\": \"SUMMARY\", \"partial_unexpected_count\": 3}\n },\n metric_dependencies={\n \"unexpected_condition\": condition_metric,\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(unexpected_values_metric,), metrics=metrics\n )\n metrics.update(results)\n\n assert len(metrics[unexpected_values_metric.id]) == 2\n assert metrics[unexpected_values_metric.id] == [\n {\"a\": 1.0, \"b\": 1.0, \"c\": 2.0},\n {\"a\": 4.0, \"b\": 4.0, \"c\": 4.0},\n ]\n" ]
[ [ "pandas.DataFrame", "pandas.Index", "numpy.isnan", "pandas.Series" ] ]
HenryKenlay/DeepRobust
[ "3f56dcc45f1fed788423d32cc179c26513416e2e", "3f56dcc45f1fed788423d32cc179c26513416e2e" ]
[ "deeprobust/graph/targeted_attack/nettack.py", "deeprobust/image/attack/lbfgs.py" ]
[ "'''\n Adversarial Attacks on Neural Networks for Graph Data. KDD 2018.\n https://arxiv.org/pdf/1805.07984.pdf\n Author's Implementation\n https://github.com/danielzuegner/nettack\n\n Since pytorch does not have good enough support to the operations on sparse tensor,\nthis part of code is heavily based on the author's implementation.\n'''\n\nimport torch\nfrom deeprobust.graph.targeted_attack import BaseAttack\nfrom torch.nn.parameter import Parameter\nfrom deeprobust.graph import utils\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.nn.modules.module import Module\nfrom torch.nn.parameter import Parameter\nimport numpy as np\nimport scipy.sparse as sp\nfrom copy import deepcopy\nfrom numba import jit\nfrom torch import spmm\n\nclass Nettack(BaseAttack):\n\n def __init__(self, model, nnodes=None, attack_structure=True, attack_features=False, device='cpu'):\n\n super(Nettack, self).__init__(model, nnodes, attack_structure=attack_structure, attack_features=attack_features, device=device)\n\n self.structure_perturbations = []\n self.feature_perturbations = []\n self.influencer_nodes = []\n self.potential_edges = []\n\n self.cooc_constraint = None\n\n def filter_potential_singletons(self, modified_adj):\n \"\"\"\n Computes a mask for entries potentially leading to singleton nodes, i.e. one of the two nodes corresponding to\n the entry have degree 1 and there is an edge between the two nodes.\n \"\"\"\n\n degrees = modified_adj.sum(0)\n degree_one = (degrees == 1)\n resh = degree_one.repeat(self.nnodes, 1).float()\n l_and = resh * modified_adj\n logical_and_symmetric = l_and + l_and.t()\n flat_mask = 1 - logical_and_symmetric\n return flat_mask\n\n def get_linearized_weight(self):\n surrogate = self.surrogate\n W = surrogate.gc1.weight @ surrogate.gc2.weight\n return W.detach().cpu().numpy()\n\n def attack(self, features, adj, labels, target_node, n_perturbations, direct=True, n_influencers= 0, ll_cutoff=0.004, verbose=True):\n \"\"\"\n Perform an attack on the surrogate model.\n \"\"\"\n\n if self.nnodes is None:\n self.nnodes = adj.shape[0]\n\n self.target_node = target_node\n # ori_adj = adj\n # modified_adj = deepcopy(adj)\n if type(adj) is torch.Tensor:\n self.ori_adj = utils.to_scipy(adj).tolil()\n self.modified_adj = utils.to_scipy(adj).tolil()\n self.ori_features = utils.to_scipy(features).tolil()\n self.modified_features = utils.to_scipy(features).tolil()\n else:\n self.ori_adj = adj.tolil()\n self.modified_adj = adj.tolil()\n self.ori_features = features.tolil()\n self.modified_features = features.tolil()\n\n self.cooc_matrix = self.modified_features.T.dot(self.modified_features).tolil()\n\n attack_features = self.attack_features\n attack_structure = self.attack_structure\n assert not (direct==False and n_influencers==0), \"indirect mode requires at least one influencer node\"\n assert n_perturbations > 0, \"need at least one perturbation\"\n assert attack_features or attack_structure, \"either attack_features or attack_structure must be true\"\n\n # adj_norm = utils.normalize_adj_tensor(modified_adj, sparse=True)\n self.adj_norm = utils.normalize_adj(self.modified_adj)\n self.W = self.get_linearized_weight()\n\n logits = (self.adj_norm @ self.adj_norm @ self.modified_features @ self.W )[target_node]\n\n self.label_u = labels[target_node]\n label_target_onehot = np.eye(int(self.nclass))[labels[target_node]]\n best_wrong_class = (logits - 1000*label_target_onehot).argmax()\n surrogate_losses = [logits[labels[target_node]] - 
logits[best_wrong_class]]\n\n if verbose:\n print(\"##### Starting attack #####\")\n if attack_structure and attack_features:\n print(\"##### Attack node with ID {} using structure and feature perturbations #####\".format(target_node))\n elif attack_features:\n print(\"##### Attack only using feature perturbations #####\")\n elif attack_structure:\n print(\"##### Attack only using structure perturbations #####\")\n if direct:\n print(\"##### Attacking the node directly #####\")\n else:\n print(\"##### Attacking the node indirectly via {} influencer nodes #####\".format(n_influencers))\n print(\"##### Performing {} perturbations #####\".format(n_perturbations))\n\n if attack_structure:\n # Setup starting values of the likelihood ratio test.\n degree_sequence_start = self.ori_adj.sum(0).A1\n current_degree_sequence = self.modified_adj.sum(0).A1\n d_min = 2\n\n S_d_start = np.sum(np.log(degree_sequence_start[degree_sequence_start >= d_min]))\n current_S_d = np.sum(np.log(current_degree_sequence[current_degree_sequence >= d_min]))\n n_start = np.sum(degree_sequence_start >= d_min)\n current_n = np.sum(current_degree_sequence >= d_min)\n alpha_start = compute_alpha(n_start, S_d_start, d_min)\n\n log_likelihood_orig = compute_log_likelihood(n_start, alpha_start, S_d_start, d_min)\n\n if len(self.influencer_nodes) == 0:\n if not direct:\n # Choose influencer nodes\n infls, add_infls = self.get_attacker_nodes(n_influencers, add_additional_nodes=True)\n self.influencer_nodes = np.concatenate((infls, add_infls)).astype(\"int\")\n # Potential edges are all edges from any attacker to any other node, except the respective\n # attacker itself or the node being attacked.\n self.potential_edges = np.row_stack([np.column_stack((np.tile(infl, self.nnodes - 2),\n np.setdiff1d(np.arange(self.nnodes),\n np.array([target_node,infl])))) for infl in\n self.influencer_nodes])\n if verbose:\n print(\"Influencer nodes: {}\".format(self.influencer_nodes))\n else:\n # direct attack\n influencers = [target_node]\n self.potential_edges = np.column_stack((np.tile(target_node, self.nnodes-1), np.setdiff1d(np.arange(self.nnodes), target_node)))\n self.influencer_nodes = np.array(influencers)\n\n self.potential_edges = self.potential_edges.astype(\"int32\")\n\n for _ in range(n_perturbations):\n if verbose:\n print(\"##### ...{}/{} perturbations ... 
#####\".format(_+1, n_perturbations))\n if attack_structure:\n\n # Do not consider edges that, if removed, result in singleton edges in the graph.\n singleton_filter = filter_singletons(self.potential_edges, self.modified_adj)\n filtered_edges = self.potential_edges[singleton_filter]\n\n # Update the values for the power law likelihood ratio test.\n\n deltas = 2 * (1 - self.modified_adj[tuple(filtered_edges.T)].toarray()[0] )- 1\n d_edges_old = current_degree_sequence[filtered_edges]\n d_edges_new = current_degree_sequence[filtered_edges] + deltas[:, None]\n new_S_d, new_n = update_Sx(current_S_d, current_n, d_edges_old, d_edges_new, d_min)\n new_alphas = compute_alpha(new_n, new_S_d, d_min)\n new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d, d_min)\n alphas_combined = compute_alpha(new_n + n_start, new_S_d + S_d_start, d_min)\n new_ll_combined = compute_log_likelihood(new_n + n_start, alphas_combined, new_S_d + S_d_start, d_min)\n new_ratios = -2 * new_ll_combined + 2 * (new_ll + log_likelihood_orig)\n\n # Do not consider edges that, if added/removed, would lead to a violation of the\n # likelihood ration Chi_square cutoff value.\n powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)\n filtered_edges_final = filtered_edges[powerlaw_filter]\n\n # Compute new entries in A_hat_square_uv\n a_hat_uv_new = self.compute_new_a_hat_uv(filtered_edges_final, target_node)\n # Compute the struct scores for each potential edge\n struct_scores = self.struct_score(a_hat_uv_new, self.modified_features @ self.W)\n best_edge_ix = struct_scores.argmin()\n best_edge_score = struct_scores.min()\n best_edge = filtered_edges_final[best_edge_ix]\n\n if attack_features:\n # Compute the feature scores for each potential feature perturbation\n feature_ixs, feature_scores = self.feature_scores()\n best_feature_ix = feature_ixs[0]\n best_feature_score = feature_scores[0]\n\n if attack_structure and attack_features:\n # decide whether to choose an edge or feature to change\n if best_edge_score < best_feature_score:\n if verbose:\n print(\"Edge perturbation: {}\".format(best_edge))\n change_structure = True\n else:\n if verbose:\n print(\"Feature perturbation: {}\".format(best_feature_ix))\n change_structure=False\n\n elif attack_structure:\n change_structure = True\n elif attack_features:\n change_structure = False\n\n if change_structure:\n # perform edge perturbation\n self.modified_adj[tuple(best_edge)] = self.modified_adj[tuple(best_edge[::-1])] = 1 - self.modified_adj[tuple(best_edge)]\n self.adj_norm = utils.normalize_adj(self.modified_adj)\n\n self.structure_perturbations.append(tuple(best_edge))\n self.feature_perturbations.append(())\n surrogate_losses.append(best_edge_score)\n\n # Update likelihood ratio test values\n current_S_d = new_S_d[powerlaw_filter][best_edge_ix]\n current_n = new_n[powerlaw_filter][best_edge_ix]\n current_degree_sequence[best_edge] += deltas[powerlaw_filter][best_edge_ix]\n\n else:\n self.modified_features[tuple(best_feature_ix)] = 1 - self.modified_features[tuple(best_feature_ix)]\n self.feature_perturbations.append(tuple(best_feature_ix))\n self.structure_perturbations.append(())\n surrogate_losses.append(best_feature_score)\n\n # return self.modified_adj, self.modified_features\n\n def get_attacker_nodes(self, n=5, add_additional_nodes = False):\n assert n < self.nnodes-1, \"number of influencers cannot be >= number of nodes in the graph!\"\n neighbors = self.ori_adj[self.target_node].nonzero()[1]\n assert self.target_node not in neighbors\n\n potential_edges = 
np.column_stack((np.tile(self.target_node, len(neighbors)),neighbors)).astype(\"int32\")\n\n # The new A_hat_square_uv values that we would get if we removed the edge from u to each of the neighbors, respectively\n a_hat_uv = self.compute_new_a_hat_uv(potential_edges, self.target_node)\n\n # XW = self.compute_XW()\n XW = self.modified_features @ self.W\n\n # compute the struct scores for all neighbors\n struct_scores = self.struct_score(a_hat_uv, XW)\n if len(neighbors) >= n: # do we have enough neighbors for the number of desired influencers?\n influencer_nodes = neighbors[np.argsort(struct_scores)[:n]]\n if add_additional_nodes:\n return influencer_nodes, np.array([])\n return influencer_nodes\n else:\n\n influencer_nodes = neighbors\n if add_additional_nodes: # Add additional influencers by connecting them to u first.\n # Compute the set of possible additional influencers, i.e. all nodes except the ones\n # that are already connected to u.\n poss_add_infl = np.setdiff1d(np.setdiff1d(np.arange(self.nnodes),neighbors), self.target_node)\n n_possible_additional = len(poss_add_infl)\n n_additional_attackers = n-len(neighbors)\n possible_edges = np.column_stack((np.tile(self.target_node, n_possible_additional), poss_add_infl))\n\n # Compute the struct_scores for all possible additional influencers, and choose the one\n # with the best struct score.\n a_hat_uv_additional = self.compute_new_a_hat_uv(possible_edges, self.target_node)\n additional_struct_scores = self.struct_score(a_hat_uv_additional, XW)\n additional_influencers = poss_add_infl[np.argsort(additional_struct_scores)[-n_additional_attackers::]]\n\n return influencer_nodes, additional_influencers\n else:\n return influencer_nodes\n\n def compute_logits(self):\n return (self.adj_norm @ self.adj_norm @ self.modified_features @ self.W)[self.target_node]\n\n def strongest_wrong_class(self, logits):\n label_u_onehot = np.eye(self.nclass)[self.label_u]\n return (logits - 1000*label_u_onehot).argmax()\n\n def feature_scores(self):\n \"\"\"\n Compute feature scores for all possible feature changes.\n \"\"\"\n\n if self.cooc_constraint is None:\n self.compute_cooccurrence_constraint(self.influencer_nodes)\n logits = self.compute_logits()\n best_wrong_class = self.strongest_wrong_class(logits)\n surrogate_loss = logits[self.label_u] - logits[best_wrong_class]\n\n gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)\n # gradients_flipped = (gradient * -1).tolil()\n gradients_flipped = sp.lil_matrix(gradient * -1)\n gradients_flipped[self.modified_features.nonzero()] *= -1\n\n X_influencers = sp.lil_matrix(self.modified_features.shape)\n X_influencers[self.influencer_nodes] = self.modified_features[self.influencer_nodes]\n gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)\n nnz_ixs = np.array(gradients_flipped.nonzero()).T\n\n sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1\n sorted_ixs = nnz_ixs[sorting]\n grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]\n\n scores = surrogate_loss - grads\n return sorted_ixs[::-1], scores.A1[::-1]\n\n def compute_cooccurrence_constraint(self, nodes):\n \"\"\"\n Co-occurrence constraint as described in the paper.\n\n Parameters\n ----------\n nodes: np.array\n Nodes whose features are considered for change\n\n Returns\n -------\n np.array [len(nodes), D], dtype bool\n Binary matrix of dimension len(nodes) x D. 
A 1 in entry n,d indicates that\n we are allowed to add feature d to the features of node n.\n\n \"\"\"\n\n words_graph = self.cooc_matrix.copy()\n D = self.modified_features.shape[1]\n words_graph.setdiag(0)\n words_graph = (words_graph > 0)\n word_degrees = np.sum(words_graph, axis=0).A1\n\n inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)\n\n sd = np.zeros([self.nnodes])\n for n in range(self.nnodes):\n n_idx = self.modified_features[n, :].nonzero()[1]\n sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])\n\n scores_matrix = sp.lil_matrix((self.nnodes, D))\n\n for n in nodes:\n common_words = words_graph.multiply(self.modified_features[n])\n idegs = inv_word_degrees[common_words.nonzero()[1]]\n nnz = common_words.nonzero()[0]\n scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])\n scores_matrix[n] = scores\n self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0)\n\n def gradient_wrt_x(self, label):\n # return self.adj_norm.dot(self.adj_norm)[self.target_node].T.dot(self.W[:, label].T)\n return self.adj_norm.dot(self.adj_norm)[self.target_node].T.dot(self.W[:, label].reshape(1, -1))\n\n def reset(self):\n \"\"\"\n Reset Nettack\n \"\"\"\n self.modified_adj = self.ori_adj.copy()\n self.modified_features = self.ori_features.copy()\n self.structure_perturbations = []\n self.feature_perturbations = []\n self.influencer_nodes = []\n self.potential_edges = []\n self.cooc_constraint = None\n\n\n def struct_score(self, a_hat_uv, XW):\n \"\"\"\n Compute structure scores, cf. Eq. 15 in the paper\n\n Parameters\n ----------\n a_hat_uv: sp.sparse_matrix, shape [P,2]\n Entries of matrix A_hat^2_u for each potential edge (see paper for explanation)\n\n XW: sp.sparse_matrix, shape [N, K], dtype float\n The class logits for each node.\n\n Returns\n -------\n np.array [P,]\n The struct score for every row in a_hat_uv\n \"\"\"\n\n logits = a_hat_uv.dot(XW)\n label_onehot = np.eye(XW.shape[1])[self.label_u]\n best_wrong_class_logits = (logits - 1000 * label_onehot).max(1)\n logits_for_correct_class = logits[:,self.label_u]\n struct_scores = logits_for_correct_class - best_wrong_class_logits\n\n return struct_scores\n\n def compute_new_a_hat_uv(self, potential_edges, target_node):\n \"\"\"\n Compute the updated A_hat_square_uv entries that would result from inserting/deleting the input edges,\n for every edge.\n\n Parameters\n ----------\n potential_edges: np.array, shape [P,2], dtype int\n The edges to check.\n\n Returns\n -------\n sp.sparse_matrix: updated A_hat_square_u entries, a sparse PxN matrix, where P is len(possible_edges).\n \"\"\"\n\n edges = np.array(self.modified_adj.nonzero()).T\n edges_set = {tuple(x) for x in edges}\n A_hat_sq = self.adj_norm @ self.adj_norm\n values_before = A_hat_sq[target_node].toarray()[0]\n node_ixs = np.unique(edges[:, 0], return_index=True)[1]\n twohop_ixs = np.array(A_hat_sq.nonzero()).T\n degrees = self.modified_adj.sum(0).A1 + 1\n\n ixs, vals = compute_new_a_hat_uv(edges, node_ixs, edges_set, twohop_ixs, values_before, degrees,\n potential_edges.astype(np.int32), target_node)\n ixs_arr = np.array(ixs)\n a_hat_uv = sp.coo_matrix((vals, (ixs_arr[:, 0], ixs_arr[:, 1])), shape=[len(potential_edges), self.nnodes])\n\n return a_hat_uv\n\n@jit(nopython=True)\ndef connected_after(u, v, connected_before, delta):\n if u == v:\n if delta == -1:\n return False\n else:\n return True\n else:\n return connected_before\n\n\n@jit(nopython=True)\ndef compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs, values_before, 
degs, potential_edges, u):\n \"\"\"\n Compute the new values [A_hat_square]_u for every potential edge, where u is the target node. C.f. Theorem 5.1\n equation 17.\n\n \"\"\"\n N = degs.shape[0]\n\n twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]\n nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]\n nbs_u_set = set(nbs_u)\n\n return_ixs = []\n return_values = []\n\n for ix in range(len(potential_edges)):\n edge = potential_edges[ix]\n edge_set = set(edge)\n degs_new = degs.copy()\n delta = -2 * ((edge[0], edge[1]) in edges_set) + 1\n degs_new[edge] += delta\n\n nbs_edge0 = edge_ixs[edge_ixs[:, 0] == edge[0], 1]\n nbs_edge1 = edge_ixs[edge_ixs[:, 0] == edge[1], 1]\n\n affected_nodes = set(np.concatenate((twohop_u, nbs_edge0, nbs_edge1)))\n affected_nodes = affected_nodes.union(edge_set)\n a_um = edge[0] in nbs_u_set\n a_un = edge[1] in nbs_u_set\n\n a_un_after = connected_after(u, edge[0], a_un, delta)\n a_um_after = connected_after(u, edge[1], a_um, delta)\n\n for v in affected_nodes:\n a_uv_before = v in nbs_u_set\n a_uv_before_sl = a_uv_before or v == u\n\n if v in edge_set and u in edge_set and u != v:\n if delta == -1:\n a_uv_after = False\n else:\n a_uv_after = True\n else:\n a_uv_after = a_uv_before\n a_uv_after_sl = a_uv_after or v == u\n\n from_ix = node_nb_ixs[v]\n to_ix = node_nb_ixs[v + 1] if v < N - 1 else len(edge_ixs)\n node_nbs = edge_ixs[from_ix:to_ix, 1]\n node_nbs_set = set(node_nbs)\n a_vm_before = edge[0] in node_nbs_set\n\n a_vn_before = edge[1] in node_nbs_set\n a_vn_after = connected_after(v, edge[0], a_vn_before, delta)\n a_vm_after = connected_after(v, edge[1], a_vm_before, delta)\n\n mult_term = 1 / np.sqrt(degs_new[u] * degs_new[v])\n\n sum_term1 = np.sqrt(degs[u] * degs[v]) * values_before[v] - a_uv_before_sl / degs[u] - a_uv_before / \\\n degs[v]\n sum_term2 = a_uv_after / degs_new[v] + a_uv_after_sl / degs_new[u]\n sum_term3 = -((a_um and a_vm_before) / degs[edge[0]]) + (a_um_after and a_vm_after) / degs_new[edge[0]]\n sum_term4 = -((a_un and a_vn_before) / degs[edge[1]]) + (a_un_after and a_vn_after) / degs_new[edge[1]]\n new_val = mult_term * (sum_term1 + sum_term2 + sum_term3 + sum_term4)\n\n return_ixs.append((ix, v))\n return_values.append(new_val)\n\n return return_ixs, return_values\n\ndef filter_singletons(edges, adj):\n \"\"\"\n Filter edges that, if removed, would turn one or more nodes into singleton nodes.\n \"\"\"\n\n\n degs = np.squeeze(np.array(np.sum(adj,0)))\n existing_edges = np.squeeze(np.array(adj.tocsr()[tuple(edges.T)]))\n if existing_edges.size > 0:\n edge_degrees = degs[np.array(edges)] + 2*(1-existing_edges[:,None]) - 1\n else:\n edge_degrees = degs[np.array(edges)] + 1\n\n zeros = edge_degrees == 0\n zeros_sum = zeros.sum(1)\n return zeros_sum == 0\n\ndef compute_alpha(n, S_d, d_min):\n \"\"\"\n Approximate the alpha of a power law distribution.\n\n \"\"\"\n\n return n / (S_d - n * np.log(d_min - 0.5)) + 1\n\n\ndef update_Sx(S_old, n_old, d_old, d_new, d_min):\n \"\"\"\n Update on the sum of log degrees S_d and n based on degree distribution resulting from inserting or deleting\n a single edge.\n \"\"\"\n\n old_in_range = d_old >= d_min\n new_in_range = d_new >= d_min\n\n d_old_in_range = np.multiply(d_old, old_in_range)\n d_new_in_range = np.multiply(d_new, new_in_range)\n\n new_S_d = S_old - np.log(np.maximum(d_old_in_range, 1)).sum(1) + np.log(np.maximum(d_new_in_range, 1)).sum(1)\n new_n = n_old - np.sum(old_in_range, 1) + np.sum(new_in_range, 1)\n\n return new_S_d, new_n\n\n\ndef compute_log_likelihood(n, alpha, S_d, d_min):\n \"\"\"\n 
Compute log likelihood of the powerlaw fit.\n\n \"\"\"\n\n return n * np.log(alpha) + n * alpha * np.log(d_min) + (alpha + 1) * S_d\n\ndef filter_chisquare(ll_ratios, cutoff):\n return ll_ratios < cutoff\n", "import torch\nimport torch.nn as nn\nimport scipy.optimize as so\nimport numpy as np\nimport torch.nn.functional as F #233\n\nfrom deeprobust.image.attack.base_attack import BaseAttack\n\nclass LBFGS(BaseAttack):\n\n def __init__(self, model, label, device = 'cuda' ):\n super(LBFGS, self).__init__(model, device)\n\n def generate(self, image, label, target_label, **kwargs):\n assert self.check_type_device(image, label)\n assert self.parse_params(**kwargs)\n self.target_label = target_label\n adv_img, self.dist, self.loss = optimize(self.model,\n self.image,\n self.label,\n self.target_label,\n self.bounds,\n self.epsilon,\n self.maxiter,\n self.class_num,\n self.device)\n return adv_img\n\n def distance(self):\n return self.dist\n\n def loss(self):\n return self.loss\n\n def parse_params(self,\n clip_max = 1,\n clip_min = 0,\n class_num = 10,\n epsilon = 1e-5, #step of finding initial c\n maxiter = 20, #maximum of iteration in lbfgs optimization\n ):\n self.epsilon = epsilon\n self.maxiter = maxiter\n self.class_num = class_num\n self.bounds = (clip_min, clip_max)\n return True\n\ndef optimize(model, image, label, target_label, bounds, epsilon, maxiter, class_num, device):\n x_t = image\n x0 = image[0].to('cpu').detach().numpy()\n min_, max_ = bounds\n\n target_dist = torch.tensor(target_label)\n target_dist = target_dist.unsqueeze_(0).long().to(device)\n\n # store the shape for later and operate on the flattened input\n\n shape = x0.shape\n dtype = x0.dtype\n x0 = x0.flatten().astype(np.float64)\n\n n = len(x0)\n bounds = [(min_, max_)] * n\n\n def distance(x,y):\n # calculate the distance\n x = torch.from_numpy(x).double()\n y = torch.from_numpy(y).double()\n\n dist_squ = torch.norm(x - y)\n return dist_squ **2\n\n def loss(x, c):\n #calculate the target function\n v1 = distance(x0,x)\n\n x = torch.tensor(x.astype(dtype).reshape(shape))\n x = x.unsqueeze_(0).float().to(device)\n\n predict = model(x)\n v2 = F.nll_loss(predict, target_dist)\n\n v = c * v1 + v2\n #print(v)\n return np.float64(v)\n\n def pending_attack(target_model, adv_exp, target_label):\n # pending if the attack success\n adv_exp = adv_exp.reshape(shape).astype(dtype)\n adv_exp = torch.from_numpy(adv_exp)\n adv_exp = adv_exp.unsqueeze_(0).float().to(device)\n\n predict1 = target_model(adv_exp)\n label = predict1.argmax(dim=1, keepdim=True)\n if label == target_label:\n return True\n else:\n return False\n\n def lbfgs_b(c):\n\n #initial the variables\n approx_grad_eps = (max_ - min_) / 100\n print('in lbfgs_b:', 'c =', c)\n\n #start optimization\n optimize_output, f, d = so.fmin_l_bfgs_b(\n loss,\n x0,\n args=(c,),\n approx_grad = True,\n bounds = bounds,\n m = 15,\n maxiter = maxiter,\n factr = 1e10, #optimization accuracy\n maxls = 5,\n epsilon = approx_grad_eps)\n print('finish optimization')\n\n # LBFGS-B does not always exactly respect the boundaries\n if np.amax(optimize_output) > max_ or np.amin(optimize_output) < min_: # pragma: no coverage\n logging.info('Input out of bounds (min, max = {}, {}). 
Performing manual clip.'.format(\n np.amin(optimize_output), np.amax(optimize_output)))\n\n optimize_output = np.clip(optimize_output, min_, max_)\n\n #optimize_output = optimize_output.reshape(shape).astype(dtype)\n #test_input = torch.from_numpy(optimize_output)\n #print(test_input)\n #test_input = test_input.unsqueeze_(0).float()\n is_adversarial = pending_attack(target_model = model, adv_exp = optimize_output, target_label = target_label)\n return optimize_output, is_adversarial\n\n #x_new, isadv = lbfgs_b(0)\n\n\n # finding initial c\n c = epsilon\n print('finding initial c:')\n\n for i in range(30):\n c = 2 * c\n x_new, is_adversarial = lbfgs_b(c)\n if is_adversarial == False:\n break\n\n print('start binary search:')\n if is_adversarial == True: # pragma: no cover\n print('Could not find an adversarial; maybe the model returns wrong gradients')\n return\n\n print('c_high:',c)\n\n # binary search\n c_low = 0\n c_high = c\n while c_high - c_low >= epsilon:\n print(c_high,' ',c_low)\n c_half = (c_low + c_high) / 2\n x_new, is_adversarial = lbfgs_b(c_half)\n\n if is_adversarial:\n c_low = c_half\n else:\n c_high = c_half\n\n x_new, is_adversarial = lbfgs_b(c_low)\n dis = distance(x_new, x0)\n mintargetfunc = loss(x_new, c_low)\n\n x_new = x_new.astype(dtype)\n x_new = x_new.reshape(shape)\n\n x_new = torch.from_numpy(x_new).unsqueeze_(0).float().to(device)\n\n return x_new, dis, mintargetfunc\n\n\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.log", "numpy.sum", "numpy.tile", "numpy.eye", "numpy.multiply", "numpy.arange", "numpy.sqrt", "numpy.argsort", "scipy.sparse.lil_matrix", "scipy.sparse.csr_matrix", "numpy.unique", "numpy.maximum" ], [ "scipy.optimize.fmin_l_bfgs_b", "torch.norm", "torch.from_numpy", "numpy.float64", "numpy.amin", "torch.tensor", "numpy.amax", "numpy.clip", "torch.nn.functional.nll_loss" ] ]
SimonLBSoerensen/IntroductionToArtificialIntelligenceGroup9-NotRobot
[ "a26f2badc4f7625ccee14faf194d051071cff627" ]
[ "map_gen/.ipynb_checkpoints/map_gen-checkpoint.py" ]
[ "import numpy as np\nimport hashlib\n\ndef flatten(arr):\n return [el for sl in arr for el in sl]\n\ntemplets = [\n [\n [\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n ],\n []\n ],\n [\n [\n [\"#\", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n ],\n []\n ],\n [\n [\n [\"#\", \"#\", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \" \", \" \"],\n [\".\", \" \"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \" \"],\n ],\n []\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\"#\", \" \", \" \"],\n [\"#\", \" \", \" \"],\n ],\n []\n ],\n [\n [\n [\"#\", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\" \", \" \", \"#\"],\n ],\n [\n [\".\", \".\", \" \", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \".\"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\"#\", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \".\"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \" \", \" \"],\n [\" \", \" \", \" \"],\n [\"#\", \" \", \"#\"],\n ],\n [\n [\".\", \".\", \" \", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \".\"],\n [\".\", \".\"],\n [\".\", \".\", \" \", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \" \", \"#\"],\n [\" \", \" \", \" \"],\n [\"#\", \" \", \"#\"],\n ],\n [\n [\".\", \".\", \" \", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \" \"],\n [\".\", \".\"],\n [\".\", \".\", \" \", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \" \", \"#\"],\n [\"#\", \" \", \" \"],\n [\"#\", \"#\", \"#\"],\n ],\n [\n [\".\", \".\", \" \", \".\", \".\"],\n [\".\", \".\"],\n [\".\", \" \"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\" \", \" \", \" \"],\n [\"#\", \"#\", \"#\"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \" \"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\" \", \" \", \" \"],\n [\" \", \"#\", \" \"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \" \"],\n [\".\", \" \"],\n [\".\", \".\"],\n [\".\", \".\", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\"#\", \"#\", \"#\"],\n [\"#\", \"#\", \"#\"],\n ],\n []\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\"#\", \" \", \" \"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\" \", \".\"],\n [\" \", \" \", \".\", \".\", \".\"]\n ]\n ],\n [\n [\n [\" \", \" \", \" \"],\n [\"#\", \" \", \"#\"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \" \", \".\", \" \", \".\"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\".\", \" \", \".\", \" \", \".\"]\n ]\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\"#\", \"#\", \"#\"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\".\", \".\"],\n [\".\", \" \", \" \", \" \", \".\"]\n ]\n ],\n [\n [\n [\"#\", \"#\", \"#\"],\n [\" \", \"#\", \" \"],\n [\" \", \" \", \" \"],\n ],\n [\n [\".\", \".\", \".\", \".\", \".\"],\n [\".\", \".\"],\n [\" \", \" \"],\n [\".\", \".\"],\n [\".\", \" \", \" \", \".\", \".\"]\n ]\n ]\n]\n\ntail_color = {\n \"#\": [156, 134, 89, 255],\n \"P\": [138, 48, 74, 255],\n \"C\": [219, 185, 48, 255],\n \"X\": [41, 128, 73, 255],\n \" \": [188, 194, 193, 
255],\n \"?\": [255, 0, 0, 255],\n}\n\ndef makeGhostHole(goust_block):\n if len(goust_block):\n for i in range(len(goust_block)):\n if len(goust_block[i]) != 5:\n goust_block[i] = [goust_block[i][0], \".\", \".\", \".\", goust_block[i][1]]\n goust_block = np.array(goust_block)\n else:\n goust_block = np.full((5,5), \".\")\n return goust_block\n\ndef vis_templet(templet):\n templet_block = np.array(templet[0])\n goust_block = templet[1]\n goust_block = makeGhostHole(goust_block)\n \n tamplet_img = np.full((5,5, 4), [139, 140, 143, 255])\n \n \n \n for tail in tail_color:\n x, y = np.where(templet_block == tail)\n \n tamplet_img[x+1,y+1] = tail_color[tail]\n \n if len(goust_block) != 0:\n x, y = np.where(goust_block == tail)\n\n gost_color = [tail_color[tail][0], tail_color[tail][1], tail_color[tail][2], 255//2]\n tamplet_img[x,y] = gost_color\n \n return tamplet_img\n \ndef rotate_templet(templet, rotatison = 1):\n blocks = np.rot90(templet[0], rotatison)\n ghost = np.rot90(makeGhostHole(templet[1]), rotatison)\n\n new_templet = [blocks, ghost]\n return new_templet\n\n\ndef _makeGhostSize(templet):\n blocks = np.array(templet[0])\n ghost = makeGhostHole(templet[1])\n\n new_templet = [blocks, ghost]\n return new_templet\n\ndef _get_blokCollsionArea(block, direction):\n block_block = np.array(block[0])\n block_ghost = np.array(block[1])\n \n if direction == \"up\":\n return np.array([list(block_ghost[0,:]), [\".\"]+list(block_block[0,:])+[\".\"]])\n if direction == \"down\":\n return np.array([[\".\"]+list(block_block[-1,:])+[\".\"], list(block_ghost[-1,:])])\n if direction == \"right\":\n return np.array([[\".\"]+list(block_block[:,-1])+[\".\"], list(block_ghost[:,-1])])\n if direction == \"left\":\n return np.array([list(block_ghost[:,0]), [\".\"]+list(block_block[:,0])+[\".\"]])\n \ndef _chekCollison(tail1, tail2):\n if tail1 == \".\" or tail2 == \".\":\n return False\n if tail1 == tail2:\n return False\n return True\n \ndef _chekBlockConfig(block1, block2, direction):\n if direction == \"up\":\n block1_ca = _get_blokCollsionArea(block1, direction)\n block2_ca = _get_blokCollsionArea(block2, \"down\")\n elif direction == \"down\":\n block1_ca = _get_blokCollsionArea(block1, direction)\n block2_ca = _get_blokCollsionArea(block2, \"up\")\n elif direction == \"right\":\n block1_ca = _get_blokCollsionArea(block1, direction)\n block2_ca = _get_blokCollsionArea(block2, \"left\")\n elif direction == \"left\":\n block1_ca = _get_blokCollsionArea(block1, direction)\n block2_ca = _get_blokCollsionArea(block2, \"right\")\n else:\n print(\"Error\", direction)\n \n n, m = block1_ca.shape\n for n_index in range(n):\n for m_index in range(m):\n tail1 = block1_ca[n_index,m_index]\n tail2 = block2_ca[n_index,m_index]\n if _chekCollison(tail1, tail2):\n return False\n return True\n \n \n\ndef chekBlockConfig(center, up=None, up_rot = 0, left=None, left_rot = 0, down=None, down_rot = 0, right=None, right_rot = 0):\n center = _makeGhostSize(center)\n if up is not None:\n up = _makeGhostSize(up)\n if up_rot:\n up = rotate_templet(up, up_rot)\n if _chekBlockConfig(center, up, \"up\") == False:\n return False\n if left is not None:\n left = _makeGhostSize(left)\n if left_rot:\n left = rotate_templet(left, left_rot)\n if _chekBlockConfig(center, left, \"left\") == False:\n return False\n if down is not None:\n down = _makeGhostSize(down)\n if down_rot:\n down = rotate_templet(down, down_rot)\n if _chekBlockConfig(center, down, \"down\") == False:\n return False\n if right is not None:\n right = 
_makeGhostSize(right)\n if right_rot:\n right = rotate_templet(right, right_rot)\n if _chekBlockConfig(center, right, \"right\") == False:\n return False\n return True\n \n \ndef get_neighbors_index(index, maps_size):\n n, m, = maps_size\n n_idx, m_idx = index\n up = None\n if n_idx > 0:\n up = (n_idx-1, m_idx)\n down = None\n if n_idx < n-1:\n down = (n_idx+1, m_idx)\n\n left = None\n if m_idx > 0:\n left = (n_idx, m_idx-1)\n right = None\n if m_idx < m-1:\n right = (n_idx, m_idx+1)\n\n return up, down, left, right\n\ndef _get_templet_from_index(index, templet_map):\n res = None\n rot = 0\n if index != None:\n res, rot = templet_map[index]\n if res == -1:\n res = None\n else:\n res = templets[res]\n else:\n res = templets[12]\n return res, rot\n\ndef get_neighbors_templet(index, templet_map):\n up_idx, down_idx, left_idx, right_idx = get_neighbors_index(index, [templet_map.shape[0],templet_map.shape[1]])\n \n up, up_rot = _get_templet_from_index(up_idx, templet_map)\n right, right_rot = _get_templet_from_index(right_idx, templet_map)\n down, down_rot = _get_templet_from_index(down_idx, templet_map)\n left, left_rot = _get_templet_from_index(left_idx, templet_map)\n \n return up, up_rot, right, right_rot, down, down_rot, left, left_rot\n\ndef gen_templet_placement(n, m, use_rot = True):\n rots = [0]\n if use_rot:\n rots = [0,1,2,3]\n templet_map = np.full((n,m, 2), [-1, 0])\n for n_idx in range(n):\n for m_idx in range(m):\n up, up_rot, right, right_rot, down, down_rot, left, left_rot = get_neighbors_templet((n_idx,m_idx), templet_map)\n indexs_of_templets = list(range(len(templets)))\n\n tail_incet = False\n while not tail_incet and len(indexs_of_templets):\n templet_to_set_index =np.random.choice(indexs_of_templets)\n\n possible_tailes = []\n\n for rot in rots:\n templet_to_set = rotate_templet(templets[templet_to_set_index], rot)\n config_good = chekBlockConfig(templet_to_set, \n up=up, up_rot=up_rot, \n left = left, left_rot=left_rot, \n down = down, down_rot = down_rot,\n right = right, right_rot = right_rot)\n if config_good:\n possible_tailes.append(rot)\n\n if len(possible_tailes) > 0:\n rot_to_use = np.random.choice(possible_tailes)\n templet_map[(n_idx,m_idx)] = [templet_to_set_index, rot_to_use]\n tail_incet = True\n else:\n indexs_of_templets.remove(templet_to_set_index)\n if not tail_incet:\n return False, None\n return True, templet_map\n \ndef make_char_map(templet_map):\n n, m, _ = templet_map.shape\n templet_iner_size = 3\n game_map = np.full((n*templet_iner_size+2,m*templet_iner_size+2), \"?\")\n \n game_map[0,:] = \"#\"\n game_map[-1,:] = \"#\"\n game_map[:,0] = \"#\"\n game_map[:,-1] = \"#\"\n \n for n_idx in range(n):\n for m_idx in range(m):\n templet_idx, rot = templet_map[n_idx, m_idx]\n\n templet = templets[templet_idx]\n templet = rotate_templet(templet, rot)\n templet_block = templet[0]\n\n game_map[n_idx*templet_iner_size+1 : (n_idx+1)*templet_iner_size+1, m_idx*templet_iner_size+1 : (m_idx+1)*templet_iner_size+1 ] = templet_block\n return game_map\n \ndef char_map_to_img(char_map, tail_color = tail_color):\n n, m = char_map.shape\n map_img = np.full((n, m, 4), [139, 140, 143, 255], dtype=np.uint8)\n \n \n \n for tail in tail_color:\n x, y = np.where(char_map == tail)\n map_img[x,y] = tail_color[tail]\n \n \n return map_img\n \ndef char_map_to_fw(char_map, tailes_remove = [\"P\", \"C\", \"X\"]):\n free_char_map = char_map.copy()\n for tail in tailes_remove:\n x, y = np.where(char_map==tail)\n free_char_map[x,y] = \" \"\n \n return free_char_map\n\ndef 
cal_connectivity(char_map):\n char_map_fw = char_map_to_fw(char_map)\n return _cal_connectivity(char_map_fw)\n\ndef _cal_connectivity(char_map_fw):\n free_space_x, free_space_y = np.where(char_map_fw == \" \")\n start_point_idx = np.random.choice(list(range(len(free_space_x))))\n start_point = [free_space_x[start_point_idx], free_space_y[start_point_idx]]\n\n connectivity = np.zeros(char_map_fw.shape)\n to_visit = [start_point]\n\n #visited_list = []\n\n while len(to_visit):\n point = to_visit.pop()\n connectivity[point[0], point[1]] = 1\n #visited_list.append(visited.copy())\n\n for neighbor_index_x, neighbor_index_y in np.array(point) + np.array([[0, -1], [0, 1], [-1, 0], [1, 0]]):\n\n if char_map_fw[neighbor_index_x, neighbor_index_y ] != \"#\":\n if connectivity[neighbor_index_x, neighbor_index_y ] == 0:\n\n to_visit.append([neighbor_index_x, neighbor_index_y ])\n return connectivity\n\ndef find_largest_connectivity(char_map):\n char_map_fw = char_map_to_fw(char_map)\n \n connectivity_maps = []\n \n while np.sum(char_map_fw == \" \"):\n connectivity = _cal_connectivity(char_map_fw)\n connectivity_maps.append(connectivity)\n \n char_map_fw[connectivity == 1] = \"#\"\n \n free_tailes = [np.sum(connectivity == 1) for connectivity in connectivity_maps]\n max_index = np.argmax(free_tailes)\n \n return connectivity_maps[max_index]\n \nmax_free_spaces = [\n [\n [1, 1, 1, 0],\n [1, 1, 1, 0],\n [1, 1, 1, 0],\n [1, 1, 1, 0],\n ],\n [\n [0, 1, 1, 1],\n [0, 1, 1, 1],\n [0, 1, 1, 1],\n [0, 1, 1, 1],\n ],\n [\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 0, 0],\n ],\n [\n [0, 0, 0, 0],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n ],\n]\nmax_free_spaces = np.array(max_free_spaces)\n\ndef _chek_free_space(b_map_part, free_spaces= max_free_spaces):\n for free_space in free_spaces:\n count_indexs = np.where(free_space == 1)\n if np.sum((b_map_part == free_space)[count_indexs]) == 12:\n return True\n return False\n\ndef chek_max_free_space(char_map):\n n, m = char_map.shape\n\n char_map_fw = char_map_to_fw(char_map)\n b_map = np.zeros((n, m))\n\n free_tail_index = np.where(char_map_fw == \" \")\n b_map[free_tail_index] = 1\n\n for n_idx in range(0, n-4): \n for m_idx in range(0, m-4): \n b_map_part = b_map[n_idx:n_idx+4, m_idx:m_idx+4]\n if _chek_free_space(b_map_part):\n return True, n_idx, n_idx+4, m_idx, m_idx+4\n return False, -1, -1, -1, -1\n\ndef cal_max_objects(char_map):\n char_map_fw = char_map_to_fw(char_map)\n return np.sum(char_map_fw == \" \")-1\n\n\ndef chek_deadend(char_map):\n n, m = char_map.shape\n dead_ends = np.zeros((n, m))\n char_map_fw = char_map_to_fw(char_map)\n free_tailes = np.where(char_map_fw == \" \")\n free_tailes_xy = np.transpose(free_tailes)\n \n for taile in free_tailes_xy:\n neighbor_indexs = np.array(taile) + np.array([[0, -1], [0, 1], [-1, 0], [1, 0]])\n wall_sum = 0\n for neighbor_index in neighbor_indexs:\n wall_sum += char_map_fw[neighbor_index[0], neighbor_index[1]] == \"#\"\n \n if wall_sum > 2:\n dead_ends[taile[0], taile[1]] = 1\n \n return dead_ends\n\ndef gen_char_map(n, m, traies = 1000, deadend_itr = -1):\n pos, templet_map = gen_templet_placement(n,m)\n if not pos:\n return False\n \n char_map = make_char_map(templet_map)\n max_traies = traies\n max_free_ext, n_s, n_e, m_s, m_e = chek_max_free_space(char_map)\n while (max_free_ext and traies > 0):\n pos, templet_map = gen_templet_placement(n,m)\n char_map = make_char_map(templet_map)\n max_free_ext, n_s, n_e, m_s, m_e = chek_max_free_space(char_map)\n traies -= 1\n\n \n visited = 
find_largest_connectivity(char_map)\n\n\n char_map[visited == 0] = \"#\"\n \n \n deadends = chek_deadend(char_map)\n if deadend_itr == 0:\n deadends[:,:] = 0\n deadends_hist = None\n \n while (deadend_itr == -1 or deadend_itr >= 1) and np.sum(deadends):\n \n deadends = chek_deadend(char_map)\n if deadends_hist is None:\n deadends_hist = deadends.copy() \n else:\n deadends_hist[deadends == 1] = 1\n \n char_map[deadends == 1] = \"#\"\n \n if deadend_itr != -1:\n deadend_itr -= 1\n\n \n max_objects = cal_max_objects(char_map)\n return char_map\n\ndef get_free_indexs(char_map):\n return np.transpose(np.where(char_map == \" \"))\n\ndef get_rand_free_index(char_map):\n free_indexs = get_free_indexs(char_map)\n return free_indexs[np.random.choice(np.arange(len(free_indexs)))]\n\ndef get_no_can_pos(move_map, free_spaeces):\n no_move_points_mask = [len(el) == 0 for el in list(move_map.values())]\n indexs = free_spaeces[no_move_points_mask]\n return np.transpose(indexs)\n \ndef cal_move_point(point, move):\n point = np.asarray(point)\n move = np.asarray(move)\n return point+move\n\ndef is_free_space(*points, char_map):\n all_free = True\n for point in points:\n if char_map[point[0], point[1]] != \" \":\n all_free = False\n return all_free\n\ndef finde_all_in_dir(point, directions, char_map):\n points_dir = {}\n for dire in directions:\n points = []\n move_point = point + dire\n while is_free_space(move_point, char_map = char_map):\n points.append(move_point)\n move_point = move_point + dire\n points_dir[tuple(dire)] = points\n return points_dir\n\ndef mark_done(points, vh, done_dir):\n for point in points:\n if tuple(point) not in done_dir:\n done_dir[tuple(point)] = [False, False]\n \n if vh == \"v\":\n done_dir[tuple(point)][0] = True\n elif vh == \"h\":\n done_dir[tuple(point)][1] = True\n \ndef finde_membership(points, grup, point_grup_member):\n if len(points) > 1:\n weak_members = [points[0]] + [points[-1]]\n else:\n weak_members = points\n \n for point in weak_members:\n if tuple(point) not in point_grup_member:\n point_grup_member[tuple(point)] = []\n point_grup_member[tuple(point)].append([grup, \"weak\"])\n \n strong_members = points[1:-1]\n for point in strong_members:\n if tuple(point) not in point_grup_member:\n point_grup_member[tuple(point)] = []\n point_grup_member[tuple(point)].append([grup, \"strong\"])\n \n \n \ndef find_point_grup_member(free_spaeces, char_map): \n done = {}\n dire_move = np.array([[-1,0],[1,0], [0,-1],[0,1]])\n grup = 0\n grups = {}\n\n point_grup_member = {}\n for point in free_spaeces:\n move_points = finde_all_in_dir(point, dire_move, char_map)\n v_dove = False\n h_done = False\n if tuple(point) in done:\n v_dove, h_done = done[tuple(point)]\n\n if not v_dove:\n vertical_points = np.array(move_points[(-1,0)] + [point] + move_points[(1,0)])\n grups[grup] = vertical_points\n finde_membership(vertical_points, grup, point_grup_member)\n grup += 1\n\n mark_done(vertical_points, \"v\", done)\n\n if not h_done:\n hertical_points = np.array(move_points[(0,-1)] + [point] + move_points[(0,1)])\n grups[grup] = hertical_points\n finde_membership(hertical_points, grup, point_grup_member)\n grup += 1\n\n mark_done(hertical_points, \"h\", done)\n return point_grup_member\n\ndef get_no_can_points(char_map):\n free_spaeces = get_free_indexs(char_map)\n point_grup_member = find_point_grup_member(free_spaeces, char_map)\n only_weak_points = []\n for point in point_grup_member:\n only_weak = True\n for grup, membership in point_grup_member[point]:\n if membership == 'strong':\n 
only_weak = False\n if only_weak:\n only_weak_points.append(list(point))\n return np.array(only_weak_points)\n\ndef get_can_indexs(char_map):\n can_points_map = np.zeros(char_map.shape)\n free_spaeces_x,free_spaeces_y = np.transpose(get_free_indexs(char_map))\n can_points_map[free_spaeces_x,free_spaeces_y] = 1\n \n no_can_points_x, no_can_points_y = np.transpose(get_no_can_points(char_map))\n can_points_map[no_can_points_x, no_can_points_y] = 0\n \n return np.transpose(np.where(can_points_map == 1))\n \n\ndef _make_map_with_cx(number_of_cans, char_map):\n char_map_itr = char_map.copy()\n\n player_index = get_rand_free_index(char_map_itr)\n\n char_map_itr[player_index[0], player_index[1]] = \"P\"\n\n pos_cand_points = get_can_indexs(char_map)\n pos_cand_points_indexs = np.random.choice(len(pos_cand_points), size=number_of_cans, replace=False)\n\n if len(pos_cand_points_indexs) == number_of_cans:\n for c_point in pos_cand_points[pos_cand_points_indexs]:\n char_map_itr[c_point[0], c_point[1]] = \"C\"\n\n for i in range(number_of_cans):\n glod_index = get_rand_free_index(char_map_itr) #Use a indexer there removes som obs bad choses \n char_map_itr[glod_index[0], glod_index[1]] = \"X\"\n return char_map_itr\n\ndef make_map_with_cx(number_of_cans, char_map, n, trails = 1000):\n maps = []\n maps_chek = []\n for _ in range(n):\n cx_map = _make_map_with_cx(number_of_cans, char_map)\n \n hash_cx_map = hashlib.md5(str(cmap).encode()).hexdigest()\n while hash_cx_map in maps_chek and trails > 0:\n cx_map = _make_map_with_cx(number_of_cans, char_map)\n hash_cx_map = hashlib.md5(str(cmap).encode()).hexdigest()\n trails -= 1\n \n maps.append(cx_map)\n maps_chek.append(hash_cx_map)\n return maps\n \nn = int(input(\"Map size with (will be 3*input):\"))\nm = int(input(\"Map size height (will be 3*input):\"))\ncnas_n = int(input(\"Number of cans:\"))\nmaps_n = int(input(\"Number of maps:\"))\nprint(\"Making base map......\")\nchar_map = gen_char_map(n,m)\nprint(\"Making maps from base map.....\", \"\\n\")\nfor cmap in make_map_with_cx(number_of_cans = cnas_n, char_map = char_map, n = maps_n):\n print(cmap, \"\\n\")" ]
[ [ "numpy.rot90", "numpy.full", "numpy.array", "numpy.random.choice", "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.where", "numpy.argmax", "numpy.transpose" ] ]
StijnMatsHendriks/adversarial_attack_demo
[ "ff784e90f107a43afdbec826c7e5365b9db90750" ]
[ "applications/graphpipe/graphpipe_onnx_tool.py" ]
[ "#coding=utf-8\n\n# Copyright 2017 - 2018 Baidu Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport logging\nlogging.basicConfig(level=logging.INFO,format=\"%(filename)s[line:%(lineno)d] %(levelname)s %(message)s\")\nlogger=logging.getLogger(__name__)\n\n\n#pip install graphpipe\n#pip install pillow # needed for image manipulation\n\n\n\n'''\n#服务器端启动方式为:\ndocker run -it --rm \\\n -e https_proxy=${https_proxy} \\\n -p 9000:9000 \\\n sleepsonthefloor/graphpipe-onnx:cpu \\\n --value-inputs=https://oracle.github.io/graphpipe/models/squeezenet.value_inputs.json \\\n --model=https://oracle.github.io/graphpipe/models/squeezenet.onnx \\\n --listen=0.0.0.0:9000\n\n\ndocker run -it --rm \\\n -v \"$PWD:/models/\" \\\n -p 9000:9000 \\\n sleepsonthefloor/graphpipe-onnx:cpu \\\n --value-inputs=https://oracle.github.io/graphpipe/models/squeezenet.value_inputs.json \\\n --model=/models/squeezenet.onnx \\\n --listen=0.0.0.0:9000\n'''\n\nfrom io import BytesIO\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport requests\n\nfrom graphpipe import remote\n\ndef main(image_path):\n\n print(\"image_path:{}\".format(image_path))\n\n data = np.array(Image.open(image_path))\n data = data.reshape([1] + list(data.shape))\n data = np.rollaxis(data, 3, 1).astype(np.float32) # channels first\n #print(data.shape)\n\n pred = remote.execute(\"http://127.0.0.1:9000\", data)\n\n print(pred.shape)\n\n dims=pred.shape\n dim=np.max(dims)\n print(dim)\n\n pred=pred.reshape([1,dim])\n #pred = np.squeeze(pred)\n #print(pred)\n print(pred.shape)\n\n print(\"{}\".format(np.argmax(pred, axis=1)))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv[1])" ]
[ [ "numpy.max", "numpy.rollaxis", "numpy.argmax" ] ]
disktnk/chainer
[ "133798db470f6fd95973b882b9ccbd0c9726ac13", "133798db470f6fd95973b882b9ccbd0c9726ac13" ]
[ "tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py", "chainer/optimizers/ada_delta.py" ]
[ "import unittest\n\nimport functools\nimport math\nimport numpy\nfrom operator import mul\nimport six\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.testing import condition\nfrom chainer.utils import conv\nfrom chainer_tests.functions_tests.pooling_tests import pooling_nd_helper\n\n\n@testing.parameterize(*testing.product({\n 'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],\n 'cover_all': [True, False],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\nclass TestMaxPoolingND(unittest.TestCase):\n\n def setUp(self):\n self.ndim = len(self.dims)\n self.ksize = (3,) * self.ndim\n self.stride = (2,) * self.ndim\n self.pad = (1,) * self.ndim\n\n # Avoid unstability of numerical gradient\n x_shape = (2, 3) + self.dims\n self.x = numpy.arange(\n functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)\n self.x = 2 * self.x / self.x.size - 1\n\n outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)\n for (d, k, s, p)\n in six.moves.zip(\n self.dims, self.ksize, self.stride, self.pad))\n gy_shape = (2, 3) + outs\n self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)\n self.ggx = numpy.random.uniform(\n -1, 1, x_shape).astype(self.dtype)\n\n self.check_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_backward_options = {\n 'atol': 1e-3, 'rtol': 1e-2}\n self.check_double_backward_options = {\n 'atol': 1e-3, 'rtol': 1e-2}\n else:\n self.check_backward_options = {\n 'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {\n 'atol': 1e-4, 'rtol': 1e-3}\n\n def check_forward(self, x_data, use_cudnn='always'):\n dims = self.dims\n ksize = self.ksize\n stride = self.stride\n pad = self.pad\n x = chainer.Variable(x_data)\n with chainer.using_config('use_cudnn', use_cudnn):\n y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,\n cover_all=self.cover_all)\n self.assertEqual(y.data.dtype, self.dtype)\n y_data = cuda.to_cpu(y.data)\n\n self.assertEqual(self.gy.shape, y_data.shape)\n patches = pooling_nd_helper.pooling_patches(\n dims, ksize, stride, pad, self.cover_all)\n for k in six.moves.range(2):\n for c in six.moves.range(3):\n x = self.x[k, c]\n expect = numpy.array([x[idx].max() for idx in patches])\n expect = expect.reshape(y_data.shape[2:])\n testing.assert_allclose(expect, y_data[k, c])\n\n @condition.retry(3)\n def test_forward_cpu(self):\n self.check_forward(self.x, use_cudnn='never')\n\n def test_forward_cpu_wide(self): # see #120\n ndim = self.ndim\n x_shape = (2, 3) + (15,) * ndim\n x_data = numpy.random.rand(*x_shape).astype(self.dtype)\n x = chainer.Variable(x_data)\n ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))\n functions.max_pooling_nd(x, ksize, stride=stride, pad=0)\n\n @attr.cudnn\n @condition.retry(3)\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n @attr.cudnn\n @condition.retry(3)\n def test_forward_gpu_non_contiguous(self):\n self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))\n\n @attr.gpu\n @condition.retry(3)\n def test_forward_gpu_no_cudnn(self):\n self.check_forward(cuda.to_gpu(self.x), 'never')\n\n def check_forward_consistency_regression(self, x_data, use_cudnn='always'):\n # Regression test to max_pooling_2d.\n\n if len(self.dims) != 2:\n return\n\n ksize = self.ksize\n stride = self.stride\n pad = self.pad\n\n with chainer.using_config('use_cudnn', use_cudnn):\n y_nd = 
functions.max_pooling_nd(self.x, ksize, stride=stride,\n pad=pad, cover_all=self.cover_all)\n y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,\n pad=pad, cover_all=self.cover_all)\n testing.assert_allclose(y_nd.data, y_2d.data)\n\n @condition.retry(3)\n def test_forward_consistency_regression_cpu(self):\n self.check_forward_consistency_regression(self.x)\n\n @attr.cudnn\n @condition.retry(3)\n def test_forward_consistency_regression_gpu(self):\n self.check_forward_consistency_regression(cuda.to_gpu(self.x))\n\n @attr.gpu\n @condition.retry(3)\n def test_forward_consistency_regression_no_cudnn(self):\n self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')\n\n def check_backward(self, x_data, y_grad, use_cudnn='always'):\n def f(x):\n return functions.max_pooling_nd(\n x, self.ksize, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all)\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_backward(\n f, x_data, y_grad, dtype='d', **self.check_backward_options)\n\n @condition.retry(3)\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.cudnn\n @condition.retry(3)\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n @attr.cudnn\n @condition.retry(3)\n def test_backward_gpu_non_contiguous(self):\n self.check_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))\n\n @attr.gpu\n @condition.retry(3)\n def test_backward_gpu_no_cudnn(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')\n\n def check_backward_consistency_regression(self, x_data, gy_data,\n use_cudnn='always'):\n # Regression test to two-dimensional max pooling layer.\n\n if len(self.dims) != 2:\n return\n\n ksize = self.ksize\n stride = self.stride\n pad = self.pad\n xp = cuda.get_array_module(x_data)\n\n # Backward computation for N-dimensional max pooling layer.\n x_nd = chainer.Variable(xp.array(x_data))\n with chainer.using_config('use_cudnn', use_cudnn):\n func_nd = functions.MaxPoolingND(self.ndim, ksize, stride=stride,\n pad=pad, cover_all=self.cover_all)\n y_nd = func_nd.apply((x_nd,))[0]\n y_nd.grad = gy_data\n y_nd.backward()\n\n # Backward computation for two-dimensional max pooling layer.\n x_2d = chainer.Variable(xp.array(x_data))\n with chainer.using_config('use_cudnn', use_cudnn):\n func_2d = functions.MaxPooling2D(ksize, stride=stride, pad=pad,\n cover_all=self.cover_all)\n y_2d = func_2d.apply((x_2d,))[0]\n y_2d.grad = gy_data\n y_2d.backward()\n\n # Test that the two result gradients are close enough.\n testing.assert_allclose(x_nd.grad, x_2d.grad)\n\n @condition.retry(3)\n def test_backward_consistency_regression_cpu(self):\n self.check_backward_consistency_regression(self.x, self.gy)\n\n @attr.cudnn\n @condition.retry(3)\n def test_backward_consistency_regression_gpu(self):\n self.check_backward_consistency_regression(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n @attr.gpu\n @condition.retry(3)\n def test_backward_consistency_regression_no_cudnn(self):\n self.check_backward_consistency_regression(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')\n\n def test_backward_cpu_more_than_once(self):\n func = functions.MaxPoolingND(\n self.ndim, self.ksize, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all)\n func.apply((self.x,))\n func.backward((self.x,), (self.gy,))\n func.backward((self.x,), (self.gy,))\n\n def check_double_backward(self, x_data, y_grad, x_grad_grad,\n 
use_cudnn='always'):\n def f(x):\n y = functions.max_pooling_nd(\n x, self.ksize, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all)\n return y * y\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, x_data, y_grad, x_grad_grad,\n dtype='d',\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(self.x, self.gy, self.ggx, 'never')\n\n @attr.gpu\n def test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_non_contiguous(self):\n self.check_double_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\n@testing.parameterize(*testing.product({\n 'dims': [(4, 3, 2), (3, 2), (2,)],\n 'use_cudnn': ['always', 'auto', 'never'],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\n@attr.cudnn\nclass TestMaxPoolingNDCudnnCall(unittest.TestCase):\n\n def setUp(self):\n self.ndim = len(self.dims)\n self.ksize = (3,) * self.ndim\n self.stride = (2,) * self.ndim\n self.pad = (1,) * self.ndim\n x_shape = (2, 3) + self.dims\n self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),\n dtype=self.dtype).reshape(x_shape)\n gy_shape = (2, 3) + tuple(\n conv.get_conv_outsize(d, k, s, p)\n for (d, k, s, p)\n in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))\n self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)\n\n def forward(self):\n x = chainer.Variable(self.x)\n return functions.max_pooling_nd(\n x, self.ksize, self.stride, self.pad, cover_all=False)\n\n def test_call_cudnn_forward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n with testing.patch('cupy.cuda.cudnn.poolingForward') as func:\n self.forward()\n self.assertEqual(func.called,\n chainer.should_use_cudnn('>=auto') and\n self.ndim > 1)\n\n def test_call_cudnn_backward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1\n y = self.forward()\n # should be consistent to forward regardless of use_cudnn config\n y.grad = self.gy\n with testing.patch('cupy.cuda.cudnn.poolingBackward') as func:\n y.backward()\n self.assertEqual(func.called, expect)\n\n\ntesting.run_module(__name__, __file__)\n", "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer import optimizer\n\n\n_default_hyperparam = optimizer.Hyperparameter()\n_default_hyperparam.rho = 0.95\n_default_hyperparam.eps = 1e-6\n\n\nclass AdaDeltaRule(optimizer.UpdateRule):\n\n \"\"\"Update rule of Zeiler's ADADELTA.\n\n See :class:`~chainer.optimizers.AdaDelta` for the default values of the\n hyperparameters.\n\n Args:\n parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter\n that provides the default values.\n rho (float): Exponential decay rate of the first and second order\n moments.\n eps (float): Small value for the numerical stability.\n\n \"\"\"\n\n def __init__(self, parent_hyperparam=None, rho=None, eps=None):\n super(AdaDeltaRule, self).__init__(\n parent_hyperparam or _default_hyperparam)\n if rho is not None:\n self.hyperparam.rho = rho\n if eps is not None:\n self.hyperparam.eps = eps\n\n def init_state(self, param):\n xp = 
cuda.get_array_module(param.data)\n with cuda.get_device_from_array(param.data):\n self.state['msg'] = xp.zeros_like(param.data)\n self.state['msdx'] = xp.zeros_like(param.data)\n\n def update_core_cpu(self, param):\n grad = param.grad\n if grad is None:\n return\n msg, msdx = self.state['msg'], self.state['msdx']\n rho = self.hyperparam.rho\n eps = self.hyperparam.eps\n\n msg *= rho\n msg += (1 - rho) * grad * grad\n dx = numpy.sqrt((msdx + eps) / (msg + eps)) * grad\n msdx *= rho\n msdx += (1 - rho) * dx * dx\n param.data -= dx\n\n def update_core_gpu(self, param):\n grad = param.grad\n if grad is None:\n return\n cuda.elementwise(\n 'T grad, T one_minus_rho, T eps',\n 'T param, T msg, T msdx',\n '''msg = msg + one_minus_rho * (grad * grad - msg);\n T dx = sqrt((msdx + eps) / (msg + eps)) * grad;\n msdx += one_minus_rho * (dx * dx - msdx);\n param -= dx;''',\n 'adadelta')(grad, 1 - self.hyperparam.rho,\n self.hyperparam.eps, param.data,\n self.state['msg'], self.state['msdx'])\n\n\nclass AdaDelta(optimizer.GradientMethod):\n\n \"\"\"Zeiler's ADADELTA.\n\n See: http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf\n\n Args:\n rho (float): Exponential decay rate of the first and second order\n moments.\n eps (float): Small value for the numerical stability.\n\n \"\"\"\n\n def __init__(self, rho=_default_hyperparam.rho,\n eps=_default_hyperparam.eps):\n super(AdaDelta, self).__init__()\n self.hyperparam.rho = rho\n self.hyperparam.eps = eps\n\n rho = optimizer.HyperparameterProxy('rho')\n eps = optimizer.HyperparameterProxy('eps')\n\n def create_update_rule(self):\n return AdaDeltaRule(self.hyperparam)\n" ]
[ [ "numpy.random.uniform", "numpy.random.rand" ], [ "numpy.sqrt" ] ]
tomguluson92/Regularized-AutoEncoder
[ "c79633dbe0b908cddec31324b786d8e236356a2b" ]
[ "opts/opts.py" ]
[ "# coding: UTF-8\n\"\"\"\n @author: samuel ko\n\"\"\"\nimport argparse\nimport torch\nimport os\n\n\ndef INFO(inputs):\n print(\"[ Regularized AE ] %s\" % (inputs))\n\n\ndef presentParameters(args_dict):\n \"\"\"\n Print the parameters setting line by line\n\n Arg: args_dict - The dict object which is transferred from argparse Namespace object\n \"\"\"\n INFO(\"========== Parameters ==========\")\n for key in sorted(args_dict.keys()):\n INFO(\"{:>15} : {}\".format(key, args_dict[key]))\n INFO(\"===============================\")\n\n\nclass TrainOptions():\n def __init__(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', type=str, default='/home/samuel/gaodaiheng/生成模型/face_dataset/')\n parser.add_argument('--epoch', type=int, default=50)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--loss_choice', help='choice make between l1 and l2', type=str, default='l2')\n parser.add_argument('--resume', type=str, default='train_result/models/latest.pth')\n parser.add_argument('--det', type=str, default='train_result')\n self.opts = parser.parse_args()\n\n def parse(self):\n self.opts.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # Create the destination folder\n if not os.path.exists(self.opts.det):\n os.mkdir(self.opts.det)\n if not os.path.exists(os.path.join(self.opts.det, 'images')):\n os.mkdir(os.path.join(self.opts.det, 'images'))\n if not os.path.exists(os.path.join(self.opts.det, 'models')):\n os.mkdir(os.path.join(self.opts.det, 'models'))\n\n # Print the options\n presentParameters(vars(self.opts))\n return self.opts\n\n\nclass InferenceOptions():\n def __init__(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--resume', type=str, default='train_result/model/latest.pth')\n parser.add_argument('--num_face', type=int, default=32)\n parser.add_argument('--det', type=str, default='result.png')\n self.opts = parser.parse_args()\n\n def parse(self):\n self.opts.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # Print the options\n presentParameters(vars(self.opts))\n return self.opts\n" ]
[ [ "torch.cuda.is_available" ] ]
cospectrum/pylinal
[ "2e2d95c23ef2d9f87f89a6ddba972da260dae514" ]
[ "examples/torus.py" ]
[ "import itertools\nfrom numpy import linspace\nfrom pylinal import VectorFunc\nfrom math import cos, sin, pi\n\n \nR = 8 # distance from the center of the tube to the center of the torus\nr = 1 # radius of the tube\n\n# theta, phi in [0, 2*pi)\nx = lambda theta, phi: (R + r*cos(theta)) * cos(phi)\ny = lambda theta, phi: (R + r*cos(theta)) * sin(phi)\nz = lambda theta, phi: r*sin(theta)\n\ntorus = VectorFunc([x, y, z])\n\narg = linspace(0, 2*pi, 40, endpoint=False)\ncartesian = itertools.product(arg, arg)\n\npoints = [torus(theta, phi) for (theta, phi) in cartesian]\n\n" ]
[ [ "numpy.linspace" ] ]
Abas-Khan/thesis
[ "b733bd4382371203cc4992571890619a2e314047" ]
[ "gensim/gensim/sklearn_api/atmodel.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>\n# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nScikit learn interface for gensim for easy use of gensim with scikit-learn\nFollows scikit-learn API conventions\n\"\"\"\nimport numpy as np\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.exceptions import NotFittedError\n\nfrom gensim import models\nfrom gensim import matutils\n\n\nclass AuthorTopicTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Base AuthorTopic module\n \"\"\"\n\n def __init__(self, num_topics=100, id2word=None, author2doc=None, doc2author=None,\n chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,\n alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,\n gamma_threshold=0.001, serialized=False, serialization_path=None,\n minimum_probability=0.01, random_state=None):\n \"\"\"\n Sklearn wrapper for AuthorTopic model. See gensim.models.AuthorTopicModel for parameter details.\n \"\"\"\n self.gensim_model = None\n self.num_topics = num_topics\n self.id2word = id2word\n self.author2doc = author2doc\n self.doc2author = doc2author\n self.chunksize = chunksize\n self.passes = passes\n self.iterations = iterations\n self.decay = decay\n self.offset = offset\n self.alpha = alpha\n self.eta = eta\n self.update_every = update_every\n self.eval_every = eval_every\n self.gamma_threshold = gamma_threshold\n self.serialized = serialized\n self.serialization_path = serialization_path\n self.minimum_probability = minimum_probability\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model according to the given training data.\n Calls gensim.models.AuthorTopicModel\n \"\"\"\n self.gensim_model = models.AuthorTopicModel(\n corpus=X, num_topics=self.num_topics, id2word=self.id2word,\n author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,\n iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,\n update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,\n serialized=self.serialized, serialization_path=self.serialization_path,\n minimum_probability=self.minimum_probability, random_state=self.random_state\n )\n return self\n\n def transform(self, author_names):\n \"\"\"\n Return topic distribution for input authors as a list of\n (topic_id, topic_probabiity) 2-tuples.\n \"\"\"\n # The input as array of array\n if self.gensim_model is None:\n raise NotFittedError(\n \"This model has not been fitted yet. 
Call 'fit' with appropriate arguments before using this method.\"\n )\n\n if not isinstance(author_names, list):\n author_names = [author_names]\n # returning dense representation for compatibility with sklearn\n # but we should go back to sparse representation in the future\n topics = [matutils.sparse2full(self.gensim_model[author_name], self.num_topics) for author_name in author_names]\n return np.reshape(np.array(topics), (len(author_names), self.num_topics))\n\n def partial_fit(self, X, author2doc=None, doc2author=None):\n \"\"\"\n Train model over X.\n \"\"\"\n if self.gensim_model is None:\n self.gensim_model = models.AuthorTopicModel(\n corpus=X, num_topics=self.num_topics, id2word=self.id2word,\n author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,\n iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,\n update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,\n serialized=self.serialized, serialization_path=self.serialization_path,\n minimum_probability=self.minimum_probability, random_state=self.random_state\n )\n\n self.gensim_model.update(corpus=X, author2doc=author2doc, doc2author=doc2author)\n return self\n" ]
[ [ "numpy.array", "sklearn.exceptions.NotFittedError" ] ]
Alberto199810/Master-s-thesis-Moneyball-II
[ "0bbfa1dba6c67d2ccee95eb92aa09533817b228c" ]
[ "01_Datasets/Leagues Comparison/Scrapers/SPI_Index_Scraper.py" ]
[ "import pandas as pd\nimport numpy as np\n\nsecond_ranking = pd.read_html('https://www.globalfootballrankings.com/')[0]\n\nsecond_ranking = second_ranking[['League','Average SPI']]\n\nsecond_ranking['League'] = ['Premier League', 'La Liga', 'Fußball-Bundesliga', 'Serie A', 'Ligue 1', \n 'Primeira Liga', 'Dutch Eredivisie', 'Brasileiro Série A',\n 'Mexican Primera Division Torneo Clausura', 'Russian Premier League', \n 'English League Championship', 'Austrian Football Bundesliga','Belgian First Division A', \n 'Süper Lig', 'Swiss Super League', 'Superliga', 'MLS', \n 'Argentina Primera Division', 'Scottish Premiership', \n 'Japanese J League' , 'German 2. Bundesliga', 'Super League Greece', \n 'Eliteserien', 'Italy Serie B','Spanish Segunda Division', \n 'French Ligue 2','Allsvenskan', 'Chinese Super League',\n 'Australian A-League', 'English League One','United Soccer League', \n 'South African ABSA Premier League', 'English League Two']\n\n\n\nlista_SPI = [i for i in second_ranking['Average SPI']]\nlista_SPI.append(0)\n\ndef NormalizeData(data):\n return (data - np.min(data)) / (np.max(data) - np.min(data))\n\nscaled_SPI = NormalizeData(lista_SPI)\nsecond_ranking['Average SPI'] = scaled_SPI[:-1]\n\nsecond_ranking.to_csv('SPI_Index.csv', index = False, encoding='utf-8-sig')" ]
[ [ "numpy.max", "pandas.read_html", "numpy.min" ] ]
jinmyeonglee/LKVOLearner
[ "8d6a167d50942131dc9e379c280f442c37579d37" ]
[ "DEN/den.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torchvision.models import resnet152\n\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, input):\n return input.view(input.size()[0], -1)\n\n\nclass AuxConv(nn.Module):\n def __init__(self, in_channels, c_tag, stride=1, p=0):\n super(AuxConv, self).__init__()\n self.aux = nn.Sequential(nn.Conv2d(in_channels, c_tag, kernel_size=(3, 1)),\n nn.ReLU(),\n nn.Dropout(p),\n nn.Conv2d(c_tag, c_tag, kernel_size=(1, 3)),\n nn.ReLU(),\n nn.Dropout(p),\n Flatten())\n\n def forward(self, input):\n return self.aux(input)\n\n\nclass DEN(nn.Module):\n def __init__(self, backbone_wts=None, backbone_freeze=True, p=0):\n super(DEN, self).__init__()\n\n resnet = resnet152(pretrained=False)\n if backbone_wts != None:\n resnet = self._init_resnet(resnet, backbone_wts)\n \n if backbone_freeze:\n for param in resnet.parameters():\n param.requires_grad = False\n \n \n # prepare the network\n self._flat_resnet152(resnet)\n\n aux_1024 = [AuxConv(in_channels=1024, c_tag=8, p=p) for _ in range(16)]\n aux_2048 = [AuxConv(in_channels=2048, c_tag=64, p=p) for _ in range(3)]\n self.aux_modules = nn.ModuleList(aux_1024 + aux_2048)\n \n self._init_added_weights()\n \n def _init_resnet(self, resnet, backbone_wts):\n num_ftrs = resnet.fc.in_features\n print(\"fc\", num_ftrs, \"x\", 128*416)\n resnet.fc = nn.Linear(num_ftrs, 128 * 416)\n resnet.load_state_dict(torch.load(backbone_wts))\n\n return resnet\n\n\n def _init_added_weights(self):\n \n nn.init.xavier_uniform_(self.fc.weight)\n for name,param in self.aux_modules.named_parameters():\n if 'weight' in name:\n nn.init.xavier_uniform_(param)\n \n \n def _flat_resnet152(self, model):\n \n # break the resent to its building blocks\n # into a list\n flattened = []\n flattened += list(model.children())[:4]\n\n for i in range(4,8):\n sequence = list(model.children())[i]\n flattened += list(sequence.children())\n\n flattened += list(model.children())[-2:]\n\n self.resnet_top = nn.Sequential(*flattened[:35])\n # self.resnet_mid = nn.ModuleList(flattened[35:54])\n self.resnet_mid = nn.ModuleList(flattened[35:51])\n self.avg_pool2d = flattened[54]\n self.fc = nn.Linear(25280, 128 * 416)\n # self.fc = nn.Linear(59392, 128*416)\n \n def forward(self, input):\n # print(\"right after in den\", input.shape) \n x = self.resnet_top(input)\n # print(\"after resnet_top\", x.shape)\n outputs = []\n for i, block in enumerate(self.resnet_mid):\n x = block(x)\n # print(\"resnet_mid loop\", x.shape)\n outputs.append(self.aux_modules[i](x))\n \n x = self.avg_pool2d(x)\n print(\"after pooling\", x.shape)\n x = x.view(x.shape[0], -1)\n outputs.append(x)\n outputs_concat = torch.cat(outputs, dim=1)\n print(\"output concat\", outputs_concat.shape)\n out = self.fc(outputs_concat)\n print(\"output shape\", out.shape)\n\n return out\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.Dropout", "torch.nn.ModuleList", "torch.nn.Sequential", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.load" ] ]
yjchoi1/gns-1
[ "31c712dbb721be81b5fd193a23baaf56adf9c336" ]
[ "gns/noise_utils.py" ]
[ "import torch\nfrom gns import learned_simulator\n\n\ndef get_random_walk_noise_for_position_sequence(\n position_sequence: torch.tensor,\n noise_std_last_step):\n \"\"\"Returns random-walk noise in the velocity applied to the position.\n\n Args: \n position_sequence: A sequence of particle positions. Shape is\n (nparticles, 6, dim). Includes current + last 5 positions.\n noise_std_last_step: Standard deviation of noise in the last step.\n\n \"\"\"\n velocity_sequence = learned_simulator.time_diff(position_sequence)\n\n # We want the noise scale in the velocity at the last step to be fixed.\n # Because we are going to compose noise at each step using a random_walk:\n # std_last_step**2 = num_velocities * std_each_step**2\n # so to keep `std_last_step` fixed, we apply at each step:\n # std_each_step `std_last_step / np.sqrt(num_input_velocities)`\n num_velocities = velocity_sequence.shape[1]\n velocity_sequence_noise = torch.randn(\n list(velocity_sequence.shape)) * (noise_std_last_step/num_velocities**0.5)\n\n # Apply the random walk.\n velocity_sequence_noise = torch.cumsum(velocity_sequence_noise, dim=1)\n\n # Integrate the noise in the velocity to the positions, assuming\n # an Euler intergrator and a dt = 1, and adding no noise to the very first\n # position (since that will only be used to calculate the first position\n # change).\n position_sequence_noise = torch.cat([\n torch.zeros_like(velocity_sequence_noise[:, 0:1]),\n torch.cumsum(velocity_sequence_noise, dim=1)], dim=1)\n\n return position_sequence_noise\n" ]
[ [ "torch.zeros_like", "torch.cumsum" ] ]
jiseokcube/PyTorch-GAN
[ "285c260934d37261d4c55fffbbeea32ce308cc53" ]
[ "implementations/esrgan/test_on_image.py" ]
[ "from models import GeneratorRRDB\nfrom datasets import denormalize, mean, std\nimport torch\nfrom torch.autograd import Variable\nimport argparse\nimport os\nfrom torchvision.utils import save_image\nfrom PIL import Image\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--image_path\", type=str, required=True, help=\"Path to image\")\nparser.add_argument(\"--checkpoint_model\", type=str, required=True, help=\"Path to checkpoint model\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"Number of image channels\")\nparser.add_argument(\"--residual_blocks\", type=int, default=23, help=\"Number of residual blocks in G\")\nopt = parser.parse_args()\nprint(opt)\n\nos.makedirs(\"images/outputs\", exist_ok=True)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Define model and load model checkpoint\ngenerator = GeneratorRRDB(opt.channels, filters=64, num_res_blocks=opt.residual_blocks).to(device)\ngenerator.load_state_dict(torch.load(opt.checkpoint_model))\ngenerator.eval()\n\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])\n\n# Prepare input\nimage_tensor = Variable(transform(Image.open(opt.image_path))).to(device).unsqueeze(0)\n\n# Upsample image\nwith torch.no_grad():\n sr_image = denormalize(generator(image_tensor)).cpu()\n\n# Save image\nfn = opt.image_path.split(\"/\")[-1]\nsave_image(sr_image, f\"images/outputs/sr-{fn}\")\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
layer6ai-labs/CMLMC
[ "78fe1cb9c45a836dd0e1a2ee541e4a364ae24abe" ]
[ "fairseq/options.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport sys\nfrom typing import Callable, List, Optional\n\nimport torch\n# this import is for backward compatibility\nfrom fairseq.utils import csv_str_list, eval_str_list, eval_str_dict, eval_bool # noqa\nfrom fairseq import utils\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\n\n\ndef get_preprocessing_parser(default_task=\"translation\"):\n parser = get_parser(\"Preprocessing\", default_task)\n add_preprocess_args(parser)\n return parser\n\n\ndef get_training_parser(default_task=\"translation\"):\n parser = get_parser(\"Trainer\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser)\n add_model_args(parser)\n add_optimization_args(parser)\n add_checkpoint_args(parser)\n return parser\n\n\ndef get_generation_parser(interactive=False, default_task=\"translation\"):\n parser = get_parser(\"Generation\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_generation_args(parser)\n if interactive:\n add_interactive_args(parser)\n return parser\n\n\ndef get_interactive_generation_parser(default_task=\"translation\"):\n return get_generation_parser(interactive=True, default_task=default_task)\n\n\ndef get_eval_lm_parser(default_task=\"language_modeling\"):\n parser = get_parser(\"Evaluate Language Model\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_eval_lm_args(parser)\n return parser\n\n\ndef get_validation_parser(default_task=None):\n parser = get_parser(\"Validation\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser, default_world_size=1)\n group = parser.add_argument_group(\"Evaluation\")\n add_common_eval_args(group)\n return parser\n\n\ndef parse_args_and_arch(\n parser: argparse.ArgumentParser,\n input_args: List[str] = None,\n parse_known: bool = False,\n suppress_defaults: bool = False,\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,\n):\n \"\"\"\n Args:\n parser (ArgumentParser): the parser\n input_args (List[str]): strings to parse, defaults to sys.argv\n parse_known (bool): only parse known arguments, similar to\n `ArgumentParser.parse_known_args`\n suppress_defaults (bool): parse while ignoring all default values\n modify_parser (Optional[Callable[[ArgumentParser], None]]):\n function to modify the parser, e.g., to set default values\n \"\"\"\n if suppress_defaults:\n # Parse args without any default values. 
This requires us to parse\n # twice, once to identify all the necessary task/model args, and a second\n # time with all defaults set to None.\n args = parse_args_and_arch(\n parser,\n input_args=input_args,\n parse_known=parse_known,\n suppress_defaults=False,\n )\n suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])\n suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})\n args = suppressed_parser.parse_args(input_args)\n return argparse.Namespace(\n **{k: v for k, v in vars(args).items() if v is not None}\n )\n\n from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY\n\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args(input_args)\n utils.import_user_module(usr_args)\n\n if modify_parser is not None:\n modify_parser(parser)\n\n # The parser doesn't know about model/criterion/optimizer-specific args, so\n # we parse twice. First we parse the model/criterion/optimizer, then we\n # parse a second time after adding the *-specific arguments.\n # If input_args is given, we will parse those args instead of sys.argv.\n args, _ = parser.parse_known_args(input_args)\n\n # Add model-specific args to parser.\n if hasattr(args, \"arch\"):\n model_specific_group = parser.add_argument_group(\n \"Model-specific configuration\",\n # Only include attributes which are explicitly given as command-line\n # arguments or which have default values.\n argument_default=argparse.SUPPRESS,\n )\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n\n # Add *-specific args to parser.\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n choice = getattr(args, registry_name, None)\n if choice is not None:\n cls = REGISTRY[\"registry\"][choice]\n if hasattr(cls, \"add_args\"):\n cls.add_args(parser)\n if hasattr(args, \"task\"):\n from fairseq.tasks import TASK_REGISTRY\n\n TASK_REGISTRY[args.task].add_args(parser)\n if getattr(args, \"use_bmuf\", False):\n # hack to support extra args for block distributed data parallelism\n from fairseq.optim.bmuf import FairseqBMUF\n\n FairseqBMUF.add_args(parser)\n\n # Modify the parser a second time, since defaults may have been reset\n if modify_parser is not None:\n modify_parser(parser)\n\n # Parse a second time.\n if parse_known:\n args, extra = parser.parse_known_args(input_args)\n else:\n args = parser.parse_args(input_args)\n extra = None\n\n # Post-process args.\n if hasattr(args, \"max_sentences_valid\") and args.max_sentences_valid is None:\n args.max_sentences_valid = args.max_sentences\n if hasattr(args, \"max_tokens_valid\") and args.max_tokens_valid is None:\n args.max_tokens_valid = args.max_tokens\n if getattr(args, \"memory_efficient_fp16\", False):\n args.fp16 = True\n if getattr(args, \"memory_efficient_bf16\", False):\n args.bf16 = True\n args.tpu = getattr(args, \"tpu\", False)\n args.bf16 = getattr(args, \"bf16\", False)\n if args.bf16:\n args.tpu = True\n if args.tpu and args.fp16:\n raise ValueError(\"Cannot combine --fp16 and --tpu, use --bf16 on TPUs\")\n\n if getattr(args, \"seed\", None) is None:\n args.seed = 1 # default seed for training\n args.no_seed_provided = True\n else:\n args.no_seed_provided = False\n\n # Apply architecture configuration.\n if hasattr(args, 
\"arch\"):\n ARCH_CONFIG_REGISTRY[args.arch](args)\n\n if parse_known:\n return args, extra\n else:\n return args\n\n\ndef get_parser(desc, default_task=\"translation\"):\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args()\n utils.import_user_module(usr_args)\n\n parser = argparse.ArgumentParser(allow_abbrev=False)\n # fmt: off\n parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')\n parser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='log progress every N batches (when progress bar is disabled)')\n parser.add_argument('--log-format', default=None, help='log format to use',\n choices=['json', 'none', 'simple', 'tqdm'])\n parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',\n help='path to save logs for tensorboard, should match --logdir '\n 'of running tensorboard (default: no tensorboard logging)')\n parser.add_argument('--seed', default=None, type=int, metavar='N',\n help='pseudo random number generator seed')\n parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')\n parser.add_argument('--tpu', action='store_true', help='use TPU instead of CUDA')\n parser.add_argument('--bf16', action='store_true', help='use bfloat16; implies --tpu')\n parser.add_argument('--fp16', action='store_true', help='use FP16')\n parser.add_argument('--memory-efficient-bf16', action='store_true',\n help='use a memory-efficient version of BF16 training; implies --bf16')\n parser.add_argument('--memory-efficient-fp16', action='store_true',\n help='use a memory-efficient version of FP16 training; implies --fp16')\n parser.add_argument('--fp16-no-flatten-grads', action='store_true',\n help='don\\'t flatten FP16 grads tensor')\n parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,\n help='default FP16 loss scale')\n parser.add_argument('--fp16-scale-window', type=int,\n help='number of updates before increasing loss scale')\n parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,\n help='pct of updates that can overflow before decreasing the loss scale')\n parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',\n help='minimum FP16 loss scale, after which training is stopped')\n parser.add_argument('--threshold-loss-scale', type=float,\n help='threshold FP16 loss scale from below')\n parser.add_argument('--user-dir', default=None,\n help='path to a python module containing custom extensions (tasks and/or architectures)')\n parser.add_argument('--empty-cache-freq', default=0, type=int,\n help='how often to clear the PyTorch CUDA cache (0 to disable)')\n parser.add_argument('--all-gather-list-size', default=16384, type=int,\n help='number of bytes reserved for gathering stats from workers')\n parser.add_argument('--model-parallel-size', type=int, metavar='N',\n default=1,\n help='total number of GPUs to parallelize model over')\n parser.add_argument('--checkpoint-suffix', default='',\n help='suffix to add to the checkpoint file name')\n parser.add_argument('--quantization-config-path', default=None,\n help='path to quantization config file')\n parser.add_argument('--profile', action='store_true', help='enable autograd profiler emit_nvtx')\n\n from fairseq.registry import REGISTRIES\n 
for registry_name, REGISTRY in REGISTRIES.items():\n parser.add_argument(\n '--' + registry_name.replace('_', '-'),\n default=REGISTRY['default'],\n choices=REGISTRY['registry'].keys(),\n )\n\n # Task definitions can be found under fairseq/tasks/\n from fairseq.tasks import TASK_REGISTRY\n parser.add_argument('--task', metavar='TASK', default=default_task,\n choices=TASK_REGISTRY.keys(),\n help='task')\n # fmt: on\n return parser\n\n\ndef add_preprocess_args(parser):\n group = parser.add_argument_group(\"Preprocessing\")\n # fmt: off\n group.add_argument(\"-s\", \"--source-lang\", default=None, metavar=\"SRC\",\n help=\"source language\")\n group.add_argument(\"-t\", \"--target-lang\", default=None, metavar=\"TARGET\",\n help=\"target language\")\n group.add_argument(\"--trainpref\", metavar=\"FP\", default=None,\n help=\"train file prefix\")\n group.add_argument(\"--validpref\", metavar=\"FP\", default=None,\n help=\"comma separated, valid file prefixes\")\n group.add_argument(\"--testpref\", metavar=\"FP\", default=None,\n help=\"comma separated, test file prefixes\")\n group.add_argument(\"--align-suffix\", metavar=\"FP\", default=None,\n help=\"alignment file suffix\")\n group.add_argument(\"--destdir\", metavar=\"DIR\", default=\"data-bin\",\n help=\"destination dir\")\n group.add_argument(\"--thresholdtgt\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--thresholdsrc\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--tgtdict\", metavar=\"FP\",\n help=\"reuse given target dictionary\")\n group.add_argument(\"--srcdict\", metavar=\"FP\",\n help=\"reuse given source dictionary\")\n group.add_argument(\"--nwordstgt\", metavar=\"N\", default=-1, type=int,\n help=\"number of target words to retain\")\n group.add_argument(\"--nwordssrc\", metavar=\"N\", default=-1, type=int,\n help=\"number of source words to retain\")\n group.add_argument(\"--alignfile\", metavar=\"ALIGN\", default=None,\n help=\"an alignment file (optional)\")\n parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n group.add_argument(\"--joined-dictionary\", action=\"store_true\",\n help=\"Generate joined dictionary\")\n group.add_argument(\"--only-source\", action=\"store_true\",\n help=\"Only process the source language\")\n group.add_argument(\"--padding-factor\", metavar=\"N\", default=8, type=int,\n help=\"Pad dictionary size to be multiple of N\")\n group.add_argument(\"--workers\", metavar=\"N\", default=1, type=int,\n help=\"number of parallel workers\")\n # fmt: on\n return parser\n\n\ndef add_dataset_args(parser, train=False, gen=False):\n group = parser.add_argument_group(\"Dataset and data loading\")\n # fmt: off\n group.add_argument('--num-workers', default=1, type=int, metavar='N',\n help='how many subprocesses to use for data loading')\n group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',\n help='ignore too long or too short lines in valid and test set')\n group.add_argument('--max-tokens', type=int, metavar='N',\n help='maximum number of tokens in a batch')\n group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',\n help='maximum number of sentences in a batch')\n group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',\n help='batch size will either be less than this 
value, '\n 'or a multiple of this value')\n parser.add_argument('--dataset-impl', metavar='FORMAT',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n group.add_argument('--data-buffer-size', default=10, type=int, metavar='N',\n help='number of batches to preload')\n if train:\n group.add_argument('--train-subset', default='train', metavar='SPLIT',\n help='data subset to use for training (e.g. train, valid, test)')\n group.add_argument('--valid-subset', default='valid', metavar='SPLIT',\n help='comma separated list of data subsets to use for validation'\n ' (e.g. train, valid, test)')\n group.add_argument('--validate-interval', type=int, default=1, metavar='N',\n help='validate every N epochs')\n group.add_argument('--validate-interval-updates', type=int, default=0, metavar='N',\n help='validate every N updates')\n group.add_argument('--validate-after-updates', type=int, default=0, metavar='N',\n help='dont validate until reaching this many updates')\n group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',\n help='specified random seed for validation')\n group.add_argument('--disable-validation', action='store_true',\n help='disable validation')\n group.add_argument('--max-tokens-valid', type=int, metavar='N',\n help='maximum number of tokens in a validation batch'\n ' (defaults to --max-tokens)')\n group.add_argument('--max-sentences-valid', type=int, metavar='N',\n help='maximum number of sentences in a validation batch'\n ' (defaults to --max-sentences)')\n group.add_argument('--curriculum', default=0, type=int, metavar='N',\n help='don\\'t shuffle batches for first N epochs')\n if gen:\n group.add_argument('--gen-subset', default='test', metavar='SPLIT',\n help='data subset to generate (train, valid, test)')\n group.add_argument('--num-shards', default=1, type=int, metavar='N',\n help='shard generation over N shards')\n group.add_argument('--shard-id', default=0, type=int, metavar='ID',\n help='id of the shard to generate (id < num_shards)')\n # fmt: on\n return group\n\n\ndef add_distributed_training_args(parser, default_world_size=None):\n group = parser.add_argument_group(\"Distributed training\")\n # fmt: off\n if default_world_size is None:\n default_world_size = max(1, torch.cuda.device_count())\n group.add_argument('--distributed-world-size', type=int, metavar='N',\n default=default_world_size,\n help='total number of GPUs across all nodes (default: all visible GPUs)')\n group.add_argument('--distributed-rank', default=0, type=int,\n help='rank of the current worker')\n group.add_argument('--distributed-backend', default='nccl', type=str,\n help='distributed backend')\n group.add_argument('--distributed-init-method', default=None, type=str,\n help='typically tcp://hostname:port that will be used to '\n 'establish initial connetion')\n group.add_argument('--distributed-port', default=-1, type=int,\n help='port number (not required if using --distributed-init-method)')\n group.add_argument('--device-id', '--local_rank', default=0, type=int,\n help='which GPU to use (usually configured automatically)')\n group.add_argument('--distributed-no-spawn', action='store_true',\n help='do not spawn multiple processes even if multiple GPUs are visible')\n group.add_argument('--distributed-num-procs', default=None, type=int,\n help='number of processes to spawn (usually configured automatically)')\n # \"c10d\" is PyTorch's DDP implementation and provides the fastest\n # training. 
\"no_c10d\" is a more robust, but slightly slower DDP\n # implementation. Try this if you get warning messages about\n # inconsistent gradients between workers, or if some of your model\n # parameters are not always used.\n group.add_argument('--ddp-backend', default='c10d', type=str,\n choices=['c10d', 'no_c10d'],\n help='DistributedDataParallel backend')\n group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',\n help='bucket size for reduction')\n group.add_argument('--fix-batches-to-gpus', action='store_true',\n help='don\\'t shuffle batches between GPUs; this reduces overall '\n 'randomness and may affect precision but avoids the cost of '\n 're-reading the data')\n group.add_argument('--find-unused-parameters', default=False, action='store_true',\n help='disable unused parameter detection (not applicable to '\n 'no_c10d ddp-backend')\n group.add_argument('--fast-stat-sync', default=False, action='store_true',\n help='[deprecated] this is now defined per Criterion')\n group.add_argument('--broadcast-buffers', default=False, action='store_true',\n help='Copy non-trainable parameters between GPUs, such as '\n 'batchnorm population statistics')\n\n group.add_argument('--distributed-wrapper', default='DDP', type=str,\n choices=['DDP', 'SlowMo'],\n help='DistributedDataParallel backend')\n # Add arguments for SlowMo - these will be used when SlowMo is enabled via above\n group.add_argument('--slowmo-momentum', default=None, type=float,\n help='SlowMo momentum term; by default use 0.0 for 16 GPUs, '\n '0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs')\n group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'],\n help='whether to use LocalSGD or SGP')\n group.add_argument('--localsgd-frequency', default=3, type=int,\n help='Local SGD allreduce frequency')\n group.add_argument('--nprocs-per-node', type=int, metavar='N',\n default=max(1, torch.cuda.device_count()),\n help='number of GPUs in each node. An allreduce operation across GPUs in '\n 'a node is very fast. Hence, we do allreduce across GPUs in a node, '\n 'and gossip across different nodes')\n # Pipeline Parallel Arguments\n group.add_argument('--pipeline-model-parallel', default=False, action='store_true',\n help='if set, use pipeline model parallelism across GPUs')\n group.add_argument('--pipeline-balance', metavar='N1,N2,...,N_K',\n type=lambda x: eval_str_list(x, type=int),\n help='partition the model into N_K pieces, where each piece '\n 'contains N_i layers. The sum(args.pipeline_balance) '\n 'should equal the total number of layers in the model')\n group.add_argument('--pipeline-devices', metavar='N1,N2,...,N_K',\n type=lambda x: eval_str_list(x, type=int),\n help='a list of device indices indicating which device to place '\n 'each of the N_K partitions. 
The length of this list should '\n 'equal the length of the --pipeline-balance argument')\n group.add_argument('--pipeline-chunks', type=int, metavar='N',\n help='microbatch count for pipeline model parallelism')\n group.add_argument('--pipeline-checkpoint', type=str, metavar='STR',\n choices=['always', 'never', 'except_last'],\n default='never',\n help='checkpointing mode for pipeline model parallelism')\n # Add argument for ZeRO sharding of OptimizerState(os), gradients(g) and parameters(p)\n group.add_argument('--zero-sharding', default='none', type=str,\n choices=['none', 'os'],\n help='ZeRO sharding')\n # fmt: on\n return group\n\n\ndef add_optimization_args(parser):\n group = parser.add_argument_group(\"Optimization\")\n # fmt: off\n group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',\n help='force stop training at specified epoch')\n group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',\n help='force stop training at specified update')\n group.add_argument('--stop-time-hours', default=0, type=float, metavar='N',\n help='force stop training after specified cumulative time (if >0)')\n group.add_argument('--clip-norm', default=0.0, type=float, metavar='NORM',\n help='clip threshold of gradients')\n group.add_argument('--sentence-avg', action='store_true',\n help='normalize gradients by the number of sentences in a batch'\n ' (default is to normalize by number of tokens)')\n group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',\n type=lambda uf: eval_str_list(uf, type=int),\n help='update parameters every N_i batches, when in epoch i')\n group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,\n metavar='LR_1,LR_2,...,LR_N',\n help='learning rate for the first N epochs; all epochs >N using LR_N'\n ' (note: this may be interpreted differently depending on --lr-scheduler)')\n group.add_argument('--min-lr', default=-1, type=float, metavar='LR',\n help='stop training when the learning rate reaches this minimum')\n group.add_argument('--use-bmuf', default=False, action='store_true',\n help='specify global optimizer for syncing models on different GPUs/shards')\n # fmt: on\n return group\n\n\ndef add_checkpoint_args(parser):\n group = parser.add_argument_group(\"Checkpointing\")\n # fmt: off\n group.add_argument('--save-dir', metavar='DIR', default='checkpoints',\n help='path to save checkpoints')\n group.add_argument('--restore-file', default='checkpoint_last.pt',\n help='filename from which to load checkpoint '\n '(default: <save-dir>/checkpoint_last.pt')\n group.add_argument('--finetune-from-model', default=None, type=str,\n help='finetune from a pretrained model; '\n 'note that meters and lr scheduler will be reset')\n group.add_argument('--reset-dataloader', action='store_true',\n help='if set, does not reload dataloader state from the checkpoint')\n group.add_argument('--reset-lr-scheduler', action='store_true',\n help='if set, does not load lr scheduler state from the checkpoint')\n group.add_argument('--reset-meters', action='store_true',\n help='if set, does not load meters from the checkpoint')\n group.add_argument('--reset-optimizer', action='store_true',\n help='if set, does not load optimizer state from the checkpoint')\n group.add_argument('--optimizer-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override optimizer args when loading a checkpoint')\n group.add_argument('--save-interval', type=int, default=1, metavar='N',\n help='save a checkpoint every 
N epochs')\n group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',\n help='save a checkpoint (and validate) every N updates')\n group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',\n help='keep the last N checkpoints saved with --save-interval-updates')\n group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',\n help='keep last N epoch checkpoints')\n group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N',\n help='keep best N checkpoints based on scores')\n group.add_argument('--no-save', action='store_true',\n help='don\\'t save models or checkpoints')\n group.add_argument('--no-epoch-checkpoints', action='store_true',\n help='only store last and best checkpoints')\n group.add_argument('--no-last-checkpoints', action='store_true',\n help='don\\'t store last checkpoints')\n group.add_argument('--no-save-optimizer-state', action='store_true',\n help='don\\'t save optimizer-state as part of checkpoint')\n group.add_argument('--best-checkpoint-metric', type=str, default='loss',\n help='metric to use for saving \"best\" checkpoints')\n group.add_argument('--maximize-best-checkpoint-metric', action='store_true',\n help='select the largest metric value for saving \"best\" checkpoints')\n group.add_argument('--patience', type=int, default=-1, metavar='N',\n help=('early stop training if valid performance doesn\\'t '\n 'improve for N consecutive validation runs; note '\n 'that this is influenced by --validate-interval'))\n # fmt: on\n return group\n\n\ndef add_common_eval_args(group):\n # fmt: off\n group.add_argument('--path', metavar='FILE',\n help='path(s) to model file(s), colon separated')\n group.add_argument('--remove-bpe', '--post-process', nargs='?', const='@@ ', default=None,\n help='remove BPE tokens before scoring (can be set to sentencepiece)')\n group.add_argument('--quiet', action='store_true',\n help='only print final scores')\n group.add_argument('--model-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override model args at generation '\n 'that were used during model training')\n group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,\n help='path to save eval results (optional)\"')\n # fmt: on\n\n\ndef add_eval_lm_args(parser):\n group = parser.add_argument_group(\"LM Evaluation\")\n add_common_eval_args(group)\n # fmt: off\n group.add_argument('--output-word-probs', action='store_true',\n help='if set, outputs words and their predicted log probabilities to standard output')\n group.add_argument('--output-word-stats', action='store_true',\n help='if set, outputs word statistics such as word count, average probability, etc')\n group.add_argument('--context-window', default=0, type=int, metavar='N',\n help='ensures that every evaluated token has access to a context of at least this size,'\n ' if possible')\n group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',\n help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'\n ' in order to fit into GPU memory')\n # fmt: on\n\n\ndef add_generation_args(parser):\n group = parser.add_argument_group(\"Generation\")\n add_common_eval_args(group)\n # fmt: off\n ############################################################\n #### generation options for CMLM Mask-Predict ####\n # group.add_argument('--decoding-strategy', default='left_to_right', choices=STRATEGY_REGISTRY.keys())\n group.add_argument('--decoding-strategy', 
default='mask_predict')\n group.add_argument('--gold-target-len', action='store_true', help='use gold target length')\n group.add_argument('--dehyphenate', action='store_true', help='turn hyphens into independent tokens')\n parser.add_argument('--decoding-iterations', default=None, type=int, metavar='N',\n help='number of decoding iterations in mask-predict')\n group.add_argument('--length-beam', default=5, type=int, metavar='N',\n help='length beam size')\n ############################################################\n\n group.add_argument('--beam', default=5, type=int, metavar='N',\n help='beam size')\n group.add_argument('--nbest', default=1, type=int, metavar='N',\n help='number of hypotheses to output')\n group.add_argument('--max-len-a', default=0, type=float, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--max-len-b', default=200, type=int, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--min-len', default=1, type=float, metavar='N',\n help=('minimum generation length'))\n group.add_argument('--match-source-len', default=False, action='store_true',\n help=('generations should match the source length'))\n group.add_argument('--no-early-stop', action='store_true',\n help='deprecated')\n group.add_argument('--unnormalized', action='store_true',\n help='compare unnormalized hypothesis scores')\n group.add_argument('--no-beamable-mm', action='store_true',\n help='don\\'t use BeamableMM in attention layers')\n group.add_argument('--lenpen', default=1, type=float,\n help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')\n group.add_argument('--unkpen', default=0, type=float,\n help='unknown word penalty: <0 produces more unks, >0 produces fewer')\n group.add_argument('--replace-unk', nargs='?', const=True, default=None,\n help='perform unknown replacement (optionally with alignment dictionary)')\n group.add_argument('--sacrebleu', action='store_true',\n help='score with sacrebleu')\n group.add_argument('--score-reference', action='store_true',\n help='just score the reference translation')\n group.add_argument('--prefix-size', default=0, type=int, metavar='PS',\n help='initialize generation by target prefix of given length')\n group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',\n help='ngram blocking such that this size ngram cannot be repeated in the generation')\n group.add_argument('--sampling', action='store_true',\n help='sample hypotheses instead of using beam search')\n group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',\n help='sample from top K likely next words instead of all words')\n group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',\n help='sample from the smallest set whose cumulative probability mass exceeds p for next words')\n group.add_argument('--constraints', const=\"ordered\", nargs=\"?\", choices=[\"ordered\", \"unordered\"],\n help='enables lexically constrained decoding')\n group.add_argument('--temperature', default=1., type=float, metavar='N',\n help='temperature for generation')\n group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',\n help='number of groups for Diverse Beam Search')\n group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',\n help='strength of diversity penalty for Diverse Beam Search')\n group.add_argument('--diversity-rate', default=-1.0, 
type=float, metavar='N',\n help='strength of diversity penalty for Diverse Siblings Search')\n group.add_argument('--print-alignment', action='store_true',\n help='if set, uses attention feedback to compute and print alignment to source tokens')\n group.add_argument('--print-step', action='store_true')\n\n # arguments for iterative refinement generator\n group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',\n help='if > 0.0, it penalized early-stopping in decoding.')\n group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',\n help='maximum iterations for iterative refinement.')\n group.add_argument('--iter-decode-force-max-iter', action='store_true',\n help='if set, run exact the maximum number of iterations without early stop')\n group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',\n help='if > 1, model will generate translations varying by the lengths.')\n group.add_argument('--iter-decode-with-external-reranker', action='store_true',\n help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'),\n group.add_argument('--retain-iter-history', action='store_true',\n help='if set, decoding returns the whole history of iterative refinement')\n group.add_argument('--retain-dropout', action='store_true',\n help='Use dropout at inference time')\n group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str,\n help='if set, only retain dropout for the specified modules; '\n 'if not set, then dropout will be retained for all modules')\n\n # special decoding format for advanced decoding.\n group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])\n # fmt: on\n return group\n\n\ndef add_interactive_args(parser):\n group = parser.add_argument_group(\"Interactive\")\n # fmt: off\n group.add_argument('--buffer-size', default=0, type=int, metavar='N',\n help='read this many sentences into a buffer before processing them')\n group.add_argument('--input', default='-', type=str, metavar='FILE',\n help='file to read from; use - for stdin')\n # fmt: on\n\n\ndef add_model_args(parser):\n group = parser.add_argument_group(\"Model configuration\")\n # fmt: off\n\n # Model definitions can be found under fairseq/models/\n #\n # The model architecture can be specified in several ways.\n # In increasing order of priority:\n # 1) model defaults (lowest priority)\n # 2) --arch argument\n # 3) --encoder/decoder-* arguments (highest priority)\n from fairseq.models import ARCH_MODEL_REGISTRY\n group.add_argument('--arch', '-a', metavar='ARCH',\n choices=ARCH_MODEL_REGISTRY.keys(),\n help='model architecture')\n # fmt: on\n return group\n" ]
[ [ "torch.cuda.device_count" ] ]
realityengines/post_hoc_debiasing
[ "e0efc81e6e317b9c203e79106c3529e159bc3fa8" ]
[ "deco/src/deco/compute_shapley.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pickle\nimport time\nimport argparse\nimport yaml\nimport keras\nfrom utils import accuracy_from_scores\nimport fairness\nimport gc\nfrom tqdm import tqdm\nimport os.path\n\n# References\n# [1] https://christophm.github.io/interpretable-ml-book/shapley.html#the-shapley-value-in-detail\n\nclass Shapley(object):\n\n def __init__(self, nn, config):\n\n self.nn = nn\n self.config = config\n self.val_df = pd.read_csv(config[\"data_files\"][\"val\"])\n self.layer_weights = self.nn.get_weights()\n\n def compute(self):\n return [self.__single_layer_contribution__(i) for i in tqdm(range(1, len(self.nn.layers)))]\n\n def compute_accuracy(self):\n X = self.val_df.drop(columns=[self.config[\"fairness\"][\"output_label\"]]).values\n y = self.val_df[self.config[\"fairness\"][\"output_label\"]].values\n\n y_preds = self.nn.predict(X)[:, 0]\n y_preds_orig = y_preds\n y_preds_orig[y_preds_orig <= self.config[\"prediction\"][\"threshold\"]] = 0\n y_preds_orig[y_preds_orig > self.config[\"prediction\"][\"threshold\"]] = 1\n return accuracy_from_scores(y, y_preds, self.config[\"prediction\"][\"threshold\"])\n\n def payoff(self, model, weights):\n\n model.set_weights(weights)\n scores = fairness.classifier_score(pred_fun=model.predict,\n metrics=self.config[\"optimization\"][\"metrics\"],\n df=self.val_df,\n protected_label=self.config[\"fairness\"][\"protected_feature\"],\n privileged_value=self.config[\"fairness\"][\"privileged_value\"],\n output_name=self.config[\"fairness\"][\"output_label\"],\n threshold=0.5,\n output_index=0)\n\n scores = scores[np.isfinite(scores)]\n val_bias = np.max(np.abs(scores))\n\n val_accuracy = self.compute_accuracy()\n return (1 + val_bias) * (1 - val_accuracy)\n\n def __get_weights__(self, index, randomized_indices):\n\n if index in randomized_indices:\n return np.random.randn(*self.layer_weights[index].shape)\n else:\n return self.layer_weights[index]\n\n def __single_layer_contribution__(self, k):\n # This is the number of iterations where we calculate the difference in contribution.\n # This is the M variable in [1]\n total_iterations = 10\n\n total_number_of_layers = len(self.nn.layers)\n sum_of_contributions = 0.0\n\n model = keras.models.clone_model(self.nn)\n model.set_weights(self.nn.get_weights())\n\n for m in range(0, total_iterations):\n gc.collect()\n\n r = np.random.choice(total_number_of_layers)\n\n randomized_indices_all = np.hstack([np.random.choice(np.arange(total_number_of_layers), r), [k]])\n\n random_weights = [self.__get_weights__(i, randomized_indices_all) for i in range(total_number_of_layers)]\n\n w_plus = [random_weights[i] for i in range(k)] + \\\n [self.layer_weights[k]] + \\\n [random_weights[i] for i in range(k + 1, total_number_of_layers)]\n\n w_minus = random_weights\n\n v_with = self.payoff(model, w_plus)\n v_without = self.payoff(model, w_minus)\n\n sum_of_contributions = sum_of_contributions + (v_with - v_without)\n w_plus.clear()\n w_minus.clear()\n del w_plus[:]\n del w_minus[:]\n\n return sum_of_contributions / total_iterations\n\n\ndef main(input_file, output_file):\n with open(os.path.join(experiment_directory, 'config.yaml')) as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n\n np.random.seed(round(time.time()))\n\n model_info = pickle.load(open(input_file, \"rb\"))\n nn = model_info['model']\n\n shapley = Shapley(nn, config)\n vals = shapley.compute()\n\n if (os.path.isfile(output_file)):\n print(\".... 
adding to old values ...\")\n df = pd.DataFrame([vals], columns=list(map(str, range(len(vals)))))\n df = pd.concat([df, pd.read_csv(output_file)])\n else:\n df = pd.DataFrame([vals])\n\n df.to_csv(output_file, index=False, header=list(map(str, range(len(vals)))))\n\n print(vals)\n print(np.argsort(np.abs(vals)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dir\", help=\"directory containing an experiment config.yaml file\")\n parser.add_argument(\"train_df_file\")\n parser.add_argument(\"val_df_file\")\n\n args = parser.parse_args()\n experiment_directory = args.dir\n\n main(input_file=os.path.join(experiment_directory, \"model_info.p\"),\n output_file=os.path.join(experiment_directory, \"shapley.csv\"),\n )\n" ]
[ [ "numpy.random.choice", "pandas.DataFrame", "numpy.random.randn", "numpy.arange", "numpy.abs", "numpy.isfinite", "pandas.read_csv" ] ]
HuiliCui/ROMP
[ "8606d695e104f550a83bd74da3f3927c8e01faaf" ]
[ "simple_romp/setup.py" ]
[ "import setuptools\nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nrequireds = [\"opencv-python\",\"torch\"]\n\nsetuptools.setup(\n name='simple_romp',\n version='0.0.3',\n author=\"Yu Sun\",\n author_email=\"yusun@stu.hit.edu.cn\",\n setup_requires=[\n # Setuptools 18.0 properly handles Cython extensions.\n 'setuptools>=18.0',\n 'cython',\n 'numpy>=1.21',\n 'typing-extensions>=4.1',\n 'black'\n ],\n install_requires=requireds,\n description=\"ROMP: Monocular, One-stage, Regression of Multiple 3D People, ICCV21\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Arthur151/ROMP\",\n packages=[\n 'romp',\n 'romp_visualizer',\n 'romp_visualizer.sim3drender',\n 'romp_visualizer.sim3drender.lib',\n ],\n ext_modules=cythonize([Extension(\"Sim3DR_Cython\",\n sources=[\"romp_visualizer/sim3drender/lib/rasterize.pyx\",\n \"romp_visualizer/sim3drender/lib/rasterize_kernel.cpp\"],\n language='c++',\n include_dirs=[numpy.get_include()],\n extra_compile_args=[\"-std=c++11\"])]),\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: Other/Proprietary License\",\n \"Operating System :: OS Independent\",\n ],\n project_urls={\n \"Bug Tracker\": \"https://github.com/Arthur151/ROMP/issues\",\n },\n entry_points={\n \"console_scripts\": [\n \"romp=romp.main:main\",\n ],\n },\n)\n" ]
[ [ "numpy.get_include" ] ]
mfkiwl/kernel_tuner
[ "7f2bdc85a9c355d38b04dba1ce518fba1d65ffb7" ]
[ "examples/cuda/convolution_correct.py" ]
[ "#!/usr/bin/env python\n\"\"\" convolution with correctness checks\n\nThis example is mostly the same as the Convolution example. The only\ndifference is that a naive kernel is used to compute a reference\noutput. This reference output is used to check the correctness of\nevery kernel before it is benchmarked.\n\nThis is done using the run_kernel() function of the Kernel Tuner and\nthe `answer` option of the tune_kernel function.\n\nThe run_kernel function simply runs a kernel using much of the same\ninterface as tune_kernel, however, for each tuning_parameter you pass\na single value instead of a list of options. The run_kernel function\nreturns a list of arguments that contains the output of the kernel.\n\nWhen calling tune_kernel you specify the `answer` as a list, which\nis similar to the arguments list of the kernel. To separate input\nand output arguments you insert a `None` value in the answer list\nfor all arguments that are actually inputs to the kernel. The\nvalues in the answers list that are not None are used to verify\nthe correctness of every kernel in the parameter space before it is\nbenchmarked.\n\"\"\"\nimport numpy\nimport kernel_tuner\nfrom collections import OrderedDict\n\ndef tune():\n with open('convolution.cu', 'r') as f:\n kernel_string = f.read()\n\n filter_size = (17, 17)\n problem_size = (4096, 4096)\n size = numpy.prod(problem_size)\n border_size = (filter_size[0]//2*2, filter_size[1]//2*2)\n input_size = ((problem_size[0]+border_size[0]) * (problem_size[1]+border_size[1]))\n\n output = numpy.zeros(size).astype(numpy.float32)\n input = numpy.random.randn(input_size).astype(numpy.float32)\n\n filter = numpy.random.randn(filter_size[0]*filter_size[1]).astype(numpy.float32)\n cmem_args= {'d_filter': filter }\n\n args = [output, input, filter]\n tune_params = OrderedDict()\n tune_params[\"filter_width\"] = [filter_size[0]]\n tune_params[\"filter_height\"] = [filter_size[1]]\n\n #tune_params[\"block_size_x\"] = [16*i for i in range(1,3)]\n tune_params[\"block_size_x\"] = [16*i for i in range(1,9)]\n #tune_params[\"block_size_y\"] = [2**i for i in range(1,5)]\n tune_params[\"block_size_y\"] = [2**i for i in range(1,6)]\n\n tune_params[\"tile_size_x\"] = [2**i for i in range(3)]\n tune_params[\"tile_size_y\"] = [2**i for i in range(3)]\n\n tune_params[\"use_padding\"] = [0,1] #toggle the insertion of padding in shared memory\n tune_params[\"read_only\"] = [0,1] #toggle using the read-only cache\n\n grid_div_x = [\"block_size_x\", \"tile_size_x\"]\n grid_div_y = [\"block_size_y\", \"tile_size_y\"]\n\n #compute the answer using a naive kernel\n params = { \"block_size_x\": 16, \"block_size_y\": 16}\n tune_params[\"filter_width\"] = [filter_size[0]]\n tune_params[\"filter_height\"] = [filter_size[1]]\n results = kernel_tuner.run_kernel(\"convolution_naive\", kernel_string,\n problem_size, args, params,\n grid_div_y=[\"block_size_y\"], grid_div_x=[\"block_size_x\"], lang='cupy')\n\n #set non-output fields to None\n answer = [results[0], None, None]\n\n #start kernel tuning with correctness verification\n return kernel_tuner.tune_kernel(\"convolution_kernel\", kernel_string,\n problem_size, args, tune_params,\n grid_div_y=grid_div_y, grid_div_x=grid_div_x, verbose=True, cmem_args=cmem_args, answer=answer, lang='cupy')\n\n\nif __name__ == \"__main__\":\n import time\n s1 = time.time()*1000\n results = tune()\n\n e1 = time.time()*1000\n print(\"\\n Actualy time used:\", e1-s1)\n import json\n with open(\"convolution_RTX_2070.json\", 'w') as fp:\n json.dump(results, 
fp)\n\n" ]
[ [ "numpy.prod", "numpy.random.randn", "numpy.zeros" ] ]
eugval/sim2real_dynamics_simulation
[ "2ed175803faa38792f6becc2dc91f44ae71ed9c2", "2ed175803faa38792f6becc2dc91f44ae71ed9c2" ]
[ "robosuite-extra/robosuite_extra/push_env/sawyer_push.py", "sim2real-policies/sim2real_policies/sys_id/env_probing_interaction/epi.py" ]
[ "from collections import OrderedDict\nimport numpy as np\nfrom robosuite_extra.env_base.sawyer import SawyerEnv\n\nfrom robosuite.models.arenas import TableArena\nfrom robosuite.models.objects import BoxObject, CylinderObject\nfrom robosuite_extra.models.generated_objects import FullyFrictionalBoxObject\nfrom robosuite_extra.models.tasks import UniformSelectiveSampler\nfrom robosuite.utils.mjcf_utils import array_to_string\nfrom robosuite_extra.push_env.push_task import PushTask\nfrom robosuite_extra.utils import transform_utils as T\nfrom robosuite_extra.controllers import SawyerEEFVelocityController\nimport copy\nfrom collections import deque\n\nclass SawyerPush(SawyerEnv):\n \"\"\"\n This class corresponds to a Pushing task for the sawyer robot arm.\n\n This task consists of pushing a rectangular puck from some initial position to a final goal.\n The goal and initial positions are chosen randomly within some starting bounds\n \"\"\"\n\n def __init__(\n self,\n gripper_type=\"PushingGripper\",\n parameters_to_randomise=None,\n randomise_initial_conditions=True,\n table_full_size=(0.8, 1.6, 0.719),\n table_friction=(1e-4, 5e-3, 1e-4),\n use_camera_obs=False,\n use_object_obs=True,\n reward_shaping=True,\n placement_initializer=None,\n gripper_visualization=True,\n use_indicator_object=False,\n has_renderer=False,\n has_offscreen_renderer=True,\n render_collision_mesh=False,\n render_visual_mesh=True,\n control_freq=10,\n horizon=80,\n ignore_done=False,\n camera_name=\"frontview\",\n camera_height=256,\n camera_width=256,\n camera_depth=False,\n pid=True,\n ):\n \"\"\"\n Args:\n\n gripper_type (str): type of gripper, used to instantiate\n gripper models from gripper factory.\n\n parameters_to_randomise [string,] : List of keys for parameters to randomise, None means all the available parameters are randomised\n\n\n randomise_initial_conditions [bool,]: Whether or not to randomise the starting configuration of the task.\n\n\n table_full_size (3-tuple): x, y, and z dimensions of the table.\n\n table_friction (3-tuple): the three mujoco friction parameters for\n the table.\n\n use_camera_obs (bool): if True, every observation includes a\n rendered image.\n\n use_object_obs (bool): if True, include object (cube) information in\n the observation.\n\n reward_shaping (bool): if True, use dense rewards.\n\n placement_initializer (ObjectPositionSampler instance): if provided, will\n be used to place objects on every reset, else a UniformRandomSampler\n is used by default.\n\n gripper_visualization (bool): True if using gripper visualization.\n Useful for teleoperation.\n\n use_indicator_object (bool): if True, sets up an indicator object that\n is useful for debugging.\n\n has_renderer (bool): If true, render the simulation state in\n a viewer instead of headless mode.\n\n has_offscreen_renderer (bool): True if using off-screen rendering.\n\n render_collision_mesh (bool): True if rendering collision meshes\n in camera. False otherwise.\n\n render_visual_mesh (bool): True if rendering visual meshes\n in camera. False otherwise.\n\n control_freq (float): how many control signals to receive\n in every second. This sets the amount of simulation time\n that passes between every action input.\n\n horizon (int): Every episode lasts for exactly @horizon timesteps.\n\n ignore_done (bool): True if never terminating the environment (ignore @horizon).\n\n camera_name (str): name of camera to be rendered. 
Must be\n set if @use_camera_obs is True.\n\n camera_height (int): height of camera frame.\n\n camera_width (int): width of camera frame.\n\n camera_depth (bool): True if rendering RGB-D, and RGB otherwise.\n\n pid (bool) : True if using a velocity PID controller for controlling the arm, false if using a\n mujoco-implemented proportional controller.\n \"\"\"\n\n self.initialised = False\n\n # settings for table\n self.table_full_size = table_full_size\n self.table_friction = table_friction\n\n # whether to use ground-truth object states\n self.use_object_obs = use_object_obs\n\n # reward configuration\n self.reward_shaping = reward_shaping\n if (self.reward_shaping):\n self.reward_range = [-np.inf, horizon * (0.1)]\n else:\n self.reward_range = [0, 1]\n\n # Domain Randomisation Parameters\n self.parameters_to_randomise = parameters_to_randomise\n self.randomise_initial_conditions = randomise_initial_conditions\n self.dynamics_parameters = OrderedDict()\n self.default_dynamics_parameters = OrderedDict()\n self.parameter_sampling_ranges = OrderedDict()\n self.factors_for_param_randomisation = OrderedDict()\n\n\n # object placement initializer\n if placement_initializer:\n self.placement_initializer = placement_initializer\n else:\n self.placement_initializer = UniformSelectiveSampler(\n x_range=None,\n y_range=None,\n ensure_object_boundary_in_range=True,\n z_rotation=None,\n np_random=None\n )\n\n # Param for storing a specific goal and object starting positions\n self.specific_goal_position = None\n self.specific_gripper_position = None\n self.gripper_pos_neutral = [0.44969246, 0.16029991, 1.00875409]\n\n super().__init__(\n gripper_type=gripper_type,\n gripper_visualization=gripper_visualization,\n use_indicator_object=use_indicator_object,\n has_renderer=has_renderer,\n has_offscreen_renderer=has_offscreen_renderer,\n render_collision_mesh=render_collision_mesh,\n render_visual_mesh=render_visual_mesh,\n control_freq=control_freq,\n horizon=horizon,\n ignore_done=ignore_done,\n use_camera_obs=use_camera_obs,\n camera_name=camera_name,\n camera_height=camera_height,\n camera_width=camera_width,\n camera_depth=camera_depth,\n pid=pid,\n )\n\n self._set_default_dynamics_parameters(pid)\n self._set_default_parameter_sampling_ranges()\n self._set_dynamics_parameters(self.default_dynamics_parameters)\n self._set_factors_for_param_randomisation(self.default_dynamics_parameters)\n\n # Check that the parameters to randomise are within the allowed parameters\n if (self.parameters_to_randomise is not None):\n self._check_allowed_parameters(self.parameters_to_randomise)\n\n # IK solver for placing the arm at desired locations during reset\n self.IK_solver = SawyerEEFVelocityController()\n\n self.placement_initializer.set_random_number_generator(self.np_random)\n\n self.init_control_timestep = self.control_timestep\n self.init_qpos = self.mujoco_robot.init_qpos\n\n # Storing parameters for temporary switching\n self.cached_parameters_to_randomise = None\n self.cached_dynamics_parameters = None\n\n\n self.initialised = True\n self.reset()\n\n def _set_dynamics_parameters(self, parameters):\n self.dynamics_parameters = copy.deepcopy(parameters)\n\n def _default_damping_params(self):\n # return np.array([0.01566, 1.171, 0.4906, 0.1573, 1.293, 0.08688, 0.1942]) # -real world calibration\n # return np.array([0.8824,2.3357,1.1729, 0.0 , 0.5894, 0.0 ,0.0082]) #- command calibration\n return np.array([8.19520686e-01, 1.25425414e+00, 1.04222253e+00,\n 0.00000000e+00, 1.43146116e+00, 1.26807887e-01, 
1.53680244e-01, ]) # - command calibration 2\n\n def _default_armature_params(self):\n return np.array([0.00000000e+00, 0.00000000e+00, 2.70022664e-02, 5.35581203e-02,\n 3.31204140e-01, 2.59623415e-01, 2.81964631e-01, ])\n\n def _default_joint_friction_params(self):\n return np.array([4.14390483e-03,\n 9.30938506e-02, 2.68656509e-02, 0.00000000e+00, 0.00000000e+00,\n 4.24867204e-04, 8.62040317e-04])\n\n def _set_default_dynamics_parameters(self, use_pid):\n \"\"\"\n Setting the the default environment parameters.\n \"\"\"\n self.default_dynamics_parameters['joint_forces'] = np.zeros((7,))\n self.default_dynamics_parameters['acceleration_forces'] = np.zeros((7,))\n self.default_dynamics_parameters['eef_forces'] = np.zeros((6,))\n self.default_dynamics_parameters['obj_forces'] = np.zeros((6,))\n\n\n self.default_dynamics_parameters['eef_timedelay'] = np.asarray(0)\n self.default_dynamics_parameters['obj_timedelay'] = np.asarray(0)\n self.default_dynamics_parameters['timestep_parameter'] = np.asarray(0.0)\n self.default_dynamics_parameters['pid_iteration_time'] = np.asarray(0.)\n self.default_dynamics_parameters['mujoco_timestep'] = np.asarray(0.002)\n\n self.default_dynamics_parameters['action_additive_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['action_multiplicative_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['action_systematic_noise'] = np.asarray(0.0)\n\n self.default_dynamics_parameters['eef_obs_position_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['eef_obs_velocity_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['obj_obs_position_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['obj_obs_velocity_noise'] = np.asarray(0.0)\n self.default_dynamics_parameters['obj_angle_noise'] = np.asarray(0.0)\n\n self.default_dynamics_parameters['obj_density'] = np.asarray(400)\n self.default_dynamics_parameters['obj_size'] = np.array([0.0555 / 2, 0.0555 / 2, 0.03 / 2])\n self.default_dynamics_parameters['obj_sliding_friction'] = np.asarray(0.4)\n self.default_dynamics_parameters['obj_torsional_friction'] = np.asarray(0.01)\n\n link_masses = np.zeros((7,))\n for link_name, idx, body_node, mass_node, joint_node in self._robot_link_nodes_generator():\n if (mass_node is not None):\n dynamics_parameter_value = float(mass_node.get(\"mass\"))\n link_masses[idx] = dynamics_parameter_value\n\n self.default_dynamics_parameters['link_masses'] = link_masses\n self.default_dynamics_parameters['joint_dampings'] = self._default_damping_params()\n self.default_dynamics_parameters['armatures'] = self._default_armature_params()\n self.default_dynamics_parameters['joint_frictions'] = self._default_joint_friction_params()\n\n if (use_pid):\n gains = self.mujoco_robot.velocity_pid_gains\n kps = np.array([gains['right_j{}'.format(actuator)]['p'] for actuator in range(7)])\n kis = np.array([gains['right_j{}'.format(actuator)]['i'] for actuator in range(7)])\n kds = np.array([gains['right_j{}'.format(actuator)]['d'] for actuator in range(7)])\n #\n self.default_dynamics_parameters['kps'] = kps\n self.default_dynamics_parameters['kis'] = kis\n self.default_dynamics_parameters['kds'] = kds\n else:\n kvs = np.zeros((7,))\n for target_joint, jnt_idx, node in self._velocity_actuator_nodes_generator():\n gains_value = float(node.get(\"kv\"))\n kvs[jnt_idx] = gains_value\n\n self.default_dynamics_parameters['kvs'] = kvs\n\n def _set_default_parameter_sampling_ranges(self):\n \"\"\"\n Returns the parameter ranges to draw samples from in the domain 
randomisation.\n \"\"\"\n parameter_ranges = {\n 'joint_forces': np.array([[0.,0.,0.,0.,0.,0.,0.], [1.5,1.5,1.5,1.5,1.5,1.5,1.5]]),#\n 'acceleration_forces': np.array([[0.,0.,0.,0.,0.,0.,0.], [0.05,0.05,0.05,0.05,0.05,0.05,0.05]]),#\n 'eef_forces': np.array([[0.,0.,0.,0.,0.,0.], [0.06 ,0.06,0.06,0.01,0.01,0.01,]]), #\n 'obj_forces': np.array([[0., 0., 0., 0., 0., 0., ], [0.0011, 0.0011, 0.0011, 0.0005, 0.0005, 0.0005, ]]),\n\n 'eef_timedelay': np.array([0, 1]),\n 'obj_timedelay': np.array([0,2]),\n 'timestep_parameter': np.array([0.0, 0.01]),\n 'pid_iteration_time': np.array([0., 0.04]),\n 'mujoco_timestep': np.array([0.001,0.002]),\n\n 'action_additive_noise': np.array([0.01, 0.1]),\n 'action_multiplicative_noise': np.array([0.005,0.02]),\n 'action_systematic_noise': np.array([-0.05, 0.05]),\n\n 'eef_obs_position_noise': np.array([0.0005, 0.001]),\n 'eef_obs_velocity_noise': np.array([0.0005, 0.001]),\n 'obj_obs_position_noise': np.array([0.0005, 0.001]),\n 'obj_obs_velocity_noise': np.array([0.0005, 0.0015]),\n 'obj_angle_noise': np.array([0.005, 0.05]),\n\n 'obj_density': np.array([100, 800]),\n 'obj_size': np.array([0.995, 1.005]),\n 'obj_sliding_friction': np.array([0.01, 0.8]),\n 'obj_torsional_friction': np.array([0.001, 0.3]),\n\n 'link_masses': np.array([0.98, 1.02]),\n 'joint_dampings': np.array([0.5, 2.]),\n 'armatures': np.array([0.66, 1.5]),\n 'joint_frictions': np.array([0.66, 1.5]),\n }\n\n if (self.pid):\n parameter_ranges['kps'] = np.array([0.66, 1.5])\n parameter_ranges['kis'] = np.array([0.66, 1.5])\n parameter_ranges['kds'] = np.array([0.66, 1.5])\n else:\n parameter_ranges['kvs'] = [0.5, 2]\n\n self.parameter_sampling_ranges = parameter_ranges\n\n\n def _set_factors_for_param_randomisation(self, parameters):\n factors = copy.deepcopy(parameters)\n\n factors['joint_forces'] = np.ones((7,))\n factors['acceleration_forces'] = np.ones((7,))\n factors['eef_forces'] = np.ones((1,))\n factors['obj_forces'] = np.ones((6,))\n\n factors['eef_timedelay'] = 1.0\n factors['timestep_parameter'] = 1.0\n factors['pid_iteration_time'] = 1.0\n factors['mujoco_timestep'] = 1.0\n factors['obj_timedelay'] = 1.0\n\n factors['action_additive_noise'] = 1.0\n factors['action_multiplicative_noise'] = 1.0\n factors['action_systematic_noise'] = 1.0\n\n factors['eef_obs_position_noise'] = 1.0\n factors['eef_obs_velocity_noise'] = 1.0\n factors['obj_obs_position_noise'] = 1.0\n factors['obj_obs_velocity_noise'] = 1.0\n factors['obj_angle_noise'] = 1.0\n\n factors['obj_density'] = 1.0\n factors['obj_sliding_friction'] = 1.0\n factors['obj_torsional_friction'] = 1.0\n\n\n self.factors_for_param_randomisation = factors\n\n def _velocity_actuator_nodes_generator(self):\n \"\"\"\n Caching the xml nodes for the velocity actuators for use when setting the parameters\n \"\"\"\n\n for node in self.model.root.findall(\".//velocity[@kv]\"):\n target_joint = node.get(\"joint\")\n jnt_idx = int(target_joint[-1])\n yield target_joint, jnt_idx, node\n\n def _robot_link_nodes_generator(self):\n \"\"\"\n Caching the xml nodes for the velocity actuators for use when setting the parameters\n \"\"\"\n\n for link_idx, link_name in enumerate(self.mujoco_robot.links):\n body_node = self.mujoco_robot.root.find(\".//body[@name='{}']\".format(link_name))\n mass_node = body_node.find(\"./inertial[@mass]\")\n joint_node = body_node.find(\"./joint\")\n\n yield link_name, link_idx, body_node, mass_node, joint_node\n\n def _check_allowed_parameters(self, parameters):\n allowed_parameters = self.get_parameter_keys()\n\n for param 
in parameters:\n assert param in allowed_parameters, '{} not allowed. Only allowed parameters are {}'.format(param,\n allowed_parameters)\n\n def _select_appropriate_distribution(self, key):\n '''\n Which distribution to use to sample the different dynamics parameters.\n :param key: The parameter to consider.\n '''\n if (\n key == 'joint_forces'\n or key == 'acceleration_forces'\n or key == 'eef_forces'\n or key == 'obj_forces'\n\n or key == 'timestep_parameter'\n or key == 'pid_iteration_time'\n or key == 'mujoco_timestep'\n\n or key == 'action_additive_noise'\n or key == 'action_multiplicative_noise'\n or key == 'action_systematic_noise'\n\n or key == 'eef_obs_position_noise'\n or key == 'eef_obs_velocity_noise'\n or key == 'obj_obs_position_noise'\n or key == 'obj_obs_velocity_noise'\n or key == 'obj_angle_noise'\n\n or key == 'link_masses'\n or key == 'obj_size'\n\n or key == 'obj_density'\n or key == 'obj_sliding_friction'\n ):\n return self.np_random.uniform\n elif (\n key == 'eef_timedelay'\n or key == 'obj_timedelay'\n ):\n return self._ranged_random_choice\n else:\n return self._loguniform\n\n def _loguniform(self, low=1e-10, high=1., size=None):\n return np.asarray(np.exp(self.np_random.uniform(np.log(low), np.log(high), size)))\n\n def _ranged_random_choice(self,low, high, size=1):\n vals = np.arange(low,high+1)\n return self.np_random.choice(vals, size)\n\n def _parameter_for_randomisation_generator(self, parameters=None):\n '''\n Generates (key,value) pairs of sampled dynamics parameters.\n :param parameters: The parameters to be sampled for randomisation, if None, all the allowed parameters are sampled.\n '''\n parameter_ranges = self.parameter_sampling_ranges\n\n if (parameters is None):\n parameters = self.get_parameter_keys()\n\n for key in parameters:\n\n parameter_range = parameter_ranges[key]\n\n if (parameter_range.shape[0] == 1):\n yield key, np.asarray(parameter_range[0])\n elif (parameter_range.shape[0] == 2):\n distribution = self._select_appropriate_distribution(key)\n size = self.default_dynamics_parameters[key].shape\n yield key, np.asarray(\n self.factors_for_param_randomisation[key] * distribution(*parameter_ranges[key], size=size))\n else:\n raise RuntimeError('Parameter radomisation range needs to be of shape {1,2}xN')\n\n\n def _load_model(self):\n \"\"\"\n Loads an xml model, puts it in self.model. 
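        # Illustrative sketch of how the sampling helpers above combine (assumes only
        # numpy; 'default_damping' stands in for the default joint-damping vector).
        # Multiplicative parameters such as 'joint_dampings' fall through to the
        # log-uniform branch and are scaled by their default value via
        # factors_for_param_randomisation, while integer time delays use the ranged
        # random choice:
        #
        #   import numpy as np
        #   rng = np.random.RandomState(0)
        #   def loguniform(low, high, size=None):
        #       return np.exp(rng.uniform(np.log(low), np.log(high), size))
        #   damping_factor = loguniform(0.5, 2.0, size=7)       # range for 'joint_dampings'
        #   sampled_damping = damping_factor * default_damping  # factor = default value
        #   eef_timedelay = rng.choice(np.arange(0, 2))          # uniform over {0, 1}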
This sets up the mujoco xml for the scene.\n \"\"\"\n super()._load_model()\n self.mujoco_robot.set_base_xpos([0, 0, 0])\n obj_size = np.array([0.0555 / 2, 0.0555 / 2, 0.03 / 2])\n\n ### Domain Randomisation ###\n if (self.initialised):\n for key, val in self._parameter_for_randomisation_generator(parameters=self.parameters_to_randomise):\n self.dynamics_parameters[key] = val\n\n ## Queues for adding time delays\n self.eef_pos_queue = deque(maxlen=int(self.dynamics_parameters['eef_timedelay'] + 1))\n self.eef_vel_queue = deque(maxlen=int(self.dynamics_parameters['eef_timedelay'] + 1))\n\n self.obj_pos_queue = deque(maxlen=int(self.dynamics_parameters['obj_timedelay'] + 1))\n self.obj_vel_queue = deque(maxlen=int(self.dynamics_parameters['obj_timedelay'] + 1))\n self.obj_angle_queue = deque(maxlen=int(self.dynamics_parameters['obj_timedelay'] + 1))\n\n if (self.pid is not None):\n self.pid.sample_time = self.dynamics_parameters['pid_iteration_time']\n\n obj_size = self.dynamics_parameters['obj_size']\n\n ### Create the Task ###\n ## Load the Arena ##\n self.mujoco_arena = TableArena(\n table_full_size=self.table_full_size, table_friction=self.table_friction\n )\n if self.use_indicator_object:\n self.mujoco_arena.add_pos_indicator()\n\n # The sawyer robot has a pedestal, we want to align it with the table\n self.mujoco_arena.set_origin([0.16 + self.table_full_size[0] / 2, 0, 0])\n\n ## Create the objects that will go into the arena ##\n # Create object and goal\n\n if(self.initialised):\n density = self.dynamics_parameters['obj_density']\n friction = np.array([self.dynamics_parameters['obj_sliding_friction'],\n self.dynamics_parameters['obj_torsional_friction'],\n self.table_friction[2]])\n else:\n density = None\n friction = None\n\n rectangle = FullyFrictionalBoxObject(\n size_min= obj_size, #\n size_max= obj_size, #\n rgba=[1, 0, 0, 1],\n density=density,\n friction=friction\n )\n\n\n self.mujoco_objects = OrderedDict([(\"rectangle\", rectangle)])\n\n\n goal = CylinderObject(\n size=[0.03, 0.001],\n rgba=[0, 1, 0, 1],\n )\n self.mujoco_goal = goal\n\n ## Put everything together into the task ##\n self.model = PushTask(\n self.mujoco_arena,\n self.mujoco_robot,\n self.mujoco_goal,\n self.mujoco_objects,\n initializer=self.placement_initializer,\n )\n\n # Add some small damping to the objects to prevent infinite acceleration\n for obj in self.model.xml_objects:\n obj.find('./joint').set('damping', '0.005')\n\n ## Manipulate objects in task ##\n # Reduce penetration of objects\n for obj in self.model.xml_objects:\n obj.find('geom').set('solimp', \"0.99 0.99 0.01\")\n obj.find('geom').set('solref', \"0.01 1\")\n\n self.model.arena.table_collision.set('solimp', \"0.99 0.99 0.01\")\n self.model.arena.table_collision.set('solref', \"0.01 1\")\n\n # Place goal: it can be placed anywhere in a 16x30 cm box centered 15 cm away\n # from the center of the table along its length\n if (self.specific_goal_position is not None):\n g_pos = np.array([self.gripper_pos_neutral[0] + self.specific_goal_position[0],\n self.gripper_pos_neutral[1] + self.specific_goal_position[1],\n self.model.table_top_offset[2]])\n\n elif (self.randomise_initial_conditions):\n noise = self.np_random.uniform(-1, 1, 3) * np.array([0.15, 0.08, 0.0])\n offset = np.array([0.0, 0.15, 0.0])\n g_pos = noise + offset + self.model.table_top_offset\n else:\n g_pos = [0.44969246, 0.16029991 + 0.335, self.model.table_top_offset[2]] #Placing the object at 30 cm , the eef needs to be at 33.5 cm\n\n self.model.xml_goal.set(\"pos\", 
array_to_string(g_pos))\n\n ### Set the xml parameters to the values given by the dynamics_parameters attribute ###\n if (self.initialised):\n self._apply_xml_dynamics_parameters()\n\n def _apply_xml_dynamics_parameters(self):\n \"\"\"\n Applying the values contained in dynamics_parameters to the xml elements of the model. If a pid is used this\n also applied the pid gains contained in the dynamics parameters.\n \"\"\"\n\n opt_node = self.model.root.find('option')\n opt_node.set(\"timestep\", str(self.dynamics_parameters['mujoco_timestep']))\n\n for link_name, idx, body_node, mass_node, joint_node in self._robot_link_nodes_generator():\n if (mass_node is not None):\n mass_node.set(\"mass\", str(self.dynamics_parameters['link_masses'][idx]))\n\n if (joint_node is not None):\n joint_node.set(\"damping\", str(self.dynamics_parameters['joint_dampings'][idx]))\n joint_node.set(\"armature\", str(self.dynamics_parameters['armatures'][idx]))\n joint_node.set(\"frictionloss\", str(self.dynamics_parameters['joint_frictions'][idx]))\n\n if (self.pid):\n self.pid.tunings = (self.dynamics_parameters['kps'],\n self.dynamics_parameters['kis'],\n self.dynamics_parameters['kds'],\n )\n else:\n for target_joint, jnt_idx, node in self._velocity_actuator_nodes_generator():\n node.set(\"kv\", str(self.dynamics_parameters['kvs'][jnt_idx]))\n\n\n\n\n def set_parameter_sampling_ranges(self, sampling_ranges):\n '''\n Set a new sampling range for the dynamics parameters.\n :param sampling_ranges: (Dict) Dictionary of the sampling ranges for the different parameters of the form\n (param_name, range) where param_name is a valid param name string and range is a numpy array of dimensionality\n {1,2}xN where N is the dimension of the given parameter\n '''\n for candidate_name, candidate_value in sampling_ranges.items():\n assert candidate_name in self.parameter_sampling_ranges, 'Valid parameters are {}'.format(self.parameter_sampling_ranges.keys())\n assert candidate_value.shape[0] == 1 or candidate_value.shape[0]==2, 'First dimension of the sampling parameter needs to have value 1 or 2'\n assert len(candidate_value.shape) == len(self.parameter_sampling_ranges[candidate_name].shape), '{} has the wrong number of dimensions'.format(candidate_name)\n if(len(self.parameter_sampling_ranges[candidate_name].shape) >1):\n assert self.parameter_sampling_ranges[candidate_name].shape[1] == candidate_value.shape[1], '{} has the wrong shape'.format(candidate_name)\n\n self.parameter_sampling_ranges[candidate_name] = candidate_value\n\n def get_parameter_sampling_ranges(self):\n return copy.deepcopy(self.parameter_sampling_ranges)\n\n def get_parameter_keys(self):\n return self.default_dynamics_parameters.keys()\n\n def get_total_parameter_dimension(self):\n total_dimension = 0\n for key, val in self.default_dynamics_parameters.items():\n param_shape = val.shape\n if(param_shape ==()):\n total_dimension += 1\n else:\n total_dimension += param_shape[0]\n return total_dimension\n\n def get_internal_state(self):\n return np.concatenate([self._joint_positions, self._joint_velocities]).tolist()\n\n def get_internal_state_dimension(self):\n internal_state = self.get_internal_state()\n return len(internal_state)\n\n def change_parameters_to_randomise(self, parameters):\n self._check_allowed_parameters(parameters)\n self._set_dynamics_parameters(self.default_dynamics_parameters)\n self.parameters_to_randomise = parameters\n\n def get_randomised_parameters(self):\n if (self.parameters_to_randomise is not None):\n return 
self.parameters_to_randomise\n else:\n return self.get_parameter_keys()\n\n def get_randomised_parameter_dimensions(self):\n \"\"\" Return the number of dimensions of the ranomised parameters\"\"\"\n randomised_parameter_names = self.get_randomised_parameters()\n\n total_dimension = 0\n for param in randomised_parameter_names:\n param_shape = self.default_dynamics_parameters[param].shape\n if(param_shape ==()):\n total_dimension += 1\n else:\n total_dimension += param_shape[0]\n return total_dimension\n\n def get_dynamics_parameters(self):\n \"\"\"\n Returns the values of the current dynamics parameters.\n \"\"\"\n return copy.deepcopy(self.dynamics_parameters)\n\n def get_default_dynamics_parameters(self):\n \"\"\"\n Returns the default values of the dynamics parameters.\n \"\"\"\n return copy.deepcopy(self.default_dynamics_parameters)\n\n def get_factors_for_randomisation(self):\n \"\"\"\n Returns the factor used for domain randomisation.\n \"\"\"\n return copy.deepcopy(self.factors_for_param_randomisation)\n\n def set_dynamics_parameters(self, dynamics_parameter_dict):\n \"\"\"\n Setting the dynamics parameters of the environment to specific values. These are going to be used the next\n time the environment is reset, and will be overriden if domain randomisation is on.\n :param dynamics_parameter_dict: Dictionary with the values of the parameters to set.\n \"\"\"\n for key, value in dynamics_parameter_dict.items():\n assert key in self.dynamics_parameters, 'Setting a parameter that does not exist'\n self.dynamics_parameters[key] = value\n\n def randomisation_off(self,):\n '''\n Disable the parameter randomisation temporarily and cache the current set of parameters and\n which parameters are being randomised.This can be useful for evaluation.\n '''\n current_params_to_randomise = self.get_randomised_parameters()\n current_params = self.get_dynamics_parameters()\n\n self.cached_parameters_to_randomise = current_params_to_randomise\n self.cached_dynamics_parameters = current_params\n\n self.parameters_to_randomise = []\n\n return current_params, current_params_to_randomise\n\n def randomisation_on(self):\n '''\n Restoring the randomisation as they were before the call to switch_params\n '''\n if(self.cached_dynamics_parameters is None):\n print(\"Randomisation was not switched off before switching it back on.\")\n return\n\n self.parameters_to_randomise = self.cached_parameters_to_randomise\n self.set_dynamics_parameters(self.cached_dynamics_parameters)\n self.cached_parameters_to_randomise = None\n self.cached_dynamics_parameters = None\n\n def sample_parameter_randomisation(self, parameters=None):\n ''' Samples a dictionary of dynamics parameters values using the randomisation process currently set in the environment\n parameters ([string,]) : List of parameters to sample a randomisation from. If None, all the allowed parameters are sampled.\n '''\n if (not self.initialised):\n print('Function has undefined behaviour if environment fully initialised, returning with no effect')\n return\n\n parameters_sample = {}\n\n for key, val in self._parameter_for_randomisation_generator(parameters):\n assert key in self.get_parameter_keys(), '{} not allowed. 
Choose from {}'.format(key,\n self.get_parameter_keys())\n parameters_sample[key] = val\n\n return parameters_sample\n\n def _set_goal_neutral_offset(self, goal_x, goal_y):\n self.specific_goal_position = np.array([goal_x, goal_y])\n\n def _set_gripper_neutral_offset(self, gripper_x, gripper_y):\n self.specific_gripper_position = np.array([gripper_x, gripper_y])\n\n def _get_reference(self):\n \"\"\"\n Sets up references to important components. A reference is typically an\n index or a list of indices that point to the corresponding elements\n in a flatten array, which is how MuJoCo stores physical simulation data.\n \"\"\"\n super()._get_reference()\n\n # Pushing object ids\n self.push_obj_name = self.model.object_names[self.model.push_object_idx]\n\n self.object_body_id = self.sim.model.body_name2id(self.push_obj_name)\n self.object_geom_id = self.sim.model.geom_name2id(self.push_obj_name)\n\n # Pushing object qpos indices for the object\n object_qpos = self.sim.model.get_joint_qpos_addr(self.push_obj_name)\n self._ref_object_pos_low, self._ref_object_pos_high = object_qpos\n\n # goal ids\n self.goal_body_id = self.sim.model.body_name2id(\"goal\")\n self.goal_site_id = self.sim.model.site_name2id(\"goal\")\n\n # Gripper ids\n self.l_finger_geom_ids = [\n self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms\n ]\n self.r_finger_geom_ids = [\n self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms\n ]\n\n def _reset_internal(self):\n \"\"\"\n Resets simulation internal configurations. Is called upon environment reset.\n \"\"\"\n super()._reset_internal()\n self.sim.forward()\n\n if (self.initialised):\n ### Set the arm position using IK ###\n\n ## Get the pose of the gripper in the initial position ##\n\n # Find the gripper length\n gripper_site = self.sim.data.site_xpos[self.eef_site_id]\n right_hand_pos = self.sim.data.get_body_xpos('right_hand')\n gripper_length = (right_hand_pos - gripper_site)[2]\n\n if(self.specific_gripper_position is not None):\n init_pos = np.array([self.gripper_pos_neutral[0] + self.specific_gripper_position[0],\n self.gripper_pos_neutral[1] + self.specific_gripper_position[1],\n self.model.table_top_offset.copy()[2] + 0.007+ gripper_length])\n\n\n init_pose = T.make_pose(init_pos, np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]]))\n\n elif (self.randomise_initial_conditions):\n # Get the initial position of interest :\n # A box of size 12x12cm, 15 cm away from the center of the table in the y axis\n noise = self.np_random.uniform(-1, 1, 3) * np.array([0.12, 0.12, 0.0])\n offset = np.array([0.0, -0.15, 0.007])\n init_pos = self.model.table_top_offset.copy() + noise + offset\n init_pos[2] = init_pos[2] + gripper_length\n init_pose = T.make_pose(init_pos, np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]])) #\n else:\n gripper_pos = self.sim.data.get_site_xpos('grip_site')\n init_pos = np.concatenate([gripper_pos[:2], [self.model.table_top_offset.copy()[2] + 0.007]])\n init_pos[2] = init_pos[2] + gripper_length\n init_pose = T.make_pose(init_pos, np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]]))\n\n ## Do the IK to find the joint angles for this initial pose ##\n # Start the IK search from the rest qpos\n ref_q = self.mujoco_robot.init_qpos\n\n # Express init_pose in the base frame of the robot\n init_pose_in_base = self.pose_in_base(init_pose)\n\n # Do the IK\n joint_angles = self.IK_solver.compute_joint_angles_for_endpoint_pose(init_pose_in_base, ref_q)\n\n # Set the robot joint angles\n 
self.set_robot_joint_positions(joint_angles)\n\n # Set reference attributes\n self.init_qpos = joint_angles\n self.init_right_hand_quat = self._right_hand_quat\n self.init_right_hand_orn = self._right_hand_orn\n self.init_right_hand_pos = self._right_hand_pos\n\n eef_rot_in_world = self.sim.data.get_body_xmat(\"right_hand\").reshape((3, 3))\n self.world_rot_in_eef = copy.deepcopy(eef_rot_in_world.T)\n\n ### Set the object position next to the arm ###\n\n # Find End effector position\n eef_pos = np.array(self.sim.data.site_xpos[self.eef_site_id])\n\n # Get the mujoco pusing object\n obj = self.model.mujoco_objects[self.model.push_object_idx]\n\n # Find the position just next to the eef\n obj_radius = obj.get_horizontal_radius()\n obj_bottom_offset = obj.get_bottom_offset()\n if (self.randomise_initial_conditions):\n obj_pos = np.array([eef_pos[0], eef_pos[1] + obj_radius + 0.00701,\n self.model.table_top_offset[2] - obj_bottom_offset[2]])\n obj_pos += self.np_random.uniform(size=3) * np.array([0.0012, 0.001, 0.0])\n\n # Get the object orientation\n obj_angle = np.pi / 2. + self.np_random.uniform(-1, 1) * np.pi / 6.\n obj_quat = np.array([np.cos(obj_angle / 2.), 0., 0., np.sin(obj_angle / 2.)])\n else:\n obj_pos = np.array([eef_pos[0], eef_pos[1] + obj.size[0] +0.0071+ 0.0002 , #0.0071 is the griper half length\n self.model.table_top_offset[2] - obj_bottom_offset[2]])\n obj_angle = np.pi/2.\n obj_quat = np.array([np.cos(obj_angle/2.), 0., 0., np.sin(obj_angle/2.)])\n\n # Concatenate to get the object qpos\n obj_qpos = np.concatenate([obj_pos, obj_quat])\n\n self.sim.data.qpos[self._ref_object_pos_low:self._ref_object_pos_high] = obj_qpos\n self.sim.forward()\n\n def reward(self, action=None):\n \"\"\"\n Reward function for the task.\n\n The dense reward has three components.\n\n Reaching: in [-inf, 0], to encourage the arm to reach the object\n Goal Distance: in [-inf, 0] the distance between the pushed object and the goal\n Safety reward in [-inf, 0], -1 for every joint that is at its limit.\n\n The sparse reward only receives a {0,1} upon reaching the goal\n\n Args:\n action (np array): The action taken in that timestep\n\n Returns:\n reward (float or dict): the reward if sparce rewards are used otherwise a dictionary\n with the total reward, and the subcoponents of the dense reward.\n \"\"\"\n reward = 0.\n\n # sparse completion reward\n if not self.reward_shaping and self._check_success():\n reward = 1.0\n\n # use a dense reward\n if self.reward_shaping:\n object_pos = self.sim.data.body_xpos[self.object_body_id]\n\n # max joint angles reward\n joint_limits = self._joint_ranges\n current_joint_pos = self._joint_positions\n\n hitting_limits_reward = - int(any([(x < joint_limits[i, 0] + 0.05 or x > joint_limits[i, 1] - 0.05) for i, x in\n enumerate(current_joint_pos)]))\n\n reward += hitting_limits_reward\n\n # reaching reward\n gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]\n dist = np.linalg.norm(gripper_site_pos[:2] - object_pos[:2])\n reaching_reward = -0.1 * dist\n reward += reaching_reward\n\n # Success Reward\n success = self._check_success()\n if (success):\n reward += 0.1\n\n # goal distance reward\n goal_pos = self.sim.data.site_xpos[self.goal_site_id]\n\n dist = np.linalg.norm(goal_pos[:2] - object_pos[:2])\n goal_distance_reward = -dist\n reward += goal_distance_reward\n\n unstable = reward < -2.5\n\n # Return all three types of rewards\n reward = {\"reward\": reward, \"reaching_distance\": -10 * reaching_reward,\n \"goal_distance\": - goal_distance_reward,\n 
\"hitting_limits_reward\": hitting_limits_reward,\n \"unstable\":unstable}\n\n return reward\n\n def _check_success(self):\n \"\"\"\n Returns True if task has been completed.\n \"\"\"\n object_pos = self.sim.data.body_xpos[self.object_body_id][:2]\n goal_pos = self.sim.data.site_xpos[self.goal_site_id][:2]\n\n dist = np.linalg.norm(goal_pos - object_pos)\n goal_horizontal_radius = self.model.mujoco_goal.get_horizontal_radius()\n\n # object centre is within the goal radius\n return dist < goal_horizontal_radius\n\n def _pre_action(self, action):\n \"\"\" Takes the action, randomised the control timestep, and adds some additional random noise to the action.\"\"\"\n # Change control timestep to simulate various random time delays\n timestep_parameter = self.dynamics_parameters['timestep_parameter']\n self.control_timestep = self.init_control_timestep + self.np_random.exponential(scale=timestep_parameter)\n\n # Add action noise to simulate unmodelled effects\n additive_noise = self.dynamics_parameters['action_additive_noise'] * self.np_random.uniform(-1, 1, action.shape)\n additive_systematic_noise = self.dynamics_parameters['action_systematic_noise']\n multiplicative_noise = 1.0 + (\n self.dynamics_parameters['action_multiplicative_noise'] * self.np_random.uniform(-1, 1,\n action.shape))\n\n action = action * (1.0 + additive_noise + additive_systematic_noise) * multiplicative_noise\n\n super()._pre_action(action)\n\n # Adding forces\n\n # Addding forces to the joint\n self.sim.data.qfrc_applied[\n self._ref_joint_vel_indexes\n ] += self.dynamics_parameters['joint_forces'] * self.np_random.uniform(-1, 1, 7)\n\n # Adding force proportional to acceleration\n self.sim.data.qfrc_applied[\n self._ref_joint_vel_indexes\n ] += self.dynamics_parameters['acceleration_forces'] * self.sim.data.qacc[\n self._ref_joint_vel_indexes\n ]\n\n self.sim.data.xfrc_applied[\n self._ref_gripper_body_indx\n ] = self.dynamics_parameters['eef_forces'] * self.np_random.uniform(-1, 1, 6)\n\n # Adding forces to the object\n obj_qvel_low_idx , obj_qvel_high_idx = self.sim.model.get_joint_qvel_addr('rectangle')\n self.sim.data.qfrc_applied[\n obj_qvel_low_idx: obj_qvel_high_idx\n ] += self.dynamics_parameters['obj_forces'] * self.np_random.uniform(-1, 1, 6)\n\n def _post_action(self, action):\n \"\"\"\n Add dense reward subcomponents to info, and checks for success of the task.\n \"\"\"\n reward, done, info = super()._post_action(action)\n\n if self.reward_shaping:\n info = reward\n reward = reward[\"reward\"]\n\n if(info[\"unstable\"]):\n done = True\n\n info[\"success\"] = self._check_success()\n\n return reward, done, info\n\n def _get_observation(self):\n \"\"\"\n Returns an OrderedDict containing observations [(name_string, np.array), ...].\n\n Important keys:\n gripper_to_object : The x-y component of the gripper to object distance\n object_to_goal : The x-y component of the object-to-goal distance\n object_z_rot : the roation of the object around an axis sticking out the table\n\n object_xvelp: x-y linear velocity of the object\n gripper_xvelp: x-y linear velocity of the gripper\n\n\n task-state : a concatenation of all the above.\n \"\"\"\n di = OrderedDict()\n\n push_obj_name = self.model.object_names[self.model.push_object_idx]\n # camera observations\n if self.use_camera_obs:\n camera_obs = self.sim.render(\n camera_name=self.camera_name,\n width=self.camera_width,\n height=self.camera_height,\n depth=self.camera_depth,\n )\n if self.camera_depth:\n di[\"image\"], di[\"depth\"] = camera_obs\n else:\n 
di[\"image\"] = camera_obs\n\n # low-level object information\n if self.use_object_obs:\n # Extract position and velocity of the eef\n eef_pos_in_world = self.sim.data.get_body_xpos(\"right_hand\")\n eef_xvelp_in_world = self.sim.data.get_body_xvelp(\"right_hand\")\n\n # Apply time delays\n eef_pos_in_world = self._apply_time_delay(eef_pos_in_world, self.eef_pos_queue)\n eef_xvelp_in_world = self._apply_time_delay(eef_xvelp_in_world, self.eef_vel_queue)\n\n # Add random noise\n position_noise = self.dynamics_parameters['eef_obs_position_noise']\n velocity_noise = self.dynamics_parameters['eef_obs_velocity_noise']\n\n eef_pos_in_world = eef_pos_in_world + self.np_random.normal(loc=0., scale=position_noise)\n eef_xvelp_in_world = eef_xvelp_in_world + self.np_random.normal(loc=0., scale=velocity_noise)\n\n # Get the position, velocity, rotation and rotational velocity of the object in the world frame\n object_pos_in_world = self.sim.data.body_xpos[self.object_body_id]\n object_xvelp_in_world = self.sim.data.get_body_xvelp(push_obj_name)\n object_rot_in_world = self.sim.data.get_body_xmat(self.push_obj_name)\n\n # Apply time delays\n object_pos_in_world = self._apply_time_delay(object_pos_in_world, self.obj_pos_queue)\n object_xvelp_in_world = self._apply_time_delay(object_xvelp_in_world, self.obj_vel_queue)\n object_rot_in_world = self._apply_time_delay(object_rot_in_world, self.obj_angle_queue)\n\n # Get the z-angle with respect to the reference position and do sin-cosine encoding\n world_rotation_in_reference = np.array([[0., 1., 0., ], [-1., 0., 0., ], [0., 0., 1., ]])\n object_rotation_in_ref = world_rotation_in_reference.dot(object_rot_in_world)\n object_euler_in_ref = T.mat2euler(object_rotation_in_ref)\n z_angle = object_euler_in_ref[2]\n\n # Add random noise\n position_noise = self.dynamics_parameters['obj_obs_position_noise']\n velocity_noise = self.dynamics_parameters['obj_obs_velocity_noise']\n angle_noise = self.dynamics_parameters['obj_angle_noise']\n\n object_pos_in_world = object_pos_in_world + self.np_random.normal(loc=0., scale=position_noise)\n object_xvelp_in_world = object_xvelp_in_world + self.np_random.normal(loc=0., scale=velocity_noise)\n z_angle = z_angle + self.np_random.normal(loc=0., scale=angle_noise)\n\n\n # construct vectors for policy observation\n sine_cosine = np.array([np.sin(8*z_angle), np.cos(8*z_angle)])\n\n\n # Get the goal position in the world\n goal_site_pos_in_world = np.array(self.sim.data.site_xpos[self.goal_site_id])\n\n # Get the eef to object and object to goal vectors in EEF frame\n eef_to_object_in_world = object_pos_in_world - eef_pos_in_world\n eef_to_object_in_eef = self.world_rot_in_eef.dot(eef_to_object_in_world)\n\n object_to_goal_in_world = goal_site_pos_in_world - object_pos_in_world\n object_to_goal_in_eef = self.world_rot_in_eef.dot(object_to_goal_in_world)\n\n # Get the object's and the eef's velocities in EED frame\n object_xvelp_in_eef = self.world_rot_in_eef.dot(object_xvelp_in_world)\n eef_xvelp_in_eef = self.world_rot_in_eef.dot(eef_xvelp_in_world)\n\n\n # Record observations into a dictionary\n di['goal_pos_in_world'] = goal_site_pos_in_world\n di['eef_pos_in_world'] = eef_pos_in_world\n di['eef_vel_in_world'] = eef_xvelp_in_world\n di['object_pos_in_world'] = object_pos_in_world\n di['object_vel_in_world'] = object_xvelp_in_world\n di[\"z_angle\"] = np.array([z_angle])\n\n di[\"task-state\"] = np.concatenate(\n [eef_to_object_in_eef[:2],object_to_goal_in_eef[:2],\n eef_xvelp_in_eef[:2], object_xvelp_in_eef[:2],\n 
sine_cosine]\n )\n\n return di\n\n def _apply_time_delay(self, object, queue):\n queue.appendleft(copy.deepcopy(object))\n\n if (len(queue) == queue.maxlen):\n return queue.pop()\n else:\n return queue[-1]\n\n\n def _check_contact(self):\n \"\"\"\n Returns True if gripper is in contact with an object.\n \"\"\"\n collision = False\n for contact in self.sim.data.contact[: self.sim.data.ncon]:\n if (\n self.sim.model.geom_id2name(contact.geom1)\n in self.gripper.contact_geoms()\n or self.sim.model.geom_id2name(contact.geom2)\n in self.gripper.contact_geoms()\n ):\n collision = True\n break\n return collision\n\n def _check_contact_with(self, object):\n \"\"\"\n Returns True if gripper is in contact with an object.\n \"\"\"\n collision = False\n for contact in self.sim.data.contact[: self.sim.data.ncon]:\n if (\n (self.sim.model.geom_id2name(contact.geom1) in self.gripper.contact_geoms()\n and contact.geom2 == self.sim.model.geom_name2id(object))\n\n or (self.sim.model.geom_id2name(contact.geom2) in self.gripper.contact_geoms()\n and contact.geom1 == self.sim.model.geom_name2id(object))\n ):\n collision = True\n break\n return collision\n\n\n\n def _gripper_visualization(self):\n \"\"\"\n Do any needed visualization here. Overrides superclass implementations.\n \"\"\"\n\n # color the gripper site appropriately based on distance to object\n if self.gripper_visualization:\n # get distance to object\n object_site_id = self.sim.model.site_name2id(self.model.object_names[self.model.push_object_idx])\n dist = np.sum(\n np.square(\n self.sim.data.site_xpos[object_site_id]\n - self.sim.data.get_site_xpos(\"grip_site\")\n )\n )\n\n # set RGBA for the EEF site here\n max_dist = 0.1\n scaled = (1.0 - min(dist / max_dist, 1.)) ** 15\n rgba = np.zeros(4)\n rgba[0] = 1 - scaled\n rgba[1] = scaled\n rgba[3] = 0.5\n\n self.sim.model.site_rgba[self.eef_site_id] = rgba\n\n\n\n\n\n# 'joint_dampings': np.array([[1./56.3,1./2.0,1./2.4,1./3.0,1./2.2,1./3.0,1./23.8],\n # [56.3,2.0,2.4,3.0,2.2,3.0,23.8,]])\n\n\n# parameter_ranges['kps'] = np.array([[1./6.9,1./4.7,1./3.6,1/2.2,1./1.1,1./2.0,1/5.6],\n# [6.9,4.6,2.2,3.6,1.1,2.0,5.6]])\n# parameter_ranges['kis'] = np.array([[1./1.03,1./1.45,1./1.75,1./12.5,1./1.6,1./191.8,1./1.4],\n# [1.03,1.45,1.75,12.5,1.6,191.8,1.35]])\n# parameter_ranges['kds'] = np.array([[1./5.7,1./81.2,1./4.6,1./21.4,1./3.,1./2.1,1./3.],\n# [5.7,81.2,24.6,21.4,3.,2.1,3.,]])", "\"\"\"\nEnvironment Probing Interaction (EPI)\nhttps://arxiv.org/abs/1907.11740\n\nModules:\n0. pretrained task-specific policy for collecting transition dataset\n1. EPI policy\n2. Embedding model\n3. EPI prediction model\n4. 
prediction model\n\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nimport math\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir) # add parent path\nfrom sim2real_policies.utils.rl_utils import load, load_model\nfrom utils.choose_env import choose_env\nimport torch.optim as optim\nimport torch.multiprocessing as mp\nfrom torch.multiprocessing import Process\nfrom multiprocessing.managers import BaseManager\ntorch.multiprocessing.set_start_method('forkserver', force=True) # critical for make multiprocessing work\nfrom torch.utils.tensorboard import SummaryWriter\nfrom gym import spaces\nfrom sim2real_policies.utils.policy_networks import DPG_PolicyNetwork, RandomPolicy, PPO_PolicyNetwork\nfrom sim2real_policies.utils.buffers import ReplayBuffer\nfrom sim2real_policies.sys_id.common.utils import query_params, query_key_params, offline_history_collection\nfrom sim2real_policies.sys_id.common.operations import plot, size_splits\nfrom sim2real_policies.sys_id.common.nets import PredictionNetwork, EmbedNetwork\nfrom sim2real_policies.utils.envs import make_env\nfrom sim2real_policies.utils.evaluate import evaluate, evaluate_epi\nfrom sim2real_policies.ppo.ppo_multiprocess import PPO\nfrom sim2real_policies.td3.td3_multiprocess import TD3_Trainer\nfrom sim2real_policies.utils.load_params import load_params\nimport pickle\nimport copy\nimport time\nimport argparse\nfrom mujoco_py import MujocoException\n\n# EPI algorthm hyper parameters\nDEFAULT_REWARD_SCALE = 1.\nPREDICTION_REWARD_SCALE = 0.5\nPREDICTION_REWARD_SCALE0 = 1e5\nPREDICTOR_LR = 1e-4\nEPI_PREDICTOR_LR = 1e-4\nEPI_TOTAL_ITR = 1000\nEPI_POLICY_ITR = 5 # udpate iterations of EPI polciy; without considering 10 times inside update of ppo policy\nPREDICTOR_ITER = 5 # udpate itertations of predictors and embedding net \nEVAL_INTERVAL = 100 # evaluation interval (episodes) of task-specific policy\nHIDDEN_DIM=512\nPREFIX=''\nNUM_WORKERS = 3\nENV_NAME = ['SawyerReach', 'SawyerPush', 'SawyerSlide'][0]\nRANDOMSEED = 2 # random seed\nEPI_TRAJ_LENGTH = 10\nEPI_EPISODE_LENGTH = 30 # shorter than normal task-specific episode length, only probing through initial exploration steps\nEMBEDDING_DIM = 10\nDISCRETE = True # if true, discretized randomization range\nVINE = True # if true, data collected in vine manner\nSEPARATION_LOSS = True\nNO_RESET = True # the env is not reset after EPI rollout in each episode\nEPI_POLICY_ALG = ['ppo', 'random'][0]\nTASK_POLICY_ALG = ['td3', 'ppo'][0]\n# task specific policy\nEP_MAX=12000\nEP_LEN=100\n\nwriter = SummaryWriter()\n\nclass EPI(object):\n \"\"\"\n The class of environment probing interaction policies\n \"\"\"\n def __init__(self, env, traj_length=EPI_TRAJ_LENGTH, \\\n GAMMA=0.9, data_path='./data/', model_path='./model/epi'):\n self.embed_dim = EMBEDDING_DIM\n self.data_path = data_path\n self.model_path = model_path\n self.traj_length = traj_length\n self.GAMMA = GAMMA\n action_space = env.action_space\n state_space = env.observation_space\n self.state_dim = state_space.shape[0]\n self.action_dim = action_space.shape[0]\n traj_dim = traj_length*(self.state_dim+self.action_dim)\n state_embedding_space = spaces.Box(-np.inf, np.inf, shape=(state_space.shape[0]+self.embed_dim, )) # add the embedding param dim\n # initialize epi policy\n if EPI_POLICY_ALG == 'ppo':\n self.epi_policy = 
PPO(self.state_dim, self.action_dim)\n self.epi_policy.to_cuda()\n elif EPI_POLICY_ALG == 'random':\n self.epi_policy = RandomPolicy(self.action_dim)\n else: \n raise NotImplementedError\n # initialize task specific policy \n if TASK_POLICY_ALG == 'ppo':\n self.task_specific_policy = PPO(self.state_dim+self.embed_dim, self.action_dim)\n elif TASK_POLICY_ALG == 'td3':\n self.task_specific_policy = TD3_Trainer(replay_buffer, state_embedding_space, action_space, \\\n hidden_dim, q_lr, policy_lr, action_range, policy_target_update_interval)\n else: \n raise NotImplementedError\n self.embed_net = EmbedNetwork(traj_dim, self.embed_dim, HIDDEN_DIM).cuda()\n self.sa_dim=self.state_dim+self.action_dim\n self.predict_net = PredictionNetwork(self.sa_dim, self.state_dim, HIDDEN_DIM).cuda()\n self.sae_dim = self.sa_dim+self.embed_dim\n self.epi_predict_net = PredictionNetwork(self.sae_dim, self.state_dim, HIDDEN_DIM).cuda()\n \n self.predict_net_optimizer = optim.Adam(self.predict_net.parameters(), PREDICTOR_LR)\n embed_epi_predict_net_params = list(self.epi_predict_net.parameters()) + list(self.embed_net.parameters())\n self.embed_epi_predict_net_optimizer = optim.Adam(embed_epi_predict_net_params, EPI_PREDICTOR_LR)\n self.criterion = nn.MSELoss()\n\n def save_model(self, model_name=None):\n if model_name is 'predictor_and_embedding':\n torch.save(self.predict_net.state_dict(), self.model_path +'_predictor')\n torch.save(self.epi_predict_net.state_dict(), self.model_path+'_EPIpredictor')\n torch.save(self.embed_net.state_dict(), self.model_path+'_embedding')\n # print('Predictor, EPI Predictor, and Embedding model saved.')\n elif model_name is 'epi_policy':\n self.epi_policy.save_model(path = self.model_path +'_'+EPI_POLICY_ALG+ '_epi_policy')\n # print('EPI policy saved.')\n elif model_name is 'task_specific_policy':\n self.task_specific_policy.save_model(path = self.model_path+'_'+TASK_POLICY_ALG + '_policy')\n # print('Task specific policy saved.')\n\n def load_model(self, model_name=None):\n if model_name is 'predictor_and_embedding':\n self.predict_net.load_state_dict(torch.load(self.model_path+'_predictor'))\n self.epi_predict_net.load_state_dict(torch.load(self.model_path+'_EPIpredictor'))\n self.embed_net.load_state_dict(torch.load(self.model_path+'_embedding'))\n self.predict_net.eval()\n self.epi_predict_net.eval()\n self.embed_net.eval()\n # print('Predictor, EPI_Predictor, and Embedding model loaded.')\n elif model_name is 'epi_policy':\n self.epi_policy.load_model(path = self.model_path +'_'+EPI_POLICY_ALG+ '_epi_policy')\n # print('EPI policy loaded.')\n elif model_name is 'task_specific_policy':\n self.task_specific_policy.load_model(path =self.model_path+'_'+TASK_POLICY_ALG + '_policy')\n # print('Task specific policy loaded.')\n \n\n def load_transition_data(self, path = None):\n \"\"\"\n transition data format: \n {\n 'x_train': (# episodes 1, # steps, state_dim + action_dim)\n 'x_test': (# episodes 2, # steps, state_dim + action_dim)\n 'y_train': (# episodes 1, # steps, state_dim)\n 'y_test': (# episodes 2, # steps, state_dim)\n 'param_train': (# episodes 1, # steps, param_dic)\n 'param_test': (# episodes 2, # steps, param_dic)\n }\n \"\"\"\n if path is None:\n path = self.data_path\n if DISCRETE:\n path+='_discrete'\n if VINE:\n path+='_vine'\n path+='_data.pckl'\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)\n data = pickle.load(open(os.path.abspath(path),'rb' ))\n return data\n\n def where_equal(self, array_of_array, array): \n \"\"\"Return index of 
array_of_array where its item equals to array\"\"\"\n idx=[]\n for i in range(array_of_array.shape[0]):\n if (array_of_array[i]==array).all():\n idx.append(i)\n\n return idx\n\n # \"\"\"\n # original implementation of separation loss in EPI paper is follow:\n # def separation_loss(y_true, y_pred):\n #\n # y_true = tf.squeeze(y_true)\n # env_id, _ = tf.unique(y_true)\n #\n # mu = []\n # sigma = []\n # for i in range(EPI.NUM_OF_ENVS):\n # idx = tf.where(tf.equal(y_true, env_id[i]))\n # traj = tf.gather(y_pred, idx)\n # mu.append(tf.squeeze(K.mean(traj, axis=0)))\n # this_sigma = tf.maximum(K.mean(K.std(traj, axis=0))-0.1, 0)\n # sigma.append(this_sigma)\n #\n # mu = tf.stack(mu)\n # r = tf.reduce_sum(mu * mu, 1)\n # r = tf.reshape(r, [-1, 1])\n # D = (r - 2 * tf.matmul(mu, tf.transpose(mu)) + tf.transpose(r))/tf.constant(EPI.EMBEDDING_DIMENSION, dtype=tf.float32)\n # D = tf.sqrt(D + tf.eye(EPI.NUM_OF_ENVS, dtype=tf.float32))\n # distance = K.mean(tf.reduce_sum(0.1 - tf.minimum(D, 0.1)))\n #\n # sigma = tf.stack(sigma)\n #\n # return (distance + K.mean(sigma))*0.01\n # \"\"\"\n def separation_loss(self, params_list, trajs):\n # get list of parameters from ordered dictionary\n list_params = []\n for params in params_list:\n set_params = []\n for key, value in list(params.items()):\n # print(type(value))\n if isinstance(value, np.ndarray):\n value = value.reshape(-1).astype(float)\n set_params = np.concatenate((set_params, value)).tolist()\n else:\n set_params.append(value.astype(float).tolist())\n list_params.append(set_params)\n \n # unique_params_list = np.unique(np.array(list_params).astype('float32'), axis=0)\n unique_params_list = [list(x) for x in set(tuple(x) for x in list_params)]\n number_envs = len(unique_params_list)\n mu=[]\n sigma=[]\n\n for unique_params in unique_params_list:\n specific_env_idx = self.where_equal(np.array(list_params), np.array(unique_params))\n specific_env_trajs = trajs[specific_env_idx]\n specific_env_trajs = torch.FloatTensor(specific_env_trajs).cuda()\n embedding = self.embed_net(specific_env_trajs) # (len(specific_env_trajs), len(embedding))\n if len(embedding.shape)>2:\n embedding = embedding.view(-1, embedding.shape[-1])\n mu.append(torch.squeeze(torch.mean(embedding, dim=0)))\n this_sigma = torch.max(torch.mean(torch.std(embedding, dim=0))-0.1, 0)[0] # values of max\n sigma.append(this_sigma)\n mu = torch.stack(mu)\n r = torch.sum(mu*mu, 1)\n r = r.view(-1,1)\n D = (r-2*torch.mm(mu, torch.t(mu)) + torch.t(r))/self.embed_dim\n D = torch.sqrt(D+torch.eye(number_envs).cuda())\n distance = torch.mean(torch.sum(0.1-torch.min(D, torch.as_tensor(0.1).cuda())))\n sigma = torch.stack(sigma)\n\n return (distance + torch.mean(sigma)) * 0.01\n\n def predictor_update(self, input, label, trajs, params_list):\n \"\"\"\n Update the two predictors: 1. predictor with (s,a) as input 2. 
predictor with (embedding, s,a) as input\n \"\"\"\n # prediction network update\n state_action_in_trajs = np.array(trajs[:, :, :, :-1]) # remove the rewards\n state_action_in_trajs = state_action_in_trajs.reshape(state_action_in_trajs.shape[0], state_action_in_trajs.shape[1], -1 )# merge last two dims\n input = torch.FloatTensor(input).cuda()\n label = torch.FloatTensor(label).cuda()\n predict = self.predict_net(input)\n predict_loss = self.criterion(predict, label)\n self.predict_net_optimizer.zero_grad()\n predict_loss.backward()\n self.predict_net_optimizer.step()\n\n batch_loss = []\n # embedding prediction network update\n for epi in range(state_action_in_trajs.shape[0]):\n for traj in range(state_action_in_trajs.shape[1]):\n embedding = self.embed_net(state_action_in_trajs[epi][traj])\n embed_input = torch.cat((embedding.repeat(input[epi].shape[0], 1), input[epi]), dim=1) # repeat embedding to the batch size\n epi_predict = self.epi_predict_net(embed_input)\n epi_predict_loss = self.criterion(epi_predict, label[epi])\n if torch.isnan(epi_predict_loss).any(): # capture nan cases\n print('Nan EPI prediction loss')\n print(state_action_in_trajs[epi][traj], embedding, embed_input, epi_predict, label[epi])\n else:\n batch_loss.append(epi_predict_loss)\n \n sum_epi_predict_loss = sum(batch_loss)\n if SEPARATION_LOSS:\n separation_loss=self.separation_loss(params_list, state_action_in_trajs)\n if torch.isnan(separation_loss).any(): # capture nan cases\n print('Nan separation loss')\n else:\n sum_epi_predict_loss+=separation_loss\n else:\n separation_loss = 0.\n self.embed_epi_predict_net_optimizer.zero_grad()\n sum_epi_predict_loss.backward()\n self.embed_epi_predict_net_optimizer.step()\n \n return predict_loss, torch.mean(torch.stack(batch_loss)).detach().cpu().numpy(), separation_loss.detach().cpu().numpy()\n\n def prediction_reward(self, input, label, trajs):\n \"\"\" Generate prediction reward for each trajectory \"\"\"\n r_trajs = []\n sa_trajs = []\n rewards = []\n predict_rewards = []\n state_action_in_trajs = trajs[:, :, :, :-1] # remove the rewards\n reward_in_trajs = trajs[:, :, :, -1:] # only the rewards\n state_action_in_trajs_ = state_action_in_trajs.reshape(state_action_in_trajs.shape[0], state_action_in_trajs.shape[1], -1 ) # merge last two dims\n reward_in_trajs_ = reward_in_trajs.reshape(reward_in_trajs.shape[0], reward_in_trajs.shape[1], -1) # merge last two dims\n input = torch.FloatTensor(input).cuda()\n label = torch.FloatTensor(label).cuda()\n for epi in range(state_action_in_trajs_.shape[0]):\n predict = self.predict_net(input[epi])\n predict_loss = self.criterion(predict, label[epi])\n episode_epi_predict_loss = []\n for traj in range(state_action_in_trajs_.shape[1]):\n embedding = self.embed_net(state_action_in_trajs_[epi][traj])\n # print(embedding)\n embed_input = torch.cat((embedding.repeat(input[epi].shape[0], 1), input[epi]), dim=1)\n epi_predict = self.epi_predict_net(embed_input)\n epi_predict_loss = self.criterion(epi_predict, label[epi]) \n predict_r = (epi_predict_loss - predict_loss).detach().cpu().numpy()\n predict_r = np.clip(predict_r*PREDICTION_REWARD_SCALE0, 0, 1000) # accoring to original implementation, multiplied factor and non-negative\n # print(predict_r)\n augmented_r = DEFAULT_REWARD_SCALE * reward_in_trajs_[epi][traj] + PREDICTION_REWARD_SCALE * predict_r\n rewards.append(augmented_r)\n predict_rewards.append(predict_r)\n r_trajs.append(augmented_r)\n sa_trajs.append(state_action_in_trajs[epi][traj])\n return [np.array(sa_trajs), r_trajs], 
np.mean(rewards), np.mean(predict_rewards)\n\n def EPIpolicy_rollout(self, env, max_steps=30, params=None):\n \"\"\"\n Roll one trajectory with max_steps\n return: \n traj: shape of (max_steps, state_dim+action_dim+reward_dim)\n s_: next observation\n env.get_state(): next underlying state\n \"\"\"\n if params is not None: \n env.set_dynamics_parameters(params)\n # env.renderer_on()\n for _ in range(10): # max iteration for getting a single rollout \n s = env.reset()\n traj=[]\n for _ in range(max_steps):\n a = self.epi_policy.choose_action(s)\n # env.render()\n try: \n s_, r, done, _ = env.step(a)\n except MujocoException:\n print('EPI Rollout: MujocoException')\n break\n s_a_r = np.concatenate((s,a, [r])) # state, action, reward\n traj.append(s_a_r)\n s=s_\n if len(traj) == max_steps:\n break \n\n if len(traj)<max_steps:\n print('EPI rollout length smaller than expected!')\n \n return traj, [s_, env.get_state()]\n \n def EPIpolicy_update(self, buffer):\n \"\"\" \n update the EPI policy \n buffer = [trajectories, rewards]\n trajectories: (# trajs, traj_length, state_dim+action_dim)\n rewards: (# trajs, traj_length)\n \"\"\"\n # how to learn the policy with the reward for policy, and batch of trajectories to generate one reward\n [trajs, rewards]=buffer\n states = trajs[:, :, :self.state_dim]\n actions = trajs[:, :, self.state_dim:]\n\n # update ppo\n s_ = states[:, -1]\n # not considering done here, for reaching no problem for pushing may have problem (done is terminal state)\n v_s_ = self.epi_policy.critic(torch.Tensor([s_]).cuda()).cpu().detach().numpy()[0, 0] \n discounted_r = []\n for r in np.array(rewards).swapaxes(0,1)[-2::-1]: # on episode steps dim, [1,2,3,4][-2::-1] gives 3,2,1\n v_s_ = np.expand_dims(r, axis=1) + self.GAMMA * v_s_ # make r has same shape as v_s_: (N, 1)\n discounted_r.append(v_s_)\n discounted_r.reverse()\n # print('r: ', np.array(discounted_r).shape) # (traj_length, # trajs, 1)\n bs, ba, br = np.vstack(states[:, :-1]), \\\n np.vstack(actions[:, :-1]), \\\n np.vstack(np.array(discounted_r).swapaxes(0,1))\n # print(bs.shape, ba.shape, br.shape)\n self.epi_policy.update(bs, ba, br)\n\n def sample_randomized_params(self, dataset_params, batch_size):\n \"\"\"\n Sample environment parameters from the loaded transition dataset. 
\n Note: this implementation is different from original implementation for the paper, \n but it saves the steps for matching the env idexes between the trajectory generation\n of EPI policy in randomized envs and the envs in transition dataset.\n \"\"\"\n random_idx = np.random.randint(len(dataset_params), size=batch_size) # with replacement\n return random_idx, np.array(dataset_params)[random_idx]\n\n def transitions_to_trajs(self, transitions):\n \"\"\" \n Process episodic transition data into trajectories of traj_length.\n Episode length needs to be multiple trajectory length.\n Return: trajectories (np.array), shape: (trans_batch_size, num_trajs_per_episode_transition, traj_length, s_dim+a_dim)\n \"\"\"\n episode_length = np.array(transitions).shape[1]\n assert episode_length % self.traj_length == 0 # episode length is multiple of trajectory length\n num_trajs_per_episode_transition = int(episode_length/self.traj_length)\n # print(\"num_trajs_per_episode_transition: \", num_trajs_per_episode_transition)\n split_sizes = num_trajs_per_episode_transition * [self.traj_length]\n trajs = size_splits(torch.Tensor(transitions), split_sizes, dim=1) # split episodic transitions into trajectories, return: tuple\n trajs = np.array([t.numpy() for t in list(trajs)]) # tuple to np.array, (num_trajs_per_episode_transition, trans_batch_size, traj_length, s_dim+a_dim)\n trajs = np.swapaxes(trajs, 0,1) #(trans_batch_size, num_trajs_per_episode_transition, traj_length, s_dim+a_dim)\n return trajs\n\n def embedding_learn(self, env, epi_episode_length=50, itr=10000, trans_batch_size=20):\n \"\"\" \n Update the embedding network through interatively udpating the predictors and the EPI policy. \n \"\"\"\n data = self.load_transition_data() # pre-collected transition dataset\n print('Data loaded. Training data: {} episodes. 
Test data: {} episodes.'.format(len(data['x_train']),len(data['x_test'])))\n prediction_loss_list = []\n epi_prediction_loss_list=[]\n separation_loss_list=[]\n overall_reward_list=[]\n prediction_reward_list=[]\n env.randomisation_off()\n for i in range(itr): # while not converge\n transitions=[]\n # collect transitions with EPI policy\n sampled_index_list, sampled_params_list = self.sample_randomized_params(data['param_train'], trans_batch_size)\n for j in range(trans_batch_size):\n episode_transition, _= self.EPIpolicy_rollout(env, epi_episode_length, sampled_params_list[j])\n if len(episode_transition)< epi_episode_length:\n episode_transition, _= self.EPIpolicy_rollout(env, epi_episode_length, sampled_params_list[j])\n transitions.append(episode_transition)\n trajs = self.transitions_to_trajs(transitions)\n\n # update the predictors and the embedding net\n data_x = np.array(data['x_train'])[sampled_index_list]\n data_y = np.array(data['y_train'])[sampled_index_list]\n itr_f_loss = 0\n itr_f_epi_loss = 0\n itr_separation_loss = 0\n for _ in range(PREDICTOR_ITER):\n f_loss, f_epi_loss, separation_loss = self.predictor_update(data_x, data_y, trajs, sampled_params_list)\n itr_f_loss += f_loss\n itr_f_epi_loss += f_epi_loss\n itr_separation_loss += separation_loss\n print('Itr: {}: Predictor loss: {:.5f} | EPI predictor loss: {:.5f} | Separation loss: {:.5f}'\\\n .format(i, itr_f_loss, itr_f_epi_loss, itr_separation_loss))\n writer.add_scalar('Loss/Predictor Update', itr_f_loss, i)\n writer.add_scalar('Loss/EPI Predictor Update', itr_f_epi_loss, i)\n writer.add_scalar('Loss/Embedding Separation', itr_separation_loss, i)\n self.save_model(model_name='predictor_and_embedding')\n transitions=[]\n\n # collect transitions for reward prediction\n sampled_index_list, sampled_params_list = self.sample_randomized_params(data['param_test'], trans_batch_size)\n for j in range(trans_batch_size):\n episode_transition, _ = self.EPIpolicy_rollout(env, epi_episode_length, sampled_params_list[j])\n if len(episode_transition)< epi_episode_length:\n episode_transition, _= self.EPIpolicy_rollout(env, epi_episode_length, sampled_params_list[j])\n transitions.append(episode_transition)\n\n trajs = self.transitions_to_trajs(transitions)\n epi_buffer, mean_rewards, mean_predict_rewards = self.prediction_reward(data['x_test'][sampled_index_list], data['y_test'][sampled_index_list], trajs) # generate reward for each traj\n \n # mean rewards as measures for EPI policy\n writer.add_scalar('Mean Trajectory Reward/EPI policy', mean_rewards, i)\n writer.add_scalar('Mean Trajectory Prediction Reward/EPI policy', mean_predict_rewards, i)\n\n prediction_loss_list.append(itr_f_loss)\n epi_prediction_loss_list.append(itr_f_epi_loss)\n separation_loss_list.append(itr_separation_loss)\n overall_reward_list.append(mean_rewards)\n prediction_reward_list.append(mean_predict_rewards)\n # update the EPI policy with buffer data of prediction reward\n if EPI_POLICY_ALG != 'random': # random policy no need for update\n for _ in range(EPI_POLICY_ITR):\n self.EPIpolicy_update(epi_buffer)\n\n if i%20 == 0 and i>0:\n self.save_model('epi_policy')\n np.save('prediction_loss', prediction_loss_list)\n np.save('epi_prediction_loss', epi_prediction_loss_list)\n np.save('separation_loss', separation_loss_list)\n np.save('overall_reward', overall_reward_list)\n np.save('prediction_reward', prediction_reward_list)\n env.randomisation_on()\n\ndef ppo_worker(id, epi, environment_params, environment_wrappers,environment_wrapper_arguments,\\\n 
eval_rewards_queue, eval_success_queue, batch_size, no_reset):\n \"\"\"\n learn the task-specific policy with learned embedding network, conditioned on state and embedding;\n general rl training, but use EPI policy to generate trajectory and use embedding net to predict embedding for each episode\n \"\"\"\n with torch.cuda.device(id % torch.cuda.device_count()):\n # same device\n epi.task_specific_policy.to_cuda()\n epi.epi_policy.to_cuda()\n epi.embed_net.cuda()\n env= make_env('robosuite.'+ENV_NAME, RANDOMSEED, id, environment_params, environment_wrappers, environment_wrapper_arguments)()\n all_ep_r = []\n for ep in range(EP_MAX):\n env.reset()\n params=env.get_dynamics_parameters()\n env.randomisation_off()\n ep_r = 0\n # epi rollout first for each episode\n traj, [last_obs, last_state] = epi.EPIpolicy_rollout(env, max_steps = epi.traj_length, params=params) # only one traj; pass in params to ensure it's not changed\n state_action_in_traj = np.array(traj)[:, :-1] # remove the rewards\n embedding = epi.embed_net(state_action_in_traj.reshape(-1))\n embedding = embedding.detach().cpu().numpy()\n if no_reset:\n s = last_obs # last observation\n env.set_state(last_state) # last underlying state\n else:\n env.set_dynamics_parameters(params) # same as the rollout env\n s = env.reset()\n\n for t in range(EP_LEN): # in one episode\n s=np.concatenate((s, embedding))\n a = epi.task_specific_policy.choose_action(s)\n try:\n s_, r, done, info = env.step(a)\n except MujocoException:\n print('MujocoException')\n break\n if info[\"unstable\"]: # capture the case with cube flying away for pushing task\n break\n\n epi.task_specific_policy.store_transition(s,a,r)\n s = s_\n s_=np.concatenate((s_, embedding))\n ep_r += r\n # update ppo\n if len(epi.task_specific_policy.state_buffer) == batch_size:\n epi.task_specific_policy.finish_path(s_, done)\n epi.task_specific_policy.update() # update using the buffer's data\n if done:\n break\n env.randomisation_on()\n\n epi.task_specific_policy.finish_path(s_, done)\n all_ep_r.append(ep_r)\n if ep%EVAL_INTERVAL==0 and ep>0:\n eval_r, eval_succ = evaluate_epi(env, epi.epi_policy, epi.embed_net, epi.task_specific_policy.actor, epi.traj_length)\n eval_rewards_queue.put(eval_r)\n eval_success_queue.put(eval_succ) \n epi.save_model('task_specific_policy')\n print('Worker: ', id, '| Episode: ', ep, '| Episode Reward: {:.4f} '.format(ep_r))\n\n epi.save_model('task_specific_policy') \n\n\ndef td3_worker(id, epi, environment_params, environment_wrappers, environment_wrapper_arguments, rewards_queue, eval_rewards_queue, success_queue,\\\n eval_success_queue, replay_buffer, batch_size, explore_steps, noise_decay, update_itr, explore_noise_scale, \\\n eval_noise_scale, reward_scale, DETERMINISTIC, hidden_dim, no_reset):\n '''\n the function for sampling with multi-processing\n '''\n\n with torch.cuda.device(id % torch.cuda.device_count()):\n # same device\n epi.task_specific_policy.to_cuda()\n epi.epi_policy.to_cuda()\n epi.embed_net.cuda()\n print(epi.task_specific_policy, replay_buffer)\n env= make_env('robosuite.'+ENV_NAME, RANDOMSEED, id, environment_params, environment_wrappers,environment_wrapper_arguments)()\n action_dim = env.action_space.shape[0]\n frame_idx=0\n rewards=[]\n current_explore_noise_scale = explore_noise_scale\n # training loop\n for eps in range(EP_MAX):\n env.reset()\n params=env.get_dynamics_parameters()\n env.randomisation_off()\n # epi rollout first for each episode\n traj, [last_obs, last_state] = epi.EPIpolicy_rollout(env, max_steps = 
epi.traj_length, params=params) # only one traj; pass in params to ensure it's not changed\n state_action_in_traj = np.array(traj)[:, :-1] # remove the rewards\n embedding = epi.embed_net(state_action_in_traj.reshape(-1))\n embedding = embedding.detach().cpu().numpy()\n\n episode_reward = 0\n if no_reset:\n state = last_obs # last observation\n env.set_state(last_state) # last underlying state\n else:\n env.set_dynamics_parameters(params) # same as the rollout env\n state = env.reset()\n\n current_explore_noise_scale = current_explore_noise_scale*noise_decay\n state=np.concatenate((state, embedding))\n for step in range(EP_LEN):\n if frame_idx > explore_steps:\n action = epi.task_specific_policy.policy_net.get_action(state, noise_scale=current_explore_noise_scale)\n else:\n action = epi.task_specific_policy.policy_net.sample_action()\n \n try:\n next_state, reward, done, info = env.step(action)\n if environment_params[\"has_renderer\"] and environment_params[\"render_visual_mesh\"]:\n env.render() \n except KeyboardInterrupt:\n print('Finished')\n epi.save_model('task_specific_policy') \n except MujocoException:\n print('Task specific policy: MujocoException')\n break\n\n if info[\"unstable\"]: # capture the case with cube flying away for pushing task\n break\n next_state=np.concatenate((next_state, embedding))\n replay_buffer.push(state, action, reward, next_state, done)\n \n state = next_state\n episode_reward += reward\n frame_idx += 1\n \n # if len(replay_buffer) > batch_size:\n if replay_buffer.get_length() > batch_size:\n for i in range(update_itr):\n _=epi.task_specific_policy.update(batch_size, eval_noise_scale=eval_noise_scale, reward_scale=reward_scale)\n \n if done:\n break\n env.randomisation_on()\n print('Worker: ', id, '| Episode: ', eps, '| Episode Reward: ', episode_reward)\n rewards_queue.put(episode_reward)\n success_queue.put(info['success'])\n\n if eps % EVAL_INTERVAL == 0 and eps>0:\n # plot(rewards, id)\n epi.save_model('task_specific_policy')\n eval_r, eval_succ = evaluate_epi(env, epi.epi_policy, epi.embed_net, epi.task_specific_policy.policy_net, epi.traj_length)\n eval_rewards_queue.put(eval_r)\n eval_success_queue.put(eval_succ)\n\n epi.save_model('task_specific_policy') \n\ndef specific_policy_learn(epi, environment_params, environment_wrappers, environment_wrapper_arguments, no_reset=True):\n \"\"\" \n multi-process for learning the task-specific policy rather than\n using the single-process in epi class\n \"\"\"\n epi.load_model('predictor_and_embedding')\n epi.load_model('epi_policy')\n epi.task_specific_policy.share_memory()\n rewards_queue=mp.Queue() # used for get rewards from all processes and plot the curve\n eval_rewards_queue = mp.Queue() # used for get offline evaluated rewards from all processes and plot the curve\n success_queue = mp.Queue() # used for get success events from all processes\n eval_success_queue = mp.Queue()\n processes=[]\n rewards=[]\n success = []\n eval_rewards = []\n eval_success = []\n\n for i in range(NUM_WORKERS):\n if TASK_POLICY_ALG == 'ppo':\n process = Process(target=ppo_worker, args=(i, epi, environment_params, environment_wrappers, \\\n environment_wrapper_arguments, eval_rewards_queue, eval_success_queue, batch_size, no_reset)) # the args contain shared and not shared\n elif TASK_POLICY_ALG == 'td3':\n process = Process(target=td3_worker, args=(i, epi, environment_params, environment_wrappers,\\\n environment_wrapper_arguments, rewards_queue, eval_rewards_queue, success_queue, eval_success_queue,\\\n replay_buffer, 
batch_size, explore_steps, noise_decay,\\\n update_itr, explore_noise_scale, eval_noise_scale, reward_scale, DETERMINISTIC, hidden_dim, no_reset))\n else: \n raise NotImplementedError\n process.daemon=True # all processes closed when the main stops\n processes.append(process)\n\n [p.start() for p in processes]\n while True: # keep geting the episode reward from the queue\n eval_r = eval_rewards_queue.get() \n eval_succ = eval_success_queue.get() \n\n eval_rewards.append(eval_r)\n eval_success.append(eval_succ)\n\n if len(eval_rewards)%20==0 and len(eval_rewards)>0:\n np.save(PREFIX+'eval_rewards', eval_rewards)\n np.save(PREFIX+'eval_success', eval_success)\n\n [p.join() for p in processes] # finished at the same time\n\ndef test(env, no_reset):\n epi.load_model('predictor_and_embedding')\n epi.load_model('epi_policy')\n epi.load_model('task_specific_policy')\n epi.task_specific_policy.to_cuda()\n epi.epi_policy.to_cuda()\n epi.embed_net.cuda()\n action_dim = env.action_space.shape[0]\n env.renderer_on()\n for eps in range(10):\n env.reset()\n params=env.get_dynamics_parameters()\n env.randomisation_off()\n # epi rollout first for each episode\n traj, [last_obs, last_state] = epi.EPIpolicy_rollout(env, max_steps = epi.traj_length, params=params) # only one traj; pass in params to ensure it's not changed\n state_action_in_traj = np.array(traj)[:, :-1] # remove the rewards\n embedding = epi.embed_net(state_action_in_traj.reshape(-1))\n embedding = embedding.detach().cpu().numpy()\n episode_reward = 0\n if no_reset:\n state = last_obs # last observation\n env.set_state(last_state) # last underlying state\n else:\n env.set_dynamics_parameters(params) # same as the rollout env\n state = env.reset()\n state=np.concatenate((state, embedding))\n for step in range(EP_LEN):\n action = epi.task_specific_policy.policy_net.get_action(state, noise_scale=0.0)\n next_state, reward, done, info = env.step(action)\n env.render() \n next_state=np.concatenate((next_state, embedding))\n state = next_state\n episode_reward += reward \n if done:\n break\n env.randomisation_on()\n print('Worker: ', id, '| Episode: ', eps, '| Episode Reward: ', episode_reward)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EPI.')\n parser.add_argument('--epi', dest='epi', action='store_true', default=False)\n parser.add_argument('--task', dest='task', action='store_true', default=False)\n parser.add_argument('--test', dest='test', action='store_true', default=False)\n\n args = parser.parse_args()\n\n env, environment_params, environment_wrappers, environment_wrapper_arguments = choose_env(ENV_NAME)\n prefix=ENV_NAME+str(len(environment_params[\"parameters_to_randomise\"])) # number of randomised parameters\n model_path = '../../../../../data/epi/model/'+prefix+'_epi'\n if TASK_POLICY_ALG =='td3': \n # load td3 hyper parameters\n [action_range, batch_size, explore_steps, update_itr, explore_noise_scale, eval_noise_scale, reward_scale, \\\n hidden_dim, noise_decay, policy_target_update_interval, q_lr, policy_lr, replay_buffer_size, DETERMINISTIC] = \\\n load_params('td3', ['action_range', 'batch_size', 'explore_steps', 'update_itr', 'explore_noise_scale',\\\n 'eval_noise_scale', 'reward_scale', 'hidden_dim', 'noise_decay', \\\n 'policy_target_update_interval', 'q_lr', 'policy_lr','replay_buffer_size', 'deterministic'] )\n \n # load replay buffer when off-policy\n BaseManager.register('ReplayBuffer', ReplayBuffer)\n manager = BaseManager()\n manager.start()\n replay_buffer = 
manager.ReplayBuffer(replay_buffer_size) # share the replay buffer through manager\n elif TASK_POLICY_ALG =='ppo':\n [batch_size] = load_params('ppo', ['batch_size'])\n\n epi = EPI(env, data_path='./data/'+ENV_NAME, model_path = model_path)\n \n if args.epi:\n epi.embedding_learn(env, epi_episode_length=EPI_EPISODE_LENGTH, itr = EPI_TOTAL_ITR)\n elif args.task:\n specific_policy_learn(epi, environment_params, environment_wrappers, environment_wrapper_arguments, no_reset=NO_RESET)\n elif args.test:\n test(env, no_reset = NO_RESET)\n else: \n pass\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.asarray", "numpy.zeros", "numpy.log", "numpy.ones", "numpy.arange", "numpy.cos" ], [ "torch.stack", "torch.isnan", "numpy.mean", "torch.eye", "torch.load", "torch.multiprocessing.Queue", "torch.sum", "numpy.concatenate", "torch.FloatTensor", "torch.multiprocessing.set_start_method", "numpy.save", "numpy.swapaxes", "torch.t", "torch.as_tensor", "torch.Tensor", "torch.utils.tensorboard.SummaryWriter", "numpy.vstack", "numpy.expand_dims", "numpy.array", "torch.cuda.device_count", "numpy.clip", "torch.multiprocessing.Process", "torch.nn.MSELoss", "torch.optim.Adam", "torch.std", "torch.mean" ] ]
bebosudo/boppy
[ "2197f77e78ed64d23a90ef9158e867cd228b468a" ]
[ "boppy/simulators/gpu/ssa_gpu.py" ]
[ "\"\"\"Run Stochastic Simulation Algorithm on a Nvidia GPU device, using pycuda.\n\nEach thread is assigned one iteration of the algorithm, because each iteration is a distinct\nstochastic process, and therefore can only be horizontally parallelized.\n\nInside this kernel we use a \"binary selection\" to randomly pick a value according to its rate.\n\n\nTODO:\n- this kernel should be repeated many times, until the maximum time requested is exhausted, because\n there are no \"lists\" on cuda/C kernels, so we have to split execution in chunks of defined length\n (_num_steps) in order to use arrays: the following chunk starts from the end of the previous one;\n- use a proper random generator and initialize its seed;\n- deal with GPUs with different specs, such as the block and grid thread dimensions, the device\n memory (adapt the number of repetitions to the amount of memory available)\n https://documen.tician.de/pycuda/util.html#pycuda.tools.DeviceData\n\"\"\"\n\nfrom copy import deepcopy\nimport numpy as np\nimport pycuda.autoinit # noqa\nfrom pycuda.compiler import SourceModule\nfrom pycuda.curandom import rand as curand\nimport pycuda.gpuarray as gpuarray\n\n_kernel_str = \"\"\"\n__global__ void ssa_simple(float *_update_matrix,\n float *_init_conditions,\n const float start_time,\n const float end_time,\n float *_time_and_states,\n const float *_rand_arr) {\n\n size_t rep = 0, th_id = threadIdx.x;\n\n // Set the initial time and conditions associated with this run.\n _time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@] = start_time;\n for (size_t i = 0; i < @num__reacs@; ++i)\n _time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@ + 1 + i] = _init_conditions[th_id * (@num__reacs@ + 1) + i];\n\n\n float simul_t = start_time, _prev_simul_t = start_time;\n float _rates_arr[@num__reacs@];\n float _binary_rates_arr[@num__reacs@ * 2 - 1];\n // The index where start the real rates in the binary vector; before it there are the partial\n // sums of rates.\n size_t _stop_bin_search = (@num__reacs@ + 1) / 2 - 1;\n\n\n while (simul_t < end_time and rep + 1 < @num__rep@) {\n\n // -------------- start unrolling user functions --------------\n @unroll__func__rate@\n // -------------- end unrolling user functions --------------\n\n float total_rate = 0;\n for (size_t i = 0; i < @num__reacs@; ++i)\n total_rate += _rates_arr[i];\n\n float rnd_react = _rand_arr[th_id * @num__rep@ * 2 + rep * 2] * total_rate;\n float rnd_time = _rand_arr[th_id * @num__rep@ * 2 + rep * 2 + 1] + 1e-10;\n\n simul_t = -logf(rnd_time) / total_rate + _prev_simul_t;\n\n // When selecting the next reaction to occur in the algorithm, we want to randomly select a\n // reaction: the reactions with the highest rates should be selected more often.\n // We produce a tree containing partial sums of rates, then we descend into the tree with a\n // top-down traversal and select a branch only if it's greater than the random value\n // extracted.\n\n // The binary array is made of the sums of elements 2*j+1 and 2*j+2 in j-th position, and\n // of the original rates at the end.\n for (size_t i = 0; i < @num__reacs@; ++i) // copy original rates at the end of the temp array\n _binary_rates_arr[@num__reacs@ - 1 + i] = _rates_arr[i];\n for (size_t i = @num__reacs@ - 2; i > 0; --i) // compute the partial sums from the original rates\n _binary_rates_arr[i] = _binary_rates_arr[2 * i + 1] + _binary_rates_arr[2 * i + 2];\n _binary_rates_arr[0] = _binary_rates_arr[1] + _binary_rates_arr[2];\n\n size_t idx_react = 0;\n while (idx_react < 
_stop_bin_search) {\n if (_rates_arr[2 * idx_react + 1] >= rnd_react)\n idx_react = 2 * idx_react + 1;\n else {\n rnd_react -= _rates_arr[2 * idx_react + 1];\n idx_react = 2 * idx_react + 2;\n }\n }\n size_t chosen_react = idx_react - _stop_bin_search;\n\n // Save time and states of this run.\n _time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@ + (rep + 1) * (@num__reacs@ + 1)] = simul_t;\n for (size_t i = 0; i < @num__reacs@; ++i)\n _time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@ + (rep + 1) * (@num__reacs@ + 1) + 1 + i] = _time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@ + rep * (@num__reacs@ + 1) + 1 + i] + _update_matrix[chosen_react * @num__reacs@ + i];\n\n _prev_simul_t = simul_t;\n ++rep;\n }\n}\n\"\"\"\n\n\ndef SSA(update_matrix, initial_conditions, function_rates, t_max, **kwargs): # noqa\n\n # Fix the maximum number of steps available at each repetition. Should be function of the\n # amount of memory available on the device and the number of iterations (= threads) requested.\n _num_steps = 20\n _num_reacs = len(kwargs[\"variables\"])\n start_time, end_time = np.float32(0), np.float32(t_max)\n\n function_rates_wo_param = deepcopy(function_rates)\n for fr_id, f_rate in enumerate(function_rates_wo_param):\n for par, val in kwargs[\"parameters\"].items():\n f_rate = f_rate.replace(par, str(val))\n for sp_id, spec in enumerate(kwargs[\"variables\"]):\n f_rate = f_rate.replace(spec, \"_time_and_states[th_id * (@num__reacs@ + 1) * @num__rep@\"\n \" + rep * (@num__reacs@ + 1) + 1 + {}]\".format(sp_id))\n\n function_rates_wo_param[fr_id] = f_rate\n\n unroll_func_rate = \"\\n\".join((f_rate.join((\"_rates_arr[{}] = \".format(fr_id), \";\"))\n for fr_id, f_rate in enumerate(function_rates_wo_param)))\n\n kernel_ready = _kernel_str \\\n .replace(\"@unroll__func__rate@\", unroll_func_rate) \\\n .replace(\"@num__iter@\", str(kwargs[\"iterations\"])) \\\n .replace(\"@num__rep@\", str(_num_steps)) \\\n .replace(\"@num__reacs@\", str(_num_reacs))\n\n if kwargs.get(\"print_cuda\"):\n print(\"\\n\".join(\" \".join((str(line_no + 2), line))\n for line_no, line in enumerate(kernel_ready.split(\"\\n\"))))\n\n upd_mat_dev = gpuarray.to_gpu(update_matrix.astype(np.float32))\n\n # The vector of initial conditions has to be repeated for each thread, since in the future,\n # when we will split in chunks, each chunk will restart from a different initial condition.\n init_cond_dev = gpuarray.to_gpu(np.tile(initial_conditions.astype(np.float32),\n (kwargs[\"iterations\"], 1)))\n\n # Each thread should produce its own array of random numbers or at least have access to a\n # private set of random numbers: we need two numbers for each repetition, one to select the\n # reaction and one to select the time.\n # Note that pycuda.curandom.rand is a toy-random generator, and all the threads share the array.\n # https://documen.tician.de/pycuda/array.html?highlight=random#module-pycuda.curandom\n rand_arr_dev = curand((_num_steps, 2, kwargs[\"iterations\"]))\n\n # There seems to be no need to manually copy back to host gpuarrays, see example/demo.py.\n time_states_dev = gpuarray.GPUArray((kwargs[\"iterations\"], _num_steps, _num_reacs + 1),\n dtype=np.float32)\n\n mod = SourceModule(kernel_ready)\n func = mod.get_function(\"ssa_simple\")\n func(upd_mat_dev, init_cond_dev, start_time, end_time, time_states_dev, rand_arr_dev,\n block=(kwargs[\"iterations\"], 1, 1))\n\n return time_states_dev\n" ]
[ [ "numpy.float32" ] ]
sbird/FastPMRunner
[ "f38f6e69c603fb699436b645fe7b4eb418ee82c2" ]
[ "fastpm/python/comparehalos.py" ]
[ "from nbodykit.lab import FFTPower, BigFileCatalog\nfrom nbodykit import setup_logging\nimport numpy\nimport argparse\nimport warnings\nfrom mpi4py import MPI\nimport os\n\n# usage:\n#\n# python halobias.py output --nmin x --nmax x --nn x dmcatalog [...] -- halocatalog [...]\n#\n# measure the bias and growth rate from kaiser model by cross correlating halos of different size\n# to the dark matter.\n#\n# example\n#\n# python halobias.py test.json --with-plot ../tests/nbodykit/fastpm_1.0000/ --dataset=1 -- ../tests/nbodykit/fof_1.0000/ --dataset=LL-0.200\n\n# for matter\n# python halobias.py test.json --with-plot ../tests/nbodykit/fastpm_1.0000/ --dataset=1 -- ../tests/nbodykit/fof_1.0000/ --dataset=1\n\nap = argparse.ArgumentParser()\nap.add_argument(\"output\", help='e.g. power.json (FFTPower.load) or power.txt (numpy.loadtxt)')\nap.add_argument(\"--nmin\", default=8, type=int)\nap.add_argument(\"--kmax\", default=None, type=float, help=\"cut to stop using kmax, scale where kaiser is bad\")\nap.add_argument(\"--nmax\", default=1000, type=int)\nap.add_argument(\"--nn\", default=10, type=int)\nap.add_argument(\"--unique-k\", action='store_true', default=False, help='compute for all unique k values.')\nap.add_argument(\"--nmesh\", type=int, default=256, help='mesh resolution')\nap.add_argument(\"--verbose\", action='store_true', default=False, help='print progress')\n\ncat_ap = argparse.ArgumentParser()\n\ncat_ap.add_argument(\"catalog\", help='e.g. fastpm_1.0000 or fof_1.0000')\ncat_ap.add_argument(\"--dataset\", default='LL-0.200', help='data set to select; for a dm catalog, use 1 for a halo catalog, usually LL-0.200')\n\nns, args = ap.parse_known_args()\n\nif '--' in args:\n split = args.index('--')\n ns1 = cat_ap.parse_args(args[:split])\n ns2 = cat_ap.parse_args(args[split+1:])\nelse:\n ns1 = cat_ap.parse_args(args)\n ns2 = ns1\n\ndef read_cat(ns, nmin=None):\n cat = BigFileCatalog(ns.catalog, header='Header', dataset=ns.dataset)\n volume = cat.attrs['BoxSize'][0] ** 3\n\n if nmin is not None and nmin != 0:\n sel = True\n sel = sel & (cat['Length'] >= nmin)\n\n cat['Selection'] = sel\n# cat = cat[sel]\n\n cat['RSDPosition'] = cat['Position'] + cat.attrs['RSDFactor'] * cat['Velocity'] * [0, 0, 1]\n return cat\n\n# use bisection to find the nmin to match the number of nsel\n\ndef read_cat_nsel(ns, nsel, nmin0, nmin1):\n cat = BigFileCatalog(ns.catalog, header='Header', dataset=ns.dataset)\n volume = cat.attrs['BoxSize'][0] ** 3\n\n if 'Length' in cat.columns:\n while nmin1 - nmin0 > 1:\n nminc = (nmin1 + nmin0) / 2\n\n sel = True\n sel = sel & (cat['Length'] >= nminc)\n\n nsel1 = cat.comm.allreduce(sel.sum().compute())\n if nsel1 < nsel: # too few\n nmin1 = nminc\n else:\n nmin0 = nminc\n\n if cat.comm.rank == 0:\n print('found nmin', nmin1, nmin0, 'nsel is', nsel1, 'target is', nsel)\n\n cat['Selection'] = sel\n cat['RSDPosition'] = cat['Position'] + cat.attrs['RSDFactor'] * cat['Velocity'] * [0, 0, 1]\n return cat\n\ndef main(ns, ns1, ns2):\n if ns.verbose:\n setup_logging('info')\n\n if ns.unique_k:\n dk = 0\n else:\n dk = None\n\n cat1 = read_cat(ns1)\n cat2 = read_cat(ns2)\n\n nmin = numpy.unique(numpy.int32(numpy.logspace(numpy.log10(ns.nmin), numpy.log10(ns.nmax), ns.nn, endpoint=True)))\n if 'Length' in cat1.columns:\n nmin0 = cat1.comm.allreduce(cat1['Length'].min().compute() if cat1.size > 0 else 10000000, MPI.MIN)\n nmax0 = cat1.comm.allreduce(cat1['Length'].max().compute() if cat1.size > 0 else 0, MPI.MAX)\n nmin = nmin[nmin >= nmin0]\n nmin = nmin[nmin < nmax0]\n else:\n 
nmin = [0]\n\n if 'Length' in cat2.columns:\n nmin2 = cat2.comm.allreduce(cat2['Length'].min().compute() if cat2.size > 0 else 10000000, MPI.MIN)\n nmax2 = cat2.comm.allreduce(cat2['Length'].max().compute() if cat2.size > 0 else 0, MPI.MAX)\n else:\n nmin2 = 0\n nmax2 = 1\n\n if cat1.comm.rank == 0:\n os.makedirs(os.path.dirname(ns.output), exist_ok=True)\n\n for nmin1 in nmin:\n cat1 = read_cat(ns1, nmin1)\n nsel = cat1.comm.allreduce(cat1['Selection'].sum().compute())\n cat2 = read_cat_nsel(ns2, nsel, nmin2, nmax2)\n\n mesh1 = cat1.to_mesh(interlaced=True, compensated=True, window='tsc', Nmesh=ns.nmesh, position='RSDPosition')\n mesh2 = cat2.to_mesh(interlaced=True, compensated=True, window='tsc', Nmesh=ns.nmesh, position='RSDPosition')\n\n r1 = FFTPower(mesh1, second=mesh1, mode='2d', dk=dk, Nmu=10, kmax=ns.kmax)\n r2 = FFTPower(mesh2, second=mesh2, mode='2d', dk=dk, Nmu=10, kmax=ns.kmax)\n rx = FFTPower(mesh1, second=mesh2, mode='2d', dk=dk, Nmu=10, kmax=ns.kmax)\n\n save_bs(ns.output, 'nmin-%05d-r1' % nmin1, r1)\n save_bs(ns.output, 'nmin-%05d-r2' % nmin1, r2)\n save_bs(ns.output, 'nmin-%05d-rx' % nmin1, rx)\n if cat1.comm.rank == 0:\n print(\"nmin = \", nmin1, \"finished\")\n\ndef save_bs(filename, dataset, r):\n\n basename = filename.rsplit('.', 1)[0]\n if filename.endswith('.json'):\n r.save(basename + '-%s.json' % dataset)\n elif filename.endswith('.txt'):\n if r.comm.rank == 0:\n for var in r.power.data.dtype.names:\n numpy.savetxt(basename + '-%s-%s.txt' % (dataset, var),\n r.power[var].real\n )\n\nmain(ns, ns1, ns2)\n" ]
[ [ "numpy.log10", "numpy.savetxt" ] ]
tirkarthi/raiden
[ "dbd03ddda039332b54ec0c02d81cbe1100bc8028" ]
[ "tools/debugging/plot/line.py" ]
[ "#!/usr/bin/env python\nimport argparse\nimport csv\nimport datetime\nimport sys\nfrom typing import Any, List\n\nfrom matplotlib import dates, pyplot\nfrom matplotlib.axes import Axes\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--width\", default=1000, help=\"Configures width of the output in pixels.\")\nparser.add_argument(\"--height\", default=800, help=\"Configures height of the output in pixels.\")\nparser.add_argument(\n \"--header\", help=\"If the csv does not have a header, use this to give a name to each column\"\n)\nparser.add_argument(\n \"output\", help=\"file name for the result image, filetype is inferred from this.\"\n)\nparser.add_argument(\n \"--x\", help=\"If set, the name of the column to be used as the x axis\", default=None\n)\nparser.add_argument(\"line\", nargs=\"+\")\n\nargs = parser.parse_args()\n\n\ndef parse_datetime(data: str) -> datetime.datetime:\n return datetime.datetime.fromisoformat(data)\n\n\ndef configure_axes(axes: Axes) -> None:\n hour_fmt = dates.DateFormatter(\"%H:%M\")\n minutes_fmt = dates.DateFormatter(\"%M\")\n\n axes.xaxis.set_major_locator(dates.HourLocator(interval=1))\n axes.xaxis.set_major_formatter(hour_fmt)\n axes.xaxis.set_minor_locator(dates.MinuteLocator(interval=5))\n axes.xaxis.set_minor_formatter(minutes_fmt)\n axes.xaxis.set_tick_params(which=\"major\", rotation=90)\n axes.xaxis.set_tick_params(which=\"minor\", rotation=90)\n\n\nif args.header:\n headers = args.header.split(\",\")\n reader = csv.DictReader(sys.stdin, fieldnames=headers)\nelse:\n reader = csv.DictReader(sys.stdin)\n\nlines: List[List[Any]] = [[] for _ in range(len(args.line))]\n\nx_axis: List[Any]\nif args.x:\n x_axis = []\n for data in reader:\n x_axis.append(parse_datetime(data[args.x]))\n\n for pos, line in enumerate(args.line):\n lines[pos].append(float(data[line]))\nelse:\n for data in reader:\n for pos, line in enumerate(args.line):\n lines[pos].append(float(data[line]))\n\n x_axis = list(range(len(lines[0])))\n\n\ndpi = 60\npyplot.figure(figsize=(args.width / dpi, args.height / dpi), dpi=dpi)\n\naxes = pyplot.gca()\naxes.set_xlabel(args.x)\nconfigure_axes(axes)\n\nfor line_name, line_data in zip(args.line, lines):\n pyplot.plot(x_axis, line_data, label=line_name)\n\npyplot.legend(loc=2)\npyplot.savefig(args.output)\n" ]
[ [ "matplotlib.dates.MinuteLocator", "matplotlib.pyplot.savefig", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.dates.HourLocator", "matplotlib.pyplot.gca" ] ]
giganticode/OpenVocabCodeNLM
[ "2d6cfd32c8399891b0fe2220946b4b4ddeac32ba" ]
[ "reader.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom itertools import chain\n\nimport collections\nimport getopt\nimport numpy as np\nimport sys\nimport tensorflow as tf\n\nfrom collections import deque\n\nUNKNOWN_WORD = \"-UNK-\"\nEMPTY_WORD = \"-EMP-\"\nEND_SENT = \"-eos-\"\nSTART_SENT = \"-bos-\"\nEND_DOC = \"-eod-\"\nSUBWORD_END = \"@@\"\n\n\ndef _build_vocab(filename, threshold=5, debug=True):\n \"\"\"\n Builds a vocabulary containing all the subword_units/words/tokens that appear at least #threshold times\n in file filename. All subword_units with frequency < #threshold are converted to a special -UNK- token.\n For the BPE NLM the threshold used should typically be 0.\n The vocabulary is represented as a pair of mappings from subword_units to ids and vice-versa.\n :param filename: The path of the file to be used for vocabulary creation.\n :param threshold: The frequency threshold for vocabulary inclusion.\n :param debug: Whether debugging information should be printed.\n :return: A pair of mappings from subword_units to ids and vice-versa.\n \"\"\"\n with open(filename, 'r') as f:\n linewords = (line.replace(\"\\n\", \" %s\" % END_DOC).split() for line in f)\n counter = collections.Counter(chain.from_iterable(linewords))\n if debug: print('Read data for vocabulary!')\n\n counter[UNKNOWN_WORD] = 0\n unk_counts = 0\n for word, freq in counter.items():\n if freq < threshold:\n unk_counts += freq\n del counter[word] # Cleans up resources. Absolutely necessary for large corpora!\n if unk_counts > 0:\n counter[UNKNOWN_WORD] += unk_counts\n if debug: print('UNKS:', unk_counts)\n counter[EMPTY_WORD] = threshold\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n words = [word for word, freq in count_pairs if freq >= threshold]\n word_to_id = dict(zip(words, range(len(words))))\n id_to_word = {v: k for k, v in word_to_id.items()}\n return word_to_id, id_to_word\n\n\ndef _file_to_word_ids(filename, word_to_id):\n \"\"\"\n Creates the sequence of ids for a file based on the specified mapping.\n If a unit/word/token is not contained in the vocabulary then it is convert into the id of the special -UNK- token.\n Each line of the file is considered a different instance (sentence, code file, etc.)\n :param filename: The path of the file to be converted into a sequence of ids.\n :param word_to_id: Contains the mapping of vocabulary entries to their respective ids.\n :return: The mapped sequence of ids.\n \"\"\"\n with open(filename, 'r') as f:\n ids = []\n for line in f:\n line = line.replace(\"\\n\", (\" %s\" % END_DOC))\n ids.extend([word_to_id[word]\n if word in word_to_id else word_to_id[UNKNOWN_WORD] for word in line.split()])\n return ids\n\ndef _read_words(filename):\n \"\"\"\n Reads a whitespace tokenized version of the specified file (in UTF-8 encoding) using Tensorflow's API.\n All whitespace characters at the beginning and ending of the file are trimmed.\n :param filename: The path of the file to be read.\n :return: The whitespace tokenized version of the specified file.\n \"\"\"\n with tf.device('/cpu:0'):\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().decode(\"utf-8\").strip().split()\n\n\ndef _read_lines(filename):\n \"\"\"\n Creates a list of the specified file's lines using Tensorflow API's (in UTF-8 encoding).\n Each line of the file is a separate list entry and all whitespace characters at its beginning and ending are trimmed.\n :param filename: The path of the file to be read.\n :return: A list of the specified file's 
lines.\n \"\"\"\n with tf.device('/cpu:0'):\n with tf.gfile.GFile(filename, \"r\") as f:\n ret = []\n for l in f:\n ret.append(l.decode(\"utf8\").strip())\n return ret\n\ndef _read_vocab(filename):\n \"\"\"\n Reads the vocabulary from the specified file.\n The file should contain one vocabulary entry per line.\n Each line contains a word, id pair separated by a tab ('\\t') character.\n :param filename: Path to the file that the vocabulary was stored into.\n :return: A pair of mappings from subword_units to ids and vice-versa.\n \"\"\"\n with tf.device('/cpu:0'):\n word_to_id = {}\n id_to_word = {}\n with tf.gfile.GFile(filename, \"r\") as ff:\n for line in ff:\n word, iden = line.strip().split('\\t')\n iden = int(iden)\n word_to_id[word] = iden\n id_to_word[iden] = word\n return word_to_id, id_to_word\n\n\ndef _write_vocab(vocab, filename):\n \"\"\"\n Exports the vocabulary (mapping of subword_units/tokens to ids) to the specified file.\n :param vocab: A dictionary containing the mapping.\n :param filename: Path to the file in which the vocabulary will be saved.\n \"\"\"\n with tf.device('/cpu:0'):\n with tf.gfile.GFile(filename, \"w\") as ff:\n for w, wid in vocab.items():\n ff.write(w + \"\\t\" + str(wid) + \"\\n\")\n\ndef _get_ids_for_wordlist(list_words, word_to_id):\n \"\"\"\n Converts the specified list of subword_units/tokens/words to a list of ids based on the specified mapping.\n If a subword_unit/token/word is out of vocabulary then it is converted into the special -UNK- symbol.\n :param list_words: A list of subword_units/tokens/words.\n :param word_to_id: The mapping (vocabulary) to be used.\n :return:\n \"\"\"\n ret = []\n for k in list_words:\n if k in word_to_id:\n ret.append(word_to_id[k])\n else:\n ret.append(word_to_id[UNKNOWN_WORD])\n return ret\n\ndef _get_id(word, word_to_id):\n if word in word_to_id:\n return word_to_id[word]\n return word_to_id[UNKNOWN_WORD]\n\ndef _get_empty_id(vocab):\n \"\"\"\n Returns the id of the special symbol for empty words (-EMP-) for the specified vocabulary.\n :param vocab: The vocabulary to be used.\n :return: The id of the special symbol for empty words.\n \"\"\"\n return vocab[EMPTY_WORD]\n\ndef _get_unknown_id(vocab):\n \"\"\"\n Returns the id of the special symbol for unknown words (-UNK-) for the specified vocabulary.\n :param vocab: The vocabulary to be used.\n :return: The id of the special symbol for unknown words.\n \"\"\"\n return vocab[UNKNOWN_WORD]\n\n# Not used anymore. I think so at least. CHECKKKKKKKKKKKK!!!!!!!!\n# def _create_dataset(wordlist, vocabulary):\n# \"\"\"\n# Converts a list of subword_units/tokens/words in\n# :param wordlist: A tokenized list of a dataset's contents. The vocabulary is used to assign ids to the tokens.\n# :param vocabulary:\n# :return: A dataset: containing the words encoded as ids\n# \"\"\"\n# encoded = _get_ids_for_wordlist(wordlist, vocabulary)\n# return dataset(encoded, vocabulary)\n\n\nclass dataset(object):\n \"\"\"\n Represents a set of instances. Each instance is a code file but could also be a sentence in natural language.\n \"\"\"\n def __init__(self, rdata, vocab, rev_vocab):\n \"\"\"\n Creates and returns dataset instance. 
It contains a numpy array of ids and vocabulary mappings\n from subword_units to ids and vice-versa\n :param rdata:\n :param vocab:\n :param rev_vocab:\n \"\"\"\n self.data = np.array(rdata, dtype=np.int32)\n self.vocab = vocab\n self.rev_vocab = rev_vocab\n\n def batch_producer_memory_efficient(self, batch_size, num_steps):\n \"\"\"\n Generates batches of context and target pairs (the target is context time-shifted by one) in a memory efficient way based\n on the specified parameters (batch size and number of RNN steps).\n Each batch contains batch_size * num_steps ids.\n This method should be preferred when training on large corpora because the training corpus might not fit in memory.\n The only downside of this strategy is that the minibatches must be produced again for each epoch.\n :param batch_size: Minibatch size. Increases this parameter results in more parallelization.\n Making this parameter too big is not advised though since the average gradient of the batch\n is calculated during learning.\n :param num_steps: The lenght of the RNN sequence.\n :return: Yields minibatches of numpy arrays containing ids with dimensions: [batch_size, num_steps].\n \"\"\"\n raw_data = self.data # is just one long array\n data_len = len(raw_data)\n nbatches = data_len // (num_steps * batch_size)\n max_index_covered = nbatches * num_steps * batch_size\n remaining_tok_cnt = data_len - max_index_covered\n to_fill = num_steps * batch_size - remaining_tok_cnt + 1 # the last +1 is for target of last times ep\n to_fill_array = np.full(to_fill, self.vocab[EMPTY_WORD], dtype=int)\n for i in range(nbatches - 1):\n ip_range = raw_data[(i * batch_size * num_steps): (i + 1) * (batch_size * num_steps)]\n tar_range = raw_data[(i * batch_size * num_steps) + 1: (i + 1) * (batch_size * num_steps) + 1]\n ip_wt_range = np.ones(len(tar_range), dtype=float)\n contexts = np.stack((np.array_split(ip_range, batch_size)), axis=0)\n targets = np.stack((np.array_split(tar_range, batch_size)), axis=0)\n weights = np.stack((np.array_split(ip_wt_range, batch_size)), axis=0)\n yield(contexts, targets, weights)\n # Yield to fill\n ip_range = np.concatenate((raw_data[max_index_covered:], to_fill_array[:-1]))\n tar_range = np.concatenate((raw_data[max_index_covered+1:], to_fill_array))\n ip_wt_range = np.concatenate((np.ones(remaining_tok_cnt - 1, dtype=float), np.zeros(len(to_fill_array), dtype=float)))\n contexts = np.stack((np.array_split(ip_range, batch_size)), axis=0)\n targets = np.stack((np.array_split(tar_range, batch_size)), axis=0)\n weights = np.stack((np.array_split(ip_wt_range, batch_size)), axis=0)\n yield (contexts, targets, weights)\n\n def batch_producer(self, batch_size, num_steps, subword_weights=True, debug=False):\n \"\"\"\n Generates batches of context and target pairs (the target is context time-shifted by one)\n based on the specified parameters (batch size and number of RNN steps).\n Each batch contains batch_size * num_steps ids.\n This variation converts all the data once in huge numpy array.\n This method should be preferred when training on smaller corpora or during test time.\n Memory requirements scale with the data size.\n :param batch_size: Minibatch size. 
Increases this parameter results in more parallelization.\n Making this parameter too big is not advised though since the average gradient of the batch\n is calculated during learning.\n :param num_steps: The lenght of the RNN sequence.\n :return: Yields minibatches of numpy arrays containing ids with dimensions: [batch_size, num_steps].\n \"\"\"\n # raw_data = np.array(self.data, dtype=np.int32)# is just one long array\n raw_data = self.data # is just one long array\n data_len = len(raw_data)\n if debug: print('data_len:', data_len)\n nbatches = data_len // (num_steps * batch_size)\n if debug: print('nbatches', nbatches)\n remaining_tok_cnt = data_len - nbatches * num_steps * batch_size\n if debug: print('remaning:', remaining_tok_cnt)\n\n to_fill = num_steps * batch_size - remaining_tok_cnt + 1 # the last +1 is for target of last epoch\n to_fill_array = np.full(to_fill, self.vocab[EMPTY_WORD], dtype=int)\n padded_data = np.concatenate((raw_data, to_fill_array))\n if subword_weights:\n data_weights = np.concatenate((np.ones(len(raw_data) - 1, dtype=float), np.zeros(len(to_fill_array) + 1, dtype=float)))\n raw_weights = self._create_weights()\n subword_weights = np.concatenate((np.array(raw_weights[1:], dtype=float), np.zeros(len(to_fill_array) + 1, dtype=float)))\n else:\n data_weights = np.concatenate((np.ones(len(raw_data) - 1, dtype=float), np.zeros(len(to_fill_array) + 1, dtype=float)))\n if to_fill > 0:\n nbatches += 1\n\n if debug: print('actual batches:', nbatches)\n for i in range(nbatches):\n ip_range = padded_data[(i * batch_size * num_steps): (i + 1) * (batch_size * num_steps)]\n tar_range = padded_data[(i * batch_size * num_steps) + 1: (i + 1) * (batch_size * num_steps) + 1]\n ip_wt_range = data_weights[(i * batch_size * num_steps): (i + 1) * (batch_size * num_steps)]\n sub_wt_range = subword_weights[(i * batch_size * num_steps): (i + 1) * (batch_size * num_steps)]\n x = np.stack((np.array_split(ip_range, batch_size)), axis=0)\n y = np.stack((np.array_split(tar_range, batch_size)), axis=0)\n z = np.stack((np.array_split(ip_wt_range, batch_size)), axis=0)\n sub_wt = np.stack((np.array_split(sub_wt_range, batch_size)), axis=0)\n yield (x, y, z, sub_wt)\n\n\n def _create_weights(self):\n \"\"\"\n Creates weights for entropy calculation.\n If a word has #NUMBER subword units then each subword unit will have weight 1.0/(#NUMBER subword units).\n :return:\n \"\"\"\n # print(self.rev_vocab)\n raw_weights = []\n subwords = 1\n for wid in self.data:\n subword = self.rev_vocab[wid]\n if subword.endswith(SUBWORD_END):\n subwords += 1\n else:\n for _ in range(subwords):\n raw_weights.append(1.0 / subwords)\n subwords = 1\n return raw_weights\n\ndef main(argv):\n \"\"\" primarily to test if the readers work\"\"\"\n if len(argv) < 4:\n print(\"read_langmodel_comments.py -i <input_file> -v <vocab_limit_count> -b <batch_size> -n <num_steps>\")\n sys.exit(2)\n ipfile = \"\"\n num_steps = 0\n batch_size = 0\n vlimit = 0\n try:\n opts, args = getopt.getopt(argv, \"i:b:n:v:\")\n except getopt.GetoptError:\n print(\"read_langmodel_comments.py -i <input_file> -v <vocab_limit_count> -b <batch_size> -n <num_steps>\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == \"-i\":\n ipfile = arg\n if opt == \"-v\":\n vlimit = int(arg)\n if opt == \"-b\":\n batch_size = int(arg)\n if opt == \"-n\":\n num_steps = int(arg)\n\n # ip_conts = _read_words(ipfile)\n # print ip_conts\n ip_vocab, rev_ip_vocab = _build_vocab(ipfile, 0)\n wids = _file_to_word_ids(ipfile, ip_vocab)\n ip_dataset = dataset(wids, ip_vocab, 
rev_ip_vocab)\n # print(ip_vocab)\n print(ip_vocab['</s>'])\n\n print(\"Batches from data\")\n # for (ip, tar, wts, mem) in ip_dataset.batch_producer_memory_efficient_mem_net(batch_size, num_steps, 10):\n for (ip, tar, wts) in ip_dataset.batch_producer_memory_efficient_per_file(batch_size, num_steps):\n pass\n print(\"-- Batch --\", len(ip), len(tar), len(wts))\n print(len(ip[0]), len(tar[0]), len(wts[0]))\n print(len(ip[10]), len(tar[10]), len(wts[10]))\n print(len(ip[63]), len(tar[63]), len(wts[63]))\n # print ip\n # print tar\n # print wts\n\nif __name__==\"__main__\":\n main(sys.argv[1:])\n\n" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.array", "numpy.ones", "tensorflow.gfile.GFile", "tensorflow.device", "numpy.array_split" ] ]
giannisdaras/tensor2tensor
[ "8d3d175d649680c8e5b98a1b1c1c5e782ff492ac" ]
[ "tensor2tensor/bin/t2t_trainer.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train and evaluate.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport sys\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\nimport tensorflow as tf\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# See flags.py for additional command-line flags.\nflags.DEFINE_string(\"t2t_usr_dir\", None,\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_model calls, that will then be \"\n \"available to the t2t-trainer.\")\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"tpu_num_shards\", 8, \"Number of tpu shards.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU.\")\nflags.DEFINE_bool(\"use_tpu_estimator\", False, \"Whether to use TPUEstimator. \"\n \"This is always enabled when use_tpu is True.\")\nflags.DEFINE_bool(\"xla_compile\", False, \"Whether to use XLA to compile graph.\")\nflags.DEFINE_integer(\"tpu_infeed_sleep_secs\", None,\n \"How long to sleep the infeed thread.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_bool(\"profile\", False, \"Profile performance?\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\n# TODO(hinsu): Enable DistributionStrategy by default once performance gap\n# between DistributionStrategy and Parallelism is resolved.\nflags.DEFINE_bool(\n \"optionally_use_dist_strat\", False,\n \"Whether to use TensorFlow DistributionStrategy instead of explicitly \"\n \"replicating the model. 
DistributionStrategy is used only if the \"\n \"model replication configuration is supported by the DistributionStrategy.\")\n\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\nflags.DEFINE_string(\"std_server_protocol\", \"grpc\",\n \"Protocol for tf.train.Server.\")\n\n# Google Cloud TPUs\nflags.DEFINE_string(\"cloud_tpu_name\", \"%s-tpu\" % os.getenv(\"USER\"),\n \"Name of Cloud TPU instance to use or create.\")\n\n# Google Cloud ML Engine\nflags.DEFINE_bool(\"cloud_mlengine\", False,\n \"Whether to launch on Cloud ML Engine.\")\nflags.DEFINE_string(\"cloud_mlengine_master_type\", None,\n \"Machine type for master on Cloud ML Engine. \"\n \"If provided, overrides default selections based on \"\n \"--worker_gpu. User is responsible for ensuring \"\n \"type is valid and that --worker_gpu matches number of \"\n \"GPUs on machine type. See documentation: \"\n \"https://cloud.google.com/ml-engine/reference/rest/v1/\"\n \"projects.jobs#traininginput\")\n# Hyperparameter tuning on Cloud ML Engine\n# Pass an --hparams_range to enable\nflags.DEFINE_string(\"autotune_objective\", None,\n \"TensorBoard metric name to optimize.\")\nflags.DEFINE_bool(\"autotune_maximize\", True,\n \"Whether to maximize (vs. minimize) autotune_objective.\")\nflags.DEFINE_integer(\"autotune_max_trials\", 10,\n \"Maximum number of tuning experiments to run.\")\nflags.DEFINE_integer(\"autotune_parallel_trials\", 1,\n \"How many trials to run in parallel (will spin up this \"\n \"many jobs.\")\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_string(\"job-dir\", None,\n \"DO NOT USE. Exists only for Cloud ML Engine to pass in \"\n \"during hyperparameter tuning. Overrides --output_dir.\")\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n if FLAGS.use_tpu and \"tpu\" not in FLAGS.hparams_set:\n tf.logging.warn(\"Not all hyperparameter sets work on TPU. \"\n \"Prefer hparams_sets with a '_tpu' suffix, \"\n \"e.g. 
transformer_tpu, if available for your model.\")\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_experiment_fn():\n return trainer_lib.create_experiment_fn(\n model_name=FLAGS.model,\n problem_name=FLAGS.problem,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n min_eval_frequency=FLAGS.local_eval_frequency,\n schedule=FLAGS.schedule,\n eval_throttle_seconds=FLAGS.eval_throttle_seconds,\n export=FLAGS.export_saved_model,\n decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),\n use_tfdbg=FLAGS.tfdbg,\n use_dbgprofile=FLAGS.dbgprofile,\n eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,\n eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,\n eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,\n eval_early_stopping_metric_minimize=FLAGS\n .eval_early_stopping_metric_minimize,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n use_xla=FLAGS.xla_compile,\n warm_start_from=FLAGS.warm_start_from,\n decode_from_file=FLAGS.decode_from_file,\n decode_to_file=FLAGS.decode_to_file,\n decode_reference=FLAGS.decode_reference,\n std_server_protocol=FLAGS.std_server_protocol)\n\n\ndef create_run_config(hp, output_dir=None):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n output_dir: model's output directory, defaults to output_dir flag.\n\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.output_dir or FLAGS.checkpoint_path\n tpu_config_extra_kwargs = {}\n\n if getattr(hp, \"mtf_mode\", False):\n save_ckpt_steps = None # Disable the default saver\n save_ckpt_secs = None # Disable the default saver\n tpu_config_extra_kwargs = {\n \"num_cores_per_replica\": 1,\n \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n }\n\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and\n hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n shard_to_cpu=FLAGS.locally_shard_to_cpu,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n 
random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n cloud_tpu_name=FLAGS.cloud_tpu_name)\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\n@contextlib.contextmanager\ndef profile_context():\n if FLAGS.profile:\n with tf.contrib.tfprof.ProfileContext(\n \"t2tprof\", trace_steps=range(100), dump_steps=range(100)) as pctx:\n opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n\n\ndef maybe_log_registry_and_exit():\n if FLAGS.registry_help:\n tf.logging.info(registry.help_string())\n sys.exit(0)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_evaluate\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save FLAGS in txt file\n if hasattr(FLAGS, \"flags_into_string\"):\n flags_str = FLAGS.flags_into_string()\n t2t_flags_str = \"\\n\".join([\n \"--%s=%s\" % (f.name, f.value)\n for f in FLAGS.flags_by_module_dict()[\"tensor2tensor.utils.flags\"]\n ])\n else:\n flags_dict = FLAGS.__dict__[\"__flags\"]\n flags_str = \"\\n\".join(\n [\"--%s=%s\" % (name, str(f)) for (name, f) in flags_dict.items()])\n t2t_flags_str = None\n\n flags_txt = os.path.join(output_dir, \"flags.txt\")\n with tf.gfile.Open(flags_txt, \"w\") as f:\n f.write(flags_str)\n\n if t2t_flags_str:\n t2t_flags_txt = os.path.join(output_dir, \"flags_t2t.txt\")\n with tf.gfile.Open(t2t_flags_txt, \"w\") as f:\n f.write(t2t_flags_str)\n\n # Save hparams as hparams.json\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(hparams.to_json(indent=0, sort_keys=True))\n\n\ndef execute_schedule(exp):\n if not hasattr(exp, FLAGS.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % FLAGS.schedule)\n with profile_context():\n getattr(exp, FLAGS.schedule)()\n\n\ndef run_std_server():\n exp = trainer_lib.T2TExperiment(*([None] * 5))\n exp.run_std_server()\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START)\n if FLAGS.schedule == \"run_std_server\":\n run_std_server()\n mlperf_log.transformer_print(\n key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n maybe_log_registry_and_exit()\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n if argv:\n set_hparams_from_args(argv[1:])\n hparams = create_hparams()\n\n exp_fn = create_experiment_fn()\n exp = exp_fn(create_run_config(hparams), 
hparams)\n if is_chief():\n save_metadata(hparams)\n execute_schedule(exp)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n" ]
[ [ "tensorflow.logging.set_verbosity", "tensorflow.profiler.ProfileOptionBuilder.time_and_memory", "tensorflow.gfile.Open", "tensorflow.gfile.Exists", "tensorflow.logging.info", "tensorflow.gfile.MakeDirs", "tensorflow.app.run", "tensorflow.logging.warn" ] ]
alan-turing-institute/IceNetETL
[ "aaf4d0e211dcca58e8384e9574b59205b94a98a0" ]
[ "azfunctions/InputBlobTrigger/processor.py" ]
[ "# Standard library\nimport io\nimport logging\nimport math\nimport os\nimport time\n\n# Third party\nimport azure.functions as func\nimport pandas as pd\nimport psycopg2\nfrom shapely.geometry import Polygon\nimport xarray\n\n# Local\nfrom .utils import batches, human_readable, mean_step_size\n\n\nclass Processor:\n def __init__(self, batch_size):\n \"\"\"Constructor.\"\"\"\n self.batch_size = batch_size\n self.cnxn_ = None\n self.cursor_ = None\n self.tables = {\n \"geom\": \"cell\",\n \"predictions\": \"prediction\",\n \"latest\": \"prediction_latest\",\n }\n self.xr = None\n\n def __del__(self):\n \"\"\"Destructor.\"\"\"\n if self.cnxn_:\n self.cnxn_.close()\n\n @property\n def cnxn(self):\n \"\"\"Connect to the database or return an existing connection.\"\"\"\n if not self.cnxn_:\n try:\n db_host = os.getenv(\"PSQL_HOST\")\n db_name = os.getenv(\"PSQL_DB\")\n db_user = os.getenv(\"PSQL_USER\")\n db_pwd = os.getenv(\"PSQL_PWD\")\n self.cnxn_ = psycopg2.connect(\n dbname=db_name,\n port=\"5432\",\n user=f\"{db_user}@{db_host}\",\n password=db_pwd,\n host=db_host,\n )\n logging.info(f\"Connected to database {db_name} on {db_host}.\")\n except psycopg2.OperationalError:\n logging.error(f\"Failed to connect to database {db_name} on {db_host}!\")\n raise\n return self.cnxn_\n\n @property\n def cursor(self):\n \"\"\"Construct a database cursor or return an existing cursor.\"\"\"\n if not self.cursor_:\n self.cursor_ = self.cnxn.cursor()\n return self.cursor_\n\n def load(self, inputBlob: func.InputStream) -> None:\n \"\"\"Load data from a file into an xarray.\"\"\"\n logging.info(f\"Attempting to load {inputBlob.name}...\")\n try:\n self.xr = xarray.open_dataset(io.BytesIO(inputBlob.read()))\n logging.info(\n f\"Loaded NetCDF data into array with dimensions: {self.xr.dims}.\"\n )\n except ValueError as exc:\n logging.error(f\"Could not load NetCDF data from {inputBlob.name}!\")\n logging.error(exc)\n\n def update_geometries(self) -> None:\n \"\"\"Update the table of geometries, creating it if necessary.\"\"\"\n # Ensure that geometry table exists\n logging.info(\n f\"Ensuring that geometries table '{self.tables['geom']}' exists...\"\n )\n self.cursor.execute(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.tables['geom']} (\n cell_id SERIAL PRIMARY KEY,\n centroid_x int4,\n centroid_y int4,\n geom_6931 geometry,\n geom_4326 geometry,\n UNIQUE (centroid_x, centroid_y)\n );\n \"\"\"\n )\n self.cnxn.commit()\n logging.info(f\"Ensured that geometries table '{self.tables['geom']}' exists.\")\n\n # Calculate the size of the grid cells\n logging.info(\"Identifying cell geometries from input data...\")\n centroids_x_km, centroids_y_km = self.xr.xc.values, self.xr.yc.values\n x_delta_m = 1000 * int(0.5 * mean_step_size(centroids_x_km))\n y_delta_m = 1000 * int(0.5 * mean_step_size(centroids_y_km))\n\n # Construct list of geometry records\n records = []\n for centroid_x_km in centroids_x_km:\n centroid_x_m = int(1000 * centroid_x_km)\n for centroid_y_km in centroids_y_km:\n centroid_y_m = int(1000 * centroid_y_km)\n x_min_m, x_max_m = centroid_x_m - x_delta_m, centroid_x_m + x_delta_m\n y_min_m, y_max_m = centroid_y_m - y_delta_m, centroid_y_m + y_delta_m\n geometry = Polygon(\n [\n [x_min_m, y_max_m],\n [x_max_m, y_max_m],\n [x_max_m, y_min_m],\n [x_min_m, y_min_m],\n [x_min_m, y_max_m],\n ]\n )\n records.append((centroid_x_m, centroid_y_m, geometry.wkt, geometry.wkt))\n logging.info(f\"Identified {len(records)} cell geometries.\")\n\n # Insert geometries into the database\n logging.info(\n 
f\"Ensuring that '{self.tables['geom']}' contains all {len(records)} geometries...\"\n )\n n_batches = int(math.ceil(len(records) / self.batch_size))\n start_time = time.monotonic()\n for idx, record_batch in enumerate(batches(records, self.batch_size), start=1):\n logging.info(\n f\"Batch {idx}/{n_batches}. Preparing to insert/update {len(record_batch)} geometries...\"\n )\n for record in record_batch:\n self.cursor.execute(\n f\"\"\"\n INSERT INTO {self.tables['geom']} (cell_id, centroid_x, centroid_y, geom_6931, geom_4326)\n VALUES(DEFAULT, %s, %s, ST_GeomFromText(%s, 6931), ST_Transform(ST_GeomFromText(%s, 6931), 4326))\n ON CONFLICT DO NOTHING;\n \"\"\",\n record,\n )\n self.cnxn.commit()\n remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)\n logging.info(\n f\"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} geometries. Time remaining {human_readable(remaining_time)}.\"\n )\n logging.info(f\"Ensured that '{self.tables['geom']}' contains all geometries.\")\n\n def update_predictions(self) -> None:\n \"\"\"Update the table of predictions, creating it if necessary\"\"\"\n # Ensure that prediction table exists\n logging.info(\n f\"Ensuring that predictions table '{self.tables['predictions']}' exists...\"\n )\n self.cursor.execute(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.tables['predictions']} (\n prediction_id SERIAL PRIMARY KEY,\n date date,\n leadtime int4,\n cell_id int4,\n mean float4,\n stddev float4,\n UNIQUE (date, leadtime, cell_id),\n CONSTRAINT fk_cell_id FOREIGN KEY(cell_id) REFERENCES {self.tables['geom']}(cell_id)\n );\n \"\"\"\n )\n self.cnxn.commit()\n logging.info(\n f\"Ensured that predictions table '{self.tables['predictions']}' exists.\"\n )\n\n # Construct a list of values\n logging.info(\"Loading predictions from input data...\")\n df_predictions = (\n self.xr.where(self.xr[\"mean\"] > 0).to_dataframe().dropna().reset_index()\n )\n df_predictions[\"xc_m\"] = pd.to_numeric(\n 1000 * df_predictions[\"xc\"], downcast=\"integer\"\n )\n df_predictions[\"yc_m\"] = pd.to_numeric(\n 1000 * df_predictions[\"yc\"], downcast=\"integer\"\n )\n logging.info(f\"Loaded {df_predictions.shape[0]} predictions from input data.\")\n\n # Get cell IDs by loading existing cells and merging onto list of predictions\n logging.info(\"Identifying cell IDs for all predictions...\")\n df_cells = pd.io.sql.read_sql_query(\n f\"SELECT cell_id, centroid_x, centroid_y FROM {self.tables['geom']};\",\n self.cnxn,\n )\n df_merged = pd.merge(\n df_predictions,\n df_cells,\n how=\"left\",\n left_on=[\"xc_m\", \"yc_m\"],\n right_on=[\"centroid_x\", \"centroid_y\"],\n )\n logging.info(f\"Identified cell IDs for {df_merged.shape[0]} predictions.\")\n\n # Insert predictions into the database\n logging.info(\n f\"Ensuring that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions...\"\n )\n n_batches = int(math.ceil(df_merged.shape[0] / self.batch_size))\n start_time = time.monotonic()\n for idx, record_batch in enumerate(\n batches(df_merged, self.batch_size), start=1\n ):\n logging.info(\n f\"Batch {idx}/{n_batches}. 
Preparing to insert/update {len(record_batch)} predictions...\"\n )\n for record in record_batch:\n self.cursor.execute(\n f\"\"\"\n INSERT INTO {self.tables['predictions']} (prediction_id, date, leadtime, cell_id, mean, stddev)\n VALUES(\n DEFAULT,\n %s,\n %s,\n %s,\n %s,\n %s\n )\n ON CONFLICT DO NOTHING;\n \"\"\",\n [\n record.time.date(),\n record.leadtime,\n record.cell_id,\n record.mean,\n record.stddev,\n ],\n )\n self.cnxn.commit()\n remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)\n logging.info(\n f\"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} predictions. Time remaining {human_readable(remaining_time)}.\"\n )\n logging.info(\n f\"Ensured that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions.\"\n )\n\n def update_latest_prediction(self) -> None:\n \"\"\"Update the 'latest prediction' view, creating it if necessary\"\"\"\n # Ensure that view table exists\n logging.info(f\"Updating materialised view '{self.tables['latest']}'...\")\n self.cursor.execute(\n f\"\"\"\n DROP MATERIALIZED VIEW {self.tables['latest']};\n CREATE MATERIALIZED VIEW {self.tables['latest']} AS\n SELECT\n row_number() OVER (PARTITION BY true) as prediction_latest_id,\n {self.tables['predictions']}.date,\n {self.tables['predictions']}.leadtime,\n {self.tables['predictions']}.mean,\n {self.tables['predictions']}.stddev,\n {self.tables['geom']}.cell_id,\n {self.tables['geom']}.centroid_x,\n {self.tables['geom']}.centroid_y,\n {self.tables['geom']}.geom_6931,\n {self.tables['geom']}.geom_4326\n FROM {self.tables['predictions']}\n FULL OUTER JOIN cell ON {self.tables['predictions']}.cell_id = {self.tables['geom']}.cell_id\n WHERE date = (SELECT max(date) FROM {self.tables['predictions']})\n GROUP BY {self.tables['geom']}.cell_id, date, leadtime, centroid_x, centroid_y, mean, stddev, geom_6931, geom_4326;\n \"\"\"\n )\n self.cnxn.commit()\n logging.info(f\"Updated materialised view '{self.tables['latest']}'.\")\n" ]
[ [ "pandas.io.sql.read_sql_query", "pandas.to_numeric", "pandas.merge" ] ]
varunarora/Paddle
[ "868bdc97713169f3525a014f71a4dd3e52e85008" ]
[ "python/paddle/fluid/tests/book/notest_understand_sentiment.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import print_function\n\nimport unittest\nimport paddle.fluid as fluid\nimport paddle\nimport contextlib\nimport math\nimport numpy as np\nimport sys\nimport os\n\n\ndef convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,\n hid_dim=32):\n emb = fluid.layers.embedding(\n input=data, size=[input_dim, emb_dim], is_sparse=True)\n conv_3 = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=hid_dim,\n filter_size=3,\n act=\"tanh\",\n pool_type=\"sqrt\")\n conv_4 = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=hid_dim,\n filter_size=4,\n act=\"tanh\",\n pool_type=\"sqrt\")\n prediction = fluid.layers.fc(input=[conv_3, conv_4],\n size=class_dim,\n act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(cost)\n accuracy = fluid.layers.accuracy(input=prediction, label=label)\n return avg_cost, accuracy, prediction\n\n\ndef dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,\n lstm_size=128):\n emb = fluid.layers.embedding(\n input=data, size=[input_dim, emb_dim], is_sparse=True)\n sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')\n\n rnn = fluid.layers.DynamicRNN()\n with rnn.block():\n word = rnn.step_input(sentence)\n prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])\n prev_cell = rnn.memory(value=0.0, shape=[lstm_size])\n\n def gate_common(ipt, hidden, size):\n gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)\n gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)\n return gate0 + gate1\n\n forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,\n lstm_size))\n input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,\n lstm_size))\n output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,\n lstm_size))\n cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,\n lstm_size))\n\n cell = forget_gate * prev_cell + input_gate * cell_gate\n hidden = output_gate * fluid.layers.tanh(x=cell)\n rnn.update_memory(prev_cell, cell)\n rnn.update_memory(prev_hidden, hidden)\n rnn.output(hidden)\n\n last = fluid.layers.sequence_last_step(rnn())\n prediction = fluid.layers.fc(input=last, size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(cost)\n accuracy = fluid.layers.accuracy(input=prediction, label=label)\n return avg_cost, accuracy, prediction\n\n\ndef stacked_lstm_net(data,\n label,\n input_dim,\n class_dim=2,\n emb_dim=128,\n hid_dim=512,\n stacked_num=3):\n assert stacked_num % 2 == 1\n\n emb = fluid.layers.embedding(\n input=data, size=[input_dim, emb_dim], is_sparse=True)\n # add bias attr\n\n # TODO(qijun) linear act\n fc1 = fluid.layers.fc(input=emb, size=hid_dim)\n lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)\n\n inputs = [fc1, lstm1]\n\n for i in range(2, stacked_num + 1):\n fc = 
fluid.layers.fc(input=inputs, size=hid_dim)\n lstm, cell = fluid.layers.dynamic_lstm(\n input=fc, size=hid_dim, is_reverse=(i % 2) == 0)\n inputs = [fc, lstm]\n\n fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')\n lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')\n\n prediction = fluid.layers.fc(input=[fc_last, lstm_last],\n size=class_dim,\n act='softmax')\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(cost)\n accuracy = fluid.layers.accuracy(input=prediction, label=label)\n return avg_cost, accuracy, prediction\n\n\ndef create_random_lodtensor(lod, place, low, high):\n data = np.random.random_integers(low, high, [lod[-1], 1]).astype(\"int64\")\n res = fluid.LoDTensor()\n res.set(data, place)\n res.set_lod([lod])\n return res\n\n\ndef train(word_dict,\n net_method,\n use_cuda,\n parallel=False,\n save_dirname=None,\n is_local=True):\n BATCH_SIZE = 128\n PASS_NUM = 5\n dict_dim = len(word_dict)\n class_dim = 2\n\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n if not parallel:\n cost, acc_out, prediction = net_method(\n data, label, input_dim=dict_dim, class_dim=class_dim)\n else:\n places = fluid.layers.get_places()\n pd = fluid.layers.ParallelDo(places)\n with pd.do():\n cost, acc, _ = net_method(\n pd.read_input(data),\n pd.read_input(label),\n input_dim=dict_dim,\n class_dim=class_dim)\n pd.write_output(cost)\n pd.write_output(acc)\n\n cost, acc = pd()\n cost = fluid.layers.mean(cost)\n acc_out = fluid.layers.mean(acc)\n prediction = None\n assert save_dirname is None\n\n adagrad = fluid.optimizer.Adagrad(learning_rate=0.002)\n adagrad.minimize(cost)\n\n train_data = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.imdb.train(word_dict), buf_size=1000),\n batch_size=BATCH_SIZE)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n\n def train_loop(main_program):\n exe.run(fluid.default_startup_program())\n\n for pass_id in xrange(PASS_NUM):\n for data in train_data():\n cost_val, acc_val = exe.run(main_program,\n feed=feeder.feed(data),\n fetch_list=[cost, acc_out])\n print(\"cost=\" + str(cost_val) + \" acc=\" + str(acc_val))\n if cost_val < 0.4 and acc_val > 0.8:\n if save_dirname is not None:\n fluid.io.save_inference_model(save_dirname, [\"words\"],\n prediction, exe)\n return\n if math.isnan(float(cost_val)):\n sys.exit(\"got NaN loss, training failed.\")\n raise AssertionError(\"Cost is too large for {0}\".format(\n net_method.__name__))\n\n if is_local:\n train_loop(fluid.default_main_program())\n else:\n port = os.getenv(\"PADDLE_INIT_PORT\", \"6174\")\n pserver_ips = os.getenv(\"PADDLE_INIT_PSERVERS\") # ip,ip...\n eplist = []\n for ip in pserver_ips.split(\",\"):\n eplist.append(':'.join([ip, port]))\n pserver_endpoints = \",\".join(eplist) # ip:port,ip:port...\n trainers = int(os.getenv(\"TRAINERS\"))\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + port\n trainer_id = int(os.getenv(\"PADDLE_INIT_TRAINER_ID\"))\n training_role = os.getenv(\"TRAINING_ROLE\", \"TRAINER\")\n t = fluid.DistributeTranspiler()\n t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)\n if training_role == \"PSERVER\":\n pserver_prog = t.get_pserver_program(current_endpoint)\n pserver_startup = t.get_startup_program(current_endpoint,\n pserver_prog)\n 
exe.run(pserver_startup)\n exe.run(pserver_prog)\n elif training_role == \"TRAINER\":\n train_loop(t.get_trainer_program())\n\n\ndef infer(word_dict, use_cuda, save_dirname=None):\n if save_dirname is None:\n return\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n # Use fluid.io.load_inference_model to obtain the inference program desc,\n # the feed_target_names (the names of variables that will be feeded\n # data using feed operators), and the fetch_targets (variables that\n # we want to obtain data from using fetch operators).\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)\n\n word_dict_len = len(word_dict)\n\n lod = [0, 4, 10]\n tensor_words = create_random_lodtensor(\n lod, place, low=0, high=word_dict_len - 1)\n\n # Construct feed as a dictionary of {feed_target_name: feed_target_data}\n # and results will contain a list of data corresponding to fetch_targets.\n assert feed_target_names[0] == \"words\"\n results = exe.run(inference_program,\n feed={feed_target_names[0]: tensor_words},\n fetch_list=fetch_targets,\n return_numpy=False)\n print(results[0].lod())\n np_data = np.array(results[0])\n print(\"Inference Shape: \", np_data.shape)\n print(\"Inference results: \", np_data)\n\n\ndef main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None):\n if use_cuda and not fluid.core.is_compiled_with_cuda():\n return\n\n train(\n word_dict,\n net_method,\n use_cuda,\n parallel=parallel,\n save_dirname=save_dirname)\n infer(word_dict, use_cuda, save_dirname)\n\n\nclass TestUnderstandSentiment(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.word_dict = paddle.dataset.imdb.word_dict()\n\n @contextlib.contextmanager\n def new_program_scope(self):\n prog = fluid.Program()\n startup_prog = fluid.Program()\n scope = fluid.core.Scope()\n with fluid.scope_guard(scope):\n with fluid.program_guard(prog, startup_prog):\n yield\n\n def test_conv_cpu(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=convolution_net,\n use_cuda=False,\n save_dirname=\"understand_sentiment_conv.inference.model\")\n\n def test_conv_cpu_parallel(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=convolution_net,\n use_cuda=False,\n parallel=True)\n\n @unittest.skip(reason=\"make CI faster\")\n def test_stacked_lstm_cpu(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=stacked_lstm_net,\n use_cuda=False,\n save_dirname=\"understand_sentiment_stacked_lstm.inference.model\")\n\n def test_stacked_lstm_cpu_parallel(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=stacked_lstm_net,\n use_cuda=False,\n parallel=True)\n\n def test_conv_gpu(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=convolution_net,\n use_cuda=True,\n save_dirname=\"understand_sentiment_conv.inference.model\")\n\n def test_conv_gpu_parallel(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=convolution_net,\n use_cuda=True,\n parallel=True)\n\n @unittest.skip(reason=\"make CI faster\")\n def test_stacked_lstm_gpu(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=stacked_lstm_net,\n use_cuda=True,\n save_dirname=\"understand_sentiment_stacked_lstm.inference.model\")\n\n def test_stacked_lstm_gpu_parallel(self):\n with 
self.new_program_scope():\n main(\n self.word_dict,\n net_method=stacked_lstm_net,\n use_cuda=True,\n parallel=True)\n\n @unittest.skip(reason='make CI faster')\n def test_dynrnn_lstm_gpu(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=dyn_rnn_lstm,\n use_cuda=True,\n parallel=False)\n\n def test_dynrnn_lstm_gpu_parallel(self):\n with self.new_program_scope():\n main(\n self.word_dict,\n net_method=dyn_rnn_lstm,\n use_cuda=True,\n parallel=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.random.random_integers" ] ]
francesca-perillo/smartCarColorRecognizer
[ "efb8502b3872f9fda7b427f85bedda97ae8ea9d3" ]
[ "python/col_det_screen_view.py" ]
[ "from asyncio.windows_events import NULL\r\nfrom email import message\r\nimport cv2\r\nimport urllib.request\r\nimport numpy as np\r\n# to send color detection to arduino\r\nimport socket\r\nfrom threading import Thread\r\n# to delay the sending of messages\r\nimport timeit\r\n\r\n#function to open socket and connection\r\ndef socket_connection(message):\r\n sock = socket.socket()\r\n host = \"YOUR_IP_ADDRESS\" \r\n port = \"PORT(SAME_OF_ARDUINO_WIFI_SERVER)\" \r\n sock.connect((host, port))\r\n try: \r\n sock.send(message.encode('utf-8'))\r\n except ConnectionResetError:\r\n print('lost connection after socket_connection call function, automatically restart!')\r\n sock.connect((host, port))\r\n sock.send(message.encode('utf-8'))\r\n \r\n#change the IP address below according to the\r\n#IP shown in the Serial monitor of Arduino code\r\n#remember to change 'cam-lo.jpg' with an higher resolution (see Arduino code)\r\n#to see the images in the higher resolution\r\nyourIPaddress = \"INSERT_HERE_YOUR_IP_ADDRESS\"\r\nurl='http://'+yourIPaddress+'/cam-lo.jpg'\r\n\r\ncv2.namedWindow(\"live transmission\", cv2.WINDOW_AUTOSIZE)\r\n\r\n#------- DATA FROM color_detection.py -------#\r\n#color detection for blue\r\nl_h_blue, l_s_blue, l_v_blue = 14,88,175\r\nu_h_blue, u_s_blue, u_v_blue = 165,255,255\r\n#color detecrion for red \r\nl_h_red, l_s_red, l_v_red = 0, 10, 190\r\nu_h_red, u_s_red, u_v_red = 10, 255,255\r\n\r\n# var time to avoid continuos sending messages \r\nstart = 1\r\n\r\n\r\nwhile True:\r\n try:\r\n #get image from request (the url generated by Arduino IDE)\r\n img_resp=urllib.request.urlopen(url)\r\n imgnp=np.array(bytearray(img_resp.read()),dtype=np.uint8)\r\n frame=cv2.imdecode(imgnp,-1)\r\n \r\n # 1- convert frame from BGR to HSV\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n # 2- define the range of red\r\n l_b_blue = np.array([l_h_blue, l_s_blue, l_v_blue]) #blue\r\n u_b_blue = np.array([u_h_blue, u_s_blue, u_v_blue])\r\n l_b_red = np.array([l_h_red, l_s_red, l_v_red]) #red\r\n u_b_red = np.array([u_h_red, u_s_red, u_v_red])\r\n\r\n #check if the HSV of the frame is lower or upper blue and red\r\n mask_blue = cv2.inRange(hsv, l_b_blue, u_b_blue)\r\n mask_red = cv2.inRange(hsv, l_b_red, u_b_red)\r\n\r\n cnts_blue, _ = cv2.findContours(mask_blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n cnts_red, _ = cv2.findContours(mask_red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n \r\n # draw border line on the detected blue area\r\n for c in cnts_blue:\r\n area=cv2.contourArea(c)\r\n if area>9000:\r\n cv2.drawContours(frame,[c],-1,(255,0,0),3)\r\n M=cv2.moments(c)\r\n cx=int(M[\"m10\"]/M[\"m00\"])\r\n cy=int(M[\"m01\"]/M[\"m00\"])\r\n\r\n #python send message to arduino client - wifi\r\n new_vision = timeit.default_timer()\r\n if(new_vision-start>2):\r\n Thread(target=socket_connection, args=('b',)).start()\r\n start = timeit.default_timer()\r\n color ='blue'\r\n\r\n cv2.circle(frame,(cx,cy),7,(255,255,255),-1)\r\n cv2.putText(frame,\"blue\",(cx-20, cy-20),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2)\r\n \r\n # draw border line on the detected red area\r\n for c in cnts_red:\r\n area=cv2.contourArea(c)\r\n if area>9000:\r\n cv2.drawContours(frame,[c],-1,(255,0,0),3)\r\n M=cv2.moments(c)\r\n cx=int(M[\"m10\"]/M[\"m00\"])\r\n cy=int(M[\"m01\"]/M[\"m00\"])\r\n\r\n #python send message to arduino client - wifi\r\n new_vision = timeit.default_timer()\r\n if(new_vision-start>2):\r\n Thread(target=socket_connection, args=('r',)).start()\r\n start = timeit.default_timer()\r\n color = 
'red'\r\n\r\n cv2.circle(frame,(cx,cy),7,(255,255,255),-1)\r\n cv2.putText(frame,\"red\",(cx-20, cy-20),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2)\r\n \r\n res = cv2.bitwise_and(frame, frame, mask=mask_blue)\r\n \r\n cv2.imshow(\"live transmission\", frame)\r\n cv2.imshow(\"mask\", mask_blue)\r\n cv2.imshow(\"res\", res)\r\n key=cv2.waitKey(5)\r\n if key==ord('q'):\r\n break\r\n except ConnectionResetError:\r\n print('find it '+ color)\r\n \r\ncv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
amtagrwl/trax
[ "f7200793a0ce8922622d073a6ec90346efe8b679" ]
[ "trax/layers/metrics_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for metrics layers.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as np\n\nimport trax.layers as tl\n\n\nclass MetricsTest(absltest.TestCase):\n\n def test_category_accuracy(self):\n layer = tl.CategoryAccuracy()\n targets = np.array([0, 1, 2])\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.2, .1, .7, 0.],\n [.2, .1, .7, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets])\n self.assertEqual(accuracy, 1 / 3)\n\n def test_weighted_category_accuracy_even_weights(self):\n layer = tl.WeightedCategoryAccuracy()\n weights = np.array([1., 1., 1.])\n targets = np.array([0, 1, 2])\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.2, .1, .7, 0.],\n [.2, .1, .7, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1 / 3)\n\n def test_weighted_category_accuracy_uneven_weights(self):\n layer = tl.WeightedCategoryAccuracy()\n weights = np.array([1., 5., 2.])\n targets = np.array([0, 1, 2])\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.2, .7, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .7, .1, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .625)\n\n def test_accuracy_even_weights(self):\n layer = tl.Accuracy()\n weights = np.array([1., 1., 1.])\n targets = np.array([0, 1, 2])\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.2, .1, .7, 0.],\n [.2, .1, .7, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1 / 3)\n\n def test_accuracy_uneven_weights(self):\n layer = tl.Accuracy()\n weights = np.array([1., 5., 2.])\n targets = np.array([0, 1, 2])\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.2, .7, .1, 0.],\n [.2, .7, .1, 0.],\n [.2, .7, .1, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .625)\n\n model_outputs = np.array([[.7, .2, .1, 0.],\n [.7, .2, .1, 0.],\n [.7, .2, .1, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .125)\n\n def test_accuracy_binary_classifier(self):\n layer = tl.Accuracy(classifier=tl.ThresholdToBinary())\n 
targets = np.array([[0, 0, 1, 1],\n [1, 1, 1, 0]])\n weights = np.ones_like(targets)\n\n model_outputs = np.array([[.499, .500, .501, .502],\n [.503, .502, .501, .500]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.0)\n\n model_outputs = np.array([[.498, .499, .500, .501],\n [.502, .501, .500, .499]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .75)\n\n def test_sequence_accuracy_weights_all_ones(self):\n layer = tl.SequenceAccuracy()\n targets = np.array([[0, 1, 0, 1],\n [1, 0, 1, 1]])\n weights = np.ones_like(targets)\n\n # Model gets both sequences right; for each position in each sequence, the\n # category (integer ID) selected by argmax matches the target category.\n model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.4, .6]],\n [[.3, .7], [.8, .2], [.1, .9], [.4, .6]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.)\n\n # Model gets the first element of the first sequence barely wrong.\n model_outputs = np.array([[[.45, .55], [.2, .8], [.7, .3], [.4, .6]],\n [[.3, .7], [.8, .2], [.1, .9], [.4, .6]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .5)\n\n # Model gets the last element of each sequence barely wrong.\n model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.55, .45]],\n [[.3, .7], [.8, .2], [.1, .9], [.52, .48]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 0.)\n\n def test_sequence_accuracy_last_position_zero_weight(self):\n layer = tl.SequenceAccuracy()\n targets = np.array([[0, 1, 0, 0],\n [1, 0, 1, 0]])\n weights = np.array([[1., 1., 1., 0.],\n [1., 1., 1., 0.]])\n\n # Model gets both sequences right; output in final position would give\n # wrong category but is ignored.\n model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.35, .65]],\n [[.3, .7], [.8, .2], [.1, .9], [.35, .65]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 1.)\n\n # Model gets the first element of the first sequence barely wrong.\n model_outputs = np.array([[[.45, .55], [.2, .8], [.7, .3], [.6, .4]],\n [[.3, .7], [.8, .2], [.1, .9], [.6, .4]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, .5)\n\n # Model gets second-to-last element of each sequence barely wrong.\n model_outputs = np.array([[[.9, .1], [.2, .8], [.48, .52], [.6, .4]],\n [[.3, .7], [.8, .2], [.51, .49], [.6, .4]]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(accuracy, 0.)\n\n def test_binary_cross_entropy_loss(self):\n # TODO(jonni): Clarify desired semantics/naming, then test it.\n layer = tl.BinaryCrossEntropyLoss()\n xs = [np.ones((9, 1)),\n np.ones((9, 1)),\n np.ones((9, 1))]\n y = layer(xs)\n self.assertEqual(y.shape, ())\n\n def test_cross_entropy_loss(self):\n # TODO(jonni): Clarify desired semantics/naming, then test it.\n layer = tl.CrossEntropyLoss()\n xs = [np.ones((9, 4, 4, 20)),\n np.ones((9, 4, 4)),\n np.ones((9, 4, 4))]\n y = layer(xs)\n self.assertEqual(y.shape, ())\n\n def test_l2_loss(self):\n layer = tl.L2Loss()\n\n model_outputs = np.array([[1., 1.], [1., 1.]])\n targets = np.array([[1., 1.], [1., 0.]])\n weights = np.array([[1., 1.], [1., 0.]])\n loss = layer([model_outputs, targets, weights])\n np.testing.assert_allclose(loss, 0.0)\n\n weights = np.array([[1., 0.], [0., 1.]])\n loss = layer([model_outputs, targets, weights])\n np.testing.assert_allclose(loss, 0.5)\n\n def test_smooth_l1_loss(self):\n 
layer = tl.SmoothL1Loss()\n\n model_outputs = np.array([[1., 1.], [1., 2.]])\n targets = np.array([[1., 1.], [1., 0.]])\n l1_dist = 2\n\n weights = np.array([[1., 1.], [1., 0.]])\n loss = layer([model_outputs, targets, weights])\n np.testing.assert_allclose(loss, 0.0)\n\n weights = np.array([[1., 0.], [0., 1.]])\n sum_weights = 2\n\n loss = layer([model_outputs, targets, weights])\n np.testing.assert_allclose(loss, (l1_dist-0.5) / sum_weights)\n\n model_outputs = np.array([[1., 1.], [1., 1.5]])\n targets = np.array([[1., 1.], [1., 1.]])\n l1_dist = 0.5\n loss = layer([model_outputs, targets, weights])\n np.testing.assert_allclose(loss, 0.5 * l1_dist**2 / sum_weights)\n\n def test_names(self):\n layer = tl.L2Loss()\n self.assertEqual('L2Loss_in3', str(layer))\n layer = tl.Accuracy()\n self.assertEqual('Accuracy_in3', str(layer))\n layer = tl.SequenceAccuracy()\n self.assertEqual('SequenceAccuracy_in3', str(layer))\n layer = tl.BinaryCrossEntropyLoss()\n self.assertEqual('BinaryCrossEntropyLoss_in3', str(layer))\n layer = tl.CrossEntropyLoss()\n self.assertEqual('CrossEntropyLoss_in3', str(layer))\n layer = tl.BinaryCrossEntropySum()\n self.assertEqual('BinaryCrossEntropySum_in3', str(layer))\n layer = tl.CrossEntropySum()\n self.assertEqual('CrossEntropySum_in3', str(layer))\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.ones_like", "numpy.ones" ] ]
karthikprasad/FLSim
[ "3c62fe83de2f06feffb9ed65ce9f71803bbd6027" ]
[ "flsim/utils/tests/test_model_param_utils.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport collections\nimport copy\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom flsim.common.pytest_helper import (\n assertEqual,\n assertTrue,\n assertAlmostEqual,\n assertFalse,\n assertRaises,\n)\nfrom flsim.utils.fl.common import FLModelParamUtils\nfrom flsim.utils.fl.personalized_model import FLModelWithPrivateModules\nfrom flsim.utils.tests.helpers.test_models import (\n FCModel,\n LinearRegression,\n PersonalizedLinearRegression,\n)\nfrom flsim.utils.tests.helpers.test_utils import FLTestUtils\n\nPRIVATE_SLOPE_MODULE_NAME = FLModelWithPrivateModules.USER_PRIVATE_MODULE_PREFIX + \"_a\"\n\n\nclass TestFLModelParamUtils:\n def test_get_state_dict(self) -> None:\n model = LinearRegression()\n assertEqual(\n set(FLModelParamUtils.get_state_dict(model, False).keys()), {\"a\", \"b\"}\n )\n assertEqual(\n set(FLModelParamUtils.get_state_dict(model, True).keys()), {\"a\", \"b\"}\n )\n\n personalized_model = PersonalizedLinearRegression()\n assertEqual(\n set(FLModelParamUtils.get_state_dict(personalized_model, False).keys()),\n {PRIVATE_SLOPE_MODULE_NAME, \"b\"},\n )\n assertEqual(\n set(FLModelParamUtils.get_state_dict(personalized_model, True).keys()),\n {\"b\"},\n )\n\n def test_load_state_dict(self) -> None:\n personalized_model = PersonalizedLinearRegression()\n\n state_dict = collections.OrderedDict()\n state_dict[PRIVATE_SLOPE_MODULE_NAME] = torch.tensor([1.0])\n state_dict[\"b\"] = torch.tensor([0.5])\n\n FLModelParamUtils.load_state_dict(personalized_model, state_dict, False)\n assertEqual(\n dict(FLModelParamUtils.get_state_dict(personalized_model, False)),\n dict(state_dict),\n )\n # load_state_dict should work if non-private modules were given with\n # only_federated_params set as True\n state_dict_without_private_module = collections.OrderedDict()\n state_dict_without_private_module[\"b\"] = torch.tensor([0.3])\n FLModelParamUtils.load_state_dict(\n personalized_model, state_dict_without_private_module, True\n )\n assertEqual(\n dict(FLModelParamUtils.get_state_dict(personalized_model, False)),\n {PRIVATE_SLOPE_MODULE_NAME: torch.tensor([1.0]), \"b\": torch.tensor([0.3])},\n )\n # throws when unexpected key is provided\n state_dict[\"c\"] = torch.tensor([0.0])\n with assertRaises(AssertionError):\n FLModelParamUtils.load_state_dict(personalized_model, state_dict, True)\n # throws when non-private (i.e. 
federated module) is missing\n state_dict_with_missing_non_private_module = collections.OrderedDict()\n state_dict_with_missing_non_private_module[\"a\"] = torch.tensor([1.0])\n with assertRaises(AssertionError):\n FLModelParamUtils.load_state_dict(\n personalized_model, state_dict_with_missing_non_private_module, True\n )\n\n def test_zero_weights(self) -> None:\n personalized_model = PersonalizedLinearRegression()\n FLModelParamUtils.load_state_dict(\n personalized_model,\n collections.OrderedDict(\n [\n (PRIVATE_SLOPE_MODULE_NAME, torch.tensor([2.0])),\n (\"b\", torch.tensor([1.0])),\n ]\n ),\n False,\n )\n FLModelParamUtils.zero_weights(personalized_model, True)\n assertEqual(\n dict(FLModelParamUtils.get_state_dict(personalized_model, False)),\n {PRIVATE_SLOPE_MODULE_NAME: torch.tensor([2.0]), \"b\": torch.tensor([0.0])},\n )\n FLModelParamUtils.zero_weights(personalized_model)\n assertEqual(\n dict(FLModelParamUtils.get_state_dict(personalized_model, False)),\n {PRIVATE_SLOPE_MODULE_NAME: torch.tensor([0.0]), \"b\": torch.tensor([0.0])},\n )\n\n def test_get_trainable_params(self):\n fc_model = FCModel()\n assertEqual(len(list(FLModelParamUtils.get_trainable_params(fc_model))), 6)\n\n def test_get_num_trainable_params(self):\n fc_model = FCModel()\n assertEqual(\n FLModelParamUtils.get_num_trainable_params(fc_model),\n 10 * 5 + 5 * 3 + 3 * 1 + 5 + 3 + 1,\n )\n\n def test_get_gradient_l2_norm_raw(self):\n fc_model = FCModel()\n # set all gradients to 0, l2 norm should be zero\n for p in FLModelParamUtils.get_trainable_params(fc_model):\n p.grad = torch.zeros_like(p)\n assertEqual(FLModelParamUtils.get_gradient_l2_norm_raw(fc_model), 0.0)\n\n # set all gradients to 1, non-normalized l2 norm should be = sqrt(#params)\n num_trainable_params = FLModelParamUtils.get_num_trainable_params(fc_model)\n for p in FLModelParamUtils.get_trainable_params(fc_model):\n p.grad = torch.ones_like(p)\n assertAlmostEqual(\n FLModelParamUtils.get_gradient_l2_norm_raw(fc_model),\n math.sqrt(num_trainable_params),\n delta=1e-4,\n )\n\n # all gradients are std-normal-random, normalized grad norm = 1\n torch.manual_seed(1)\n for p in FLModelParamUtils.get_trainable_params(fc_model):\n p.grad = torch.randn_like(p)\n assertAlmostEqual(\n FLModelParamUtils.get_gradient_l2_norm_normalized(fc_model), 1, delta=1e-1\n )\n\n def test_model_linear_comb(self):\n \"\"\"Test that computing linear comibination works for a model\"\"\"\n FLTestUtils.compare_model_linear_comb(FCModel(), FCModel())\n\n def test_gradient_reconstruction(self):\n \"\"\"Test that gradient reconstruction works with a model.\n Create model, run some operations on it.\n \"\"\"\n model, copy_model, reconstructed_grad = FCModel(), FCModel(), FCModel()\n FLTestUtils.compare_gradient_reconstruction(\n model, copy_model, reconstructed_grad\n )\n\n def test_fed_async_aggregation_with_weights(self):\n \"\"\"Test that weights work for FedAsync aggregation\"\"\"\n torch.manual_seed(1)\n num_models = 4\n models = [FCModel() for i in range(num_models)]\n temp_model = copy.deepcopy(models[0])\n # verify that 0 weights work as expected\n FLModelParamUtils.average_models(models, temp_model, [0, 0, 0, 1])\n assertTrue(\n FLModelParamUtils.get_mismatched_param([temp_model, models[3]]) == \"\"\n )\n # verify that equal weights work as expected\n FLModelParamUtils.average_models(models, temp_model, [1, 1, 1, 1])\n temp_model_no_wts = copy.deepcopy(models[0])\n FLModelParamUtils.average_models(models, temp_model_no_wts)\n assertTrue(\n 
FLModelParamUtils.get_mismatched_param([temp_model, temp_model_no_wts])\n == \"\"\n )\n # verify that unequal weights work as expected\n temp_model_1 = copy.deepcopy(models[0])\n FLModelParamUtils.average_models(models, temp_model_1, [1, 1, 2, 2])\n temp_model_2 = copy.deepcopy(models[0])\n FLModelParamUtils.average_models(models, temp_model_2, [2, 2, 1, 1])\n temp_model_3 = copy.deepcopy(models[0])\n FLModelParamUtils.average_models([temp_model_1, temp_model_2], temp_model_3)\n temp_model_4 = copy.deepcopy(models[0])\n FLModelParamUtils.average_models(models, temp_model_4, [1, 1, 1, 1])\n\n mismatched_param = FLModelParamUtils.get_mismatched_param(\n [temp_model_3, temp_model_4], 1e-6\n )\n assertTrue(\n mismatched_param == \"\",\n (\n f\"Mismatched param name: {mismatched_param}\\n\"\n f\"temp_model_3:{temp_model_3}\\n\"\n f\"temp_model_4:{temp_model_4}\\n\",\n f\"total_difference:{self._compute_difference_in_norm(temp_model_3, temp_model_4)}\",\n ),\n )\n\n def _compute_difference_in_norm(\n self, model1: torch.nn.Module, model2: torch.nn.Module\n ):\n total_difference = 0.0\n for (parameter1, parameter2) in zip(model1.parameters(), model2.parameters()):\n total_difference += torch.norm(parameter1.data - parameter2.data)\n return total_difference\n\n def test_simple_model_copy(self):\n \"\"\"Test that FedAsync aggregation works for a simple Model\"\"\"\n num_models = 4\n orig_models = [FCModel() for i in range(num_models)]\n FLTestUtils.average_and_verify_models(orig_models)\n\n def test_debug_model_norm(self):\n fc_model = FCModel()\n for p in fc_model.parameters():\n torch.nn.init.constant_(p, 0.0)\n assertEqual(FLModelParamUtils.debug_model_norm(fc_model), 0)\n for p in fc_model.parameters():\n p.data.fill_(1.0)\n assertEqual(\n FLModelParamUtils.debug_model_norm(fc_model),\n FLModelParamUtils.get_num_trainable_params(fc_model),\n )\n\n def test_set_gradient(self):\n model = LinearRegression()\n reconstructed_gradient = LinearRegression()\n reconstructed_gradient.a.data = torch.FloatTensor([0.5])\n reconstructed_gradient.b.data = torch.FloatTensor([1.0])\n FLModelParamUtils.set_gradient(\n model=model, reference_gradient=reconstructed_gradient\n )\n assertEqual(model.a.grad, reconstructed_gradient.a)\n assertEqual(model.b.grad, reconstructed_gradient.b)\n\n def test_get_mismatched_param(self):\n a_val, b_val = 0.5, 1.0\n\n class MismatchingLinearRegression(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Parameter(torch.FloatTensor([a_val]))\n self.c = nn.Parameter(torch.FloatTensor([b_val]))\n\n def forward(self, x):\n return self.a + self.c * x\n\n model_1, model_2 = LinearRegression(), LinearRegression()\n model_1.a.data, model_1.b.data = (\n torch.FloatTensor([a_val]),\n torch.FloatTensor([b_val]),\n )\n model_2.a.data, model_2.b.data = (\n torch.FloatTensor([a_val]),\n torch.FloatTensor([b_val]),\n )\n\n # 1) models have same params => return an empty string\n assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), \"\")\n\n # 2) only param 'a' is different => return 'a'\n model_2.a.data = torch.FloatTensor([b_val])\n assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), \"a\")\n\n # 3) only param 'b' is different => return 'b'\n model_2.a.data, model_2.b.data = (\n torch.FloatTensor([a_val]),\n torch.FloatTensor([a_val]),\n )\n assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), \"b\")\n\n # 4) both param 'a' and 'b' are different\n # => return the first mismatched param, which is 'a'\n model_2.a.data = 
torch.FloatTensor([b_val])\n assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), \"a\")\n\n # 5) param 'b' in model_1 is missing in MismatchingLinearRegression\n # => return 'b'\n assertEqual(\n FLModelParamUtils.get_mismatched_param(\n [model_1, MismatchingLinearRegression()]\n ),\n \"b\",\n )\n\n def test_copy_models(self):\n torch.manual_seed(1)\n fc_model = FCModel()\n torch.manual_seed(2)\n copied_fc_model = FCModel()\n assertFalse(\n FLTestUtils.do_two_models_have_same_weights(fc_model, copied_fc_model)\n )\n\n FLModelParamUtils.copy_models(fc_model, [copied_fc_model])\n assertTrue(\n FLTestUtils.do_two_models_have_same_weights(fc_model, copied_fc_model)\n )\n" ]
[ [ "torch.nn.init.constant_", "torch.norm", "torch.FloatTensor", "torch.manual_seed", "torch.randn_like", "torch.tensor", "torch.ones_like", "torch.zeros_like" ] ]
joseluis1061/neuralnilm
[ "06bd6abc4db41140b65dfc0677677ef2aecff349" ]
[ "neuralnilm/data/realaggregatesource.py" ]
[ "from __future__ import print_function, division\nfrom copy import copy\nfrom datetime import timedelta\nimport numpy as np\nimport pandas as pd\nimport nilmtk\nfrom nilmtk.timeframegroup import TimeFrameGroup\nfrom nilmtk.timeframe import TimeFrame\nfrom neuralnilm.data.source import Sequence\nfrom neuralnilm.utils import check_windows\nfrom neuralnilm.data.activationssource import ActivationsSource\nfrom neuralnilm.consts import DATA_FOLD_NAMES\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass RealAggregateSource(ActivationsSource):\n \"\"\"\n Attributes\n ----------\n mains : dict\n Structure example:\n {<train | unseen_appliances | unseen_activations_of_seen_appliances>: {\n <building_name>: pd.Series of raw data\n }}\n mains_good_sections : dict\n Same structure as `mains`.\n sections_with_no_target : dict\n Same structure as `mains`.\n all_gaps : dict of pd.DataFrames\n Each key is a fold name.\n Each DF has columns:\n building, gap, duration, p (short for 'probability')\n p is used by _get_sequence_without_target().\n \"\"\"\n def __init__(self, activations, target_appliance,\n seq_length, filename, windows, sample_period,\n target_inclusion_prob=0.5,\n uniform_prob_of_selecting_each_building=True,\n allow_incomplete_target=True,\n include_incomplete_target_in_output=True,\n allow_multiple_target_activations_in_aggregate=False,\n include_multiple_targets_in_output=False,\n rng_seed=None):\n self.activations = copy(activations)\n self.target_appliance = target_appliance\n self.seq_length = seq_length\n self.filename = filename\n check_windows(windows)\n self.windows = windows\n self.sample_period = sample_period\n self.target_inclusion_prob = target_inclusion_prob\n self.uniform_prob_of_selecting_each_building = (\n uniform_prob_of_selecting_each_building)\n self.allow_incomplete_target = allow_incomplete_target\n self.include_incomplete_target_in_output = (\n include_incomplete_target_in_output)\n self.allow_multiple_target_activations_in_aggregate = (\n allow_multiple_target_activations_in_aggregate)\n self.include_multiple_targets_in_output = (\n include_multiple_targets_in_output)\n super(RealAggregateSource, self).__init__(rng_seed=rng_seed)\n\n self._load_mains_into_memory()\n self._remove_activations_with_no_mains()\n self._find_sections_with_no_target()\n self._compute_gap_probabilities()\n\n def _load_mains_into_memory(self):\n logger.info(\"Loading NILMTK mains...\")\n\n # Load dataset\n dataset = nilmtk.DataSet(self.filename)\n\n self.mains = {}\n self.mains_good_sections = {}\n for fold, buildings_and_windows in self.windows.items():\n for building_i, window in buildings_and_windows.items():\n dataset.set_window(*window)\n elec = dataset.buildings[building_i].elec\n building_name = (\n dataset.metadata['name'] +\n '_building_{}'.format(building_i))\n\n logger.info(\n \"Loading mains for {}...\".format(building_name))\n\n mains_meter = elec.mains()\n good_sections = mains_meter.good_sections()\n mains_data = mains_meter.power_series_all_data(\n sample_period=self.sample_period,\n sections=good_sections).dropna()\n\n def set_mains_data(dictionary, data):\n dictionary.setdefault(fold, {})[building_name] = data\n\n if not mains_data.empty:\n set_mains_data(self.mains, mains_data)\n set_mains_data(self.mains_good_sections, good_sections)\n\n logger.info(\n \"Loaded mains data from building {} for fold {}\"\n \" from {} to {}.\"\n .format(building_name, fold,\n mains_data.index[0], mains_data.index[-1]))\n\n dataset.store.close()\n logger.info(\"Done loading 
NILMTK mains data.\")\n\n def _find_sections_with_no_target(self):\n \"\"\"Finds the intersections of the mains good sections with the gaps\n between target appliance activations.\n \"\"\"\n self.sections_with_no_target = {}\n seq_length_secs = self.seq_length * self.sample_period\n for fold, sects_per_building in self.mains_good_sections.items():\n for building, good_sections in sects_per_building.items():\n activations = (\n self.activations[fold][self.target_appliance][building])\n mains = self.mains[fold][building]\n mains_good_sections = self.mains_good_sections[fold][building]\n gaps_between_activations = TimeFrameGroup()\n prev_end = mains.index[0]\n for activation in activations:\n gap = TimeFrame(prev_end, activation.index[0])\n gaps_between_activations.append(gap)\n prev_end = activation.index[-1]\n gap = TimeFrame(prev_end, mains.index[-1])\n gaps_between_activations.append(gap)\n intersection = (\n gaps_between_activations.intersection(mains_good_sections))\n intersection = intersection.remove_shorter_than(\n seq_length_secs)\n self.sections_with_no_target.setdefault(fold, {})[building] = (\n intersection)\n logger.info(\"Found {} sections without target for {} {}.\"\n .format(len(intersection), fold, building))\n\n def _compute_gap_probabilities(self):\n # Choose a building and a gap\n self.all_gaps = {}\n for fold in DATA_FOLD_NAMES:\n all_gaps_for_fold = []\n for building, gaps in self.sections_with_no_target[fold].items():\n gaps_for_building = [\n (building, gap, gap.timedelta.total_seconds())\n for gap in gaps]\n all_gaps_for_fold.extend(gaps_for_building)\n gaps_df = pd.DataFrame(\n all_gaps_for_fold, columns=['building', 'gap', 'duration'])\n gaps_df['p'] = gaps_df['duration'] / gaps_df['duration'].sum()\n self.all_gaps[fold] = gaps_df\n\n def _get_sequence_without_target(self, fold):\n # Choose a building and a gap\n all_gaps_for_fold = self.all_gaps[fold]\n n = len(all_gaps_for_fold)\n gap_i = self.rng.choice(n, p=all_gaps_for_fold['p'])\n row = all_gaps_for_fold.iloc[gap_i]\n building, gap = row['building'], row['gap']\n\n # Choose a start point in the gap\n latest_start_time = gap.end - timedelta(\n seconds=self.seq_length * self.sample_period)\n max_offset_seconds = (latest_start_time - gap.start).total_seconds()\n if max_offset_seconds <= 0:\n offset = 0\n else:\n offset = self.rng.randint(max_offset_seconds)\n start_time = gap.start + timedelta(seconds=offset)\n end_time = start_time + timedelta(\n seconds=(self.seq_length + 1) * self.sample_period)\n mains = self.mains[fold][building][start_time:end_time]\n seq = Sequence(self.seq_length)\n seq.input = mains.values[:self.seq_length]\n return seq\n\n def _has_sufficient_samples(self, data, start, end, threshold=0.8):\n if len(data) < 2:\n return False\n num_expected_samples = (\n (end - start).total_seconds() / self.sample_period)\n hit_rate = len(data) / num_expected_samples\n return (hit_rate >= threshold)\n\n def _remove_activations_with_no_mains(self):\n # First remove any activations where there is no mains data at all\n for fold, activations_for_appliance in self.activations.items():\n activations_for_buildings = activations_for_appliance[\n self.target_appliance]\n buildings_to_remove = []\n for building in activations_for_buildings:\n mains_for_fold = self.mains[fold]\n if (building not in mains_for_fold and\n building not in buildings_to_remove):\n buildings_to_remove.append(building)\n for building in buildings_to_remove:\n self.activations[fold][self.target_appliance].pop(building)\n\n # Now check for 
places where mains has insufficient samples,\n # for example because the mains series has a break in it.\n for fold, activations_for_appliance in self.activations.items():\n activations_for_buildings = activations_for_appliance[\n self.target_appliance]\n for building, activations in activations_for_buildings.items():\n mains = self.mains[fold][building]\n activations_to_remove = []\n for i, activation in enumerate(activations):\n activation_duration = (\n activation.index[-1] - activation.index[0])\n start = activation.index[0] - activation_duration\n end = activation.index[-1] + activation_duration\n mains_for_activ = mains[start:end]\n if (start < mains.index[0] or\n end > mains.index[-1] or not\n self._has_sufficient_samples(\n mains_for_activ, start, end)):\n activations_to_remove.append(i)\n if activations_to_remove:\n logger.info(\n \"Removing {} activations from fold '{}' building '{}'\"\n \" because there was not enough mains data for\"\n \" these activations. This leaves {} activations.\"\n .format(\n len(activations_to_remove), fold, building,\n len(activations) - len(activations_to_remove)))\n activations_to_remove.reverse()\n for i in activations_to_remove:\n activations.pop(i)\n self.activations[fold][self.target_appliance][building] = (\n activations)\n\n def _get_sequence(self, fold='train', enable_all_appliances=False):\n if enable_all_appliances:\n raise ValueError(\"`enable_all_appliances` is not implemented yet\"\n \" for RealAggregateSource!\")\n\n if self.rng.binomial(n=1, p=self.target_inclusion_prob):\n _seq_getter_func = self._get_sequence_which_includes_target\n else:\n _seq_getter_func = self._get_sequence_without_target\n\n MAX_RETRIES = 50\n for retry_i in range(MAX_RETRIES):\n seq = _seq_getter_func(fold=fold)\n if seq is None:\n continue\n if len(seq.input) != self.seq_length:\n continue\n if len(seq.target) != self.seq_length:\n continue\n break\n else:\n raise RuntimeError(\"No valid sequences found after {} retries!\"\n .format(MAX_RETRIES))\n\n seq.input = seq.input[:, np.newaxis]\n seq.target = seq.target[:, np.newaxis]\n assert len(seq.input) == self.seq_length\n assert len(seq.target) == self.seq_length\n return seq\n\n def _get_sequence_which_includes_target(self, fold):\n seq = Sequence(self.seq_length)\n building_name = self._select_building(fold, self.target_appliance)\n activations = (\n self.activations[fold][self.target_appliance][building_name])\n activation_i = self._select_activation(activations)\n activation = activations[activation_i]\n positioned_activation, is_complete = (\n self._position_activation(\n activation, is_target_appliance=True))\n if is_complete or self.include_incomplete_target_in_output:\n seq.target = positioned_activation\n else:\n seq.target = pd.Series(0, index=positioned_activation.index)\n\n # Check neighbouring activations\n mains_start = positioned_activation.index[0]\n mains_end = positioned_activation.index[-1]\n\n def neighbours_ok(neighbour_indicies):\n for i in neighbour_indicies:\n activation = activations[i]\n activation_duration = (\n activation.index[-1] - activation.index[0])\n neighbouring_activation_is_inside_mains_window = (\n activation.index[0] >\n (mains_start - activation_duration)\n and activation.index[0] < mains_end)\n\n if neighbouring_activation_is_inside_mains_window:\n if self.allow_multiple_target_activations_in_aggregate:\n if self.include_multiple_targets_in_output:\n sum_target = seq.target.add(\n activation, fill_value=0)\n is_complete = (\n sum_target.index == seq.target.index)\n if 
self.allow_incomplete_target or is_complete:\n seq.target = sum_target[seq.target.index]\n else:\n return False # need to retry\n else:\n return True # everything checks out OK so far\n return True\n\n # Check forwards\n if not neighbours_ok(range(activation_i+1, len(activations))):\n return\n # Check backwards\n if not neighbours_ok(range(activation_i-1, -1, -1)):\n return\n\n # Get mains\n mains_for_building = self.mains[fold][activation.building]\n # load some additional data to make sure we have enough samples\n mains_end_extended = mains_end + timedelta(\n seconds=self.sample_period * 2)\n mains = mains_for_building[mains_start:mains_end_extended]\n seq.input = mains.values[:self.seq_length]\n return seq\n\n @classmethod\n def _attrs_to_remove_for_report(cls):\n return [\n 'activations', 'rng', 'mains', 'mains_good_sections',\n 'sections_with_no_target', 'all_gaps']\n" ]
[ [ "pandas.DataFrame", "pandas.Series" ] ]
data2wealth/Interactive-Brokers-TWS-API-Python
[ "f7a8cbebe156cc7578704ac1b1f6eec0eec29910" ]
[ "samples/Python/Testbed/FileProcessor.py" ]
[ "import pandas as pd\r\nimport datetime as dt\r\nimport xml.etree.ElementTree as ETree\r\n\r\nimport DataStorage as DStrg\r\nimport DatabaseQuery as DBQry\r\nimport DatabaseWriter as DBWtr\r\n\r\nfrom ibapi.contract import Contract\r\n\r\ncsv_file_path = 'C:/Users/Rosie/Documents/Python Generated Files/'\r\n\r\n\r\n# ================================ Functions to Read Files =================================\r\n\r\ndef readAsxStocksList():\r\n data = pd.read_excel(r'C:\\Users\\Rosie\\Documents\\Trading\\ASX_All_Stock_List_20201224.xlsx', engine='openpyxl')\r\n df = pd.DataFrame(data, columns=['Rank', 'Code', 'Company', 'Price', 'Mkt Cap', '1 Year'])\r\n # print(df)\r\n tickers = df['Code']\r\n return tickers\r\n\r\n\r\ndef read_excel_multi_sheets(excel_file_name: str):\r\n data = pd.read_excel(excel_file_name, engine='openpyxl')\r\n df = pd.DataFrame(data, columns=['Rank', 'Code', 'Company', 'Price', 'Mkt Cap', '1 Year'])\r\n # print(df)\r\n tickers = df['Code']\r\n return tickers\r\n\r\n\r\n# ================================ Functions to Write Files =================================\r\n\r\n\r\ndef write_multiple_df_to_one_excel(excel_file_name: str, daily_bars_dict: {}, intra_day_bars_dict: {}):\r\n print(\"Writing all daily bars and intra day bars in dictionaries to excel {}:\".format(excel_file_name))\r\n for key in daily_bars_dict.keys():\r\n with pd.ExcelWriter(excel_file_name, mode='a') as writer:\r\n daily_bars_df = daily_bars_dict[key]\r\n intra_day_bars_df = intra_day_bars_dict[key]\r\n daily_bars_df.drop('time', axis=1, inplace=True)\r\n intra_day_bars_df.drop('time', axis=1, inplace=True)\r\n sym = daily_bars_df.loc[0, 'symbol']\r\n print('Appending {} daily bars to excel sheet \\'daily_bars_{}\\''.format(sym, sym))\r\n daily_bars_df.to_excel(writer, sheet_name='daily_bars_{}'.format(sym))\r\n print('Appending {} intra day bars to excel sheet \\'intra_day_bars_{}\\''.format(sym, sym))\r\n intra_day_bars_df.to_excel(writer, sheet_name='intra_day_bars_{}'.format(sym))\r\n\r\n\r\ndef write_df_to_csv(df: pd.DataFrame, file_name: str):\r\n print('Writing dataframe to a csv file {}{}'.format(csv_file_path, file_name))\r\n df.to_csv(csv_file_path + file_name, index=False, float_format='%.4f')\r\n\r\n\r\ndef save_all_asx_contracts_to_csv(db_connection):\r\n df = DBQry.select_all_contracts_for_primEX(db_connection, 'ASX')\r\n write_df_to_csv(df, 'ASX_all_contracts_20210311.csv')\r\n\r\n\r\ndef save_all_sub_categories_to_csv(db_connection):\r\n df = DBQry.select_all_sub_categories_fr_contracts(db_connection)\r\n write_df_to_csv(df, 'ASX_all_sub_categories.csv')\r\n\r\n\r\ndef save_all_REITS_to_csv(db_connection):\r\n df = DBQry.select_all_reits_contracts(db_connection)\r\n write_df_to_csv(df, 'ASX_all_reits_contracts.csv')\r\n\r\n\r\ndef save_all_gold_mining_contracts_to_csv(db_connection):\r\n df = DBQry.select_all_gold_mining_contracts(db_connection)\r\n write_df_to_csv(df, 'ASX_all_gold_mining_contracts.csv')\r\n\r\n\r\ndef test_save_to_csv():\r\n conn = DBWtr.connect_to_db()\r\n save_all_asx_contracts_to_csv(conn)\r\n save_all_sub_categories_to_csv(conn)\r\n save_all_REITS_to_csv(conn)\r\n save_all_gold_mining_contracts_to_csv(conn)\r\n\r\n\r\n# test_save_to_csv()\r\n\r\n\r\n# ================================ Functions to Handle XML =================================\r\n\r\n\r\ndef open_xml():\r\n f = open(\"ASX_SYD_Fundamental_Snapshot.xml\", \"r\")\r\n xml_str = f.read()\r\n f.close()\r\n return xml_str\r\n\r\n\r\ndef writeXml(self, contr: Contract, data: str):\r\n f = 
open(\"{}_{}_Fundamental_Snapshot.xml\".format(contr.primaryExchange, contr.symbol), \"w\")\r\n f.write(data)\r\n f.close()\r\n\r\n\r\ndef parse_snapshot_xml(xml_str: str):\r\n tree = ETree.fromstring(xml_str)\r\n for child in tree:\r\n if child.tag == 'CoIDs':\r\n for id_elm in child:\r\n if id_elm.get('Type') == 'CompanyName':\r\n co_name = id_elm.text\r\n elif id_elm.get('Type') == 'OrganizationPermID':\r\n co_id = float(id_elm.text)\r\n org = DStrg.Organization(co_id, co_name)\r\n if child.tag == 'Issues': # contains split info\r\n for iss_elm in child:\r\n # <Issue ID=\"1\" Type=\"C\" Desc=\"Common Stock\" Order=\"1\">\r\n # <Issue ID=\"2\" Type=\"P\" Desc=\"Preferred Stock\" Order=\"1\">\r\n if iss_elm.get('Type') == 'C':\r\n for comm_stk_elm in iss_elm:\r\n if comm_stk_elm.tag == 'MostRecentSplit':\r\n last_split_date = dt.datetime.strptime(comm_stk_elm.get('Date'), '%Y-%m-%d').date()\r\n last_split = float(comm_stk_elm.text)\r\n org.share_split = DStrg.ShareSplit(last_split_date, last_split)\r\n if child.tag == 'CoGeneralInfo': # contains number of shares\r\n for gi_elm in child:\r\n # <SharesOut Date=\"2020-06-30\" TotalFloat=\"939774578.0\">942286900.0</SharesOut>\r\n if gi_elm.tag == 'SharesOut':\r\n num_float = float(gi_elm.get('TotalFloat'))\r\n num_shares = float(gi_elm.text)\r\n num_shares_date = dt.datetime.strptime(gi_elm.get('Date'), '%Y-%m-%d').date()\r\n org.share_issued = DStrg.ShareIssued(num_shares_date, num_shares)\r\n org.share_float = DStrg.ShareFloat(num_shares_date, num_float)\r\n if child.tag == 'Ratios': # contains mkt cap\r\n for r_elm in child:\r\n if r_elm.get('ID') == 'Income Statement':\r\n for pv_elm in r_elm:\r\n if pv_elm.get('FieldName') == 'MKTCAP':\r\n mkt_cap = float(pv_elm.text) * 1000000\r\n org.mkt_cap = mkt_cap\r\n return org\r\n\r\n\r\ndef parse_fin_stmts_xml(xml_str: str):\r\n tree = ETree.fromstring(xml_str)\r\n\r\n org = None\r\n name_map = {}\r\n inc_st_dict = {}\r\n bs_st_dict = {}\r\n cf_st_dict = {}\r\n interim_inc_st_dict = {}\r\n interim_bs_st_dict = {}\r\n interim_cf_st_dict = {}\r\n\r\n for child in tree:\r\n if child.tag == 'CoIDs':\r\n for id_elm in child:\r\n co_name = ''\r\n co_id = 0\r\n if id_elm.get('Type') == 'CompanyName':\r\n co_name = id_elm.text\r\n # <CoID Type=\"RepNo\">A4EC6</CoID>\r\n # <CoID Type=\"CompanyName\">SPDR S&amp;P/ASX 50</CoID>\r\n elif id_elm.get('Type') == 'OrganizationPermID':\r\n co_id = float(id_elm.text)\r\n if co_id == '':\r\n print('OrganizationPermID does NOT exist in Financial Stmts Xml!')\r\n org = DStrg.Organization(co_id, co_name) # Not all fin_stmts_xml has CompanyId !!!\r\n elif child.tag == 'FinancialStatements':\r\n for fs_elm in child:\r\n if fs_elm.tag == 'COAMap':\r\n for coa_elm in fs_elm:\r\n name_map[coa_elm.get('coaItem')] = coa_elm.text\r\n elif fs_elm.tag == 'AnnualPeriods':\r\n for ap_elm in fs_elm:\r\n # <FiscalPeriod Type=\"Annual\" EndDate=\"2020-06-30\" FiscalYear=\"2020\">\r\n ap_date = ap_elm.get('EndDate')\r\n inc_st_dict[ap_date] = {}\r\n bs_st_dict[ap_date] = {}\r\n cf_st_dict[ap_date] = {}\r\n\r\n for fiscal in ap_elm:\r\n if fiscal.get('Type') == 'INC': # Income Stmt\r\n for is_elm in fiscal:\r\n if is_elm.tag == 'lineItem':\r\n inc_st_dict[ap_date][is_elm.get('coaCode')] = is_elm.text\r\n\r\n elif fiscal.get('Type') == 'BAL': # Balance Sheet\r\n for is_elm in fiscal:\r\n if is_elm.tag == 'lineItem':\r\n bs_st_dict[ap_date][is_elm.get('coaCode')] = is_elm.text\r\n\r\n elif fiscal.get('Type') == 'CAS': # Cashflow Stmt\r\n for is_elm in fiscal:\r\n if is_elm.tag == 
'lineItem':\r\n cf_st_dict[ap_date][is_elm.get('coaCode')] = is_elm.text\r\n\r\n elif fs_elm.tag == 'InterimPeriods':\r\n for ip_elm in fs_elm:\r\n # <FiscalPeriod Type=\"Interim\" EndDate=\"2020-06-30\" FiscalYear=\"2020\" FiscalPeriodNumber=\"4\">\r\n ip_date = ip_elm.get('EndDate')\r\n period_no = ip_elm.get('FiscalPeriodNumber')\r\n period_combine = ip_date + '_period' + period_no\r\n interim_inc_st_dict[period_combine] = {}\r\n interim_bs_st_dict[period_combine] = {}\r\n interim_cf_st_dict[period_combine] = {}\r\n\r\n for fiscal in ip_elm:\r\n if fiscal.get('Type') == 'INC': # Income Stmt\r\n for is_elm in fiscal:\r\n if is_elm.tag == 'lineItem':\r\n interim_inc_st_dict[period_combine][is_elm.get('coaCode')] = is_elm.text\r\n\r\n elif fiscal.get('Type') == 'BAL': # Balance Sheet\r\n for is_elm in fiscal:\r\n if is_elm.tag == 'lineItem':\r\n interim_bs_st_dict[period_combine][is_elm.get('coaCode')] = is_elm.text\r\n\r\n elif fiscal.get('Type') == 'CAS': # Cashflow Stmt\r\n for is_elm in fiscal:\r\n if is_elm.tag == 'lineItem':\r\n interim_cf_st_dict[period_combine][is_elm.get('coaCode')] = is_elm.text\r\n\r\n inc_df = pd.DataFrame(inc_st_dict)\r\n bs_df = pd.DataFrame(bs_st_dict)\r\n cf_df = pd.DataFrame(cf_st_dict)\r\n int_inc_df = pd.DataFrame(interim_inc_st_dict)\r\n int_bs_df = pd.DataFrame(interim_bs_st_dict)\r\n int_cf_df = pd.DataFrame(interim_cf_st_dict)\r\n\r\n inc_df.rename(name_map, inplace=True)\r\n bs_df.rename(name_map, inplace=True)\r\n cf_df.rename(name_map, inplace=True)\r\n int_inc_df.rename(name_map, inplace=True)\r\n int_bs_df.rename(name_map, inplace=True)\r\n int_cf_df.rename(name_map, inplace=True)\r\n\r\n print(inc_df)\r\n print(bs_df)\r\n print(cf_df)\r\n print(int_inc_df)\r\n print(int_bs_df)\r\n print(int_cf_df)\r\n return [org, inc_df, bs_df, cf_df]\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_excel", "pandas.ExcelWriter" ] ]
seatonullberg/cmstk
[ "f8dd4f698723053c06d181ecdd918d8e5fc98a92" ]
[ "cmstk/vasp/poscar.py" ]
[ "from cmstk.filetypes import TextFile\nfrom cmstk.structure.atom import Atom\nfrom cmstk.structure.simulation import SimulationCell\nfrom collections import OrderedDict\nimport numpy as np\nfrom typing import Dict, List, Optional, Tuple\n\n\nclass PoscarFile(TextFile):\n \"\"\"File wrapper for a VASP POSCAR file.\n\n Notes:\n This wrapper is compatible with both POSCAR and CONTCAR files\n because they are exactly the same with the exception of velocities\n being reported in the CONTCAR. However, I have chosen to ignore the\n velocities section because never once have I seen an example where it\n was used for anything or even an example where the result was anything\n except an array of zeros. If this feature is critically important to\n you, fork it and fix it :)\n\n Args:\n filepath: Filepath to a POSCAR file.\n comment: Comment line at the top of the file.\n direct: Specifies direct (fractional) coordinates.\n simulation_cell: Underlying simulation cell.\n n_atoms_per_symbol: Number of atoms of each species.\n - Presented in the order that they appear in the POTCAR.\n relaxations: Boolean matrix to indicate selective dymanics parameters.\n\n Attributes:\n filepath: Filepath to a POSCAR file.\n comment: Comment line at the top of the file.\n direct: Specifies direct (fractional) coordinates.\n simulation_cell: Underlying simulation cell.\n n_atoms_per_symbol: Number of atoms of each species.\n - Presented in the order that they appear in the POTCAR.\n relaxations: Boolean matrix to indicate selective dymanics parameters.\n \"\"\"\n\n def __init__(self,\n filepath: Optional[str] = None,\n comment: Optional[str] = None,\n direct: bool = False,\n scaling_factor: Optional[float] = None,\n simulation_cell: Optional[SimulationCell] = None,\n n_atoms_per_symbol: Optional[List[int]] = None,\n relaxations: Optional[List[np.ndarray]] = None) -> None:\n if filepath is None:\n filepath = \"POSCAR\"\n if comment is None:\n comment = \"# painstakingly crafted by cmstk :)\"\n self._comment = comment\n self._direct = direct\n self._scaling_factor = scaling_factor\n self._simulation_cell = simulation_cell\n if n_atoms_per_symbol is None and simulation_cell is not None:\n symbol_count_map: Dict[str, int] = OrderedDict()\n for sym in self.simulation_cell.symbols:\n if sym in symbol_count_map:\n symbol_count_map[sym] += 1\n else:\n symbol_count_map[sym] = 1\n n_atoms_per_symbol = list(symbol_count_map.values())\n self._n_atoms_per_symbol = n_atoms_per_symbol\n self._relaxations = relaxations\n super().__init__(filepath)\n\n def write(self, path: Optional[str] = None) -> None:\n \"\"\"Writes a POSCAR file.\n\n Args:\n path: Filepath to write to.\n \"\"\"\n if path is None:\n path = self.filepath\n with open(path, \"w\") as f:\n f.write(\"{}\\n\".format(self.comment))\n f.write(\"\\t{}\\n\".format(self.scaling_factor))\n for row in self.simulation_cell.coordinate_matrix:\n row = \"{:.6f} {:.6f} {:.6f}\".format(row[0], row[1], row[2])\n f.write(\"\\t{}\\n\".format(row))\n f.write(\"\\t{}\\n\".format(\" \".join(map(str,\n self.n_atoms_per_symbol))))\n if len(self.relaxations) != 0:\n f.write(\"Selective dynamics\\n\")\n if self.direct:\n f.write(\"Direct\\n\")\n else:\n f.write(\"Cartesian\\n\")\n for i, p in enumerate(self.simulation_cell.positions):\n p_row = \"{:.6f} {:.6f} {:.6f}\".format(p[0], p[1], p[2])\n if len(self.relaxations) != 0:\n r = [(\"T\" if x else \"F\") for x in self.relaxations[i]]\n r_row = \" \".join(r)\n f.write(\"\\t{} {}\\n\".format(p_row, r_row))\n else:\n 
f.write(\"\\t{}\\n\".format(p_row))\n\n @property\n def comment(self) -> str:\n if self._comment is None:\n self._comment = self.lines[0]\n return self._comment\n\n @comment.setter\n def comment(self, value: str) -> None:\n self._comment = value\n\n @property\n def direct(self) -> bool:\n if self._direct is None:\n coord_sys_index = self._coordinate_system_line_number\n if self.lines[coord_sys_index][0] in [\"C\", \"c\", \"K\", \"k\"]:\n self._direct = False\n else:\n self._direct = True\n return self._direct\n\n @direct.setter\n def direct(self, value: bool) -> None:\n self._direct = value\n\n @property\n def scaling_factor(self) -> float:\n if self._scaling_factor is None:\n self._scaling_factor = float(self.lines[1])\n return self._scaling_factor\n\n @scaling_factor.setter\n def scaling_factor(self, value: float) -> None:\n self._scaling_factor = value\n\n @property\n def simulation_cell(self) -> SimulationCell:\n if self._simulation_cell is None:\n cm = self.lines[2:5]\n cm_arr = np.array([np.fromstring(row, sep=\" \") for row in cm])\n start, end = self._position_section_line_numbers\n positions = self.lines[start:end]\n positions = [\" \".join(p.split()[:3]) for p in positions]\n arr_positions = [np.fromstring(p, sep=\" \") for p in positions]\n atoms = []\n for p in arr_positions:\n atoms.append(Atom(position=p))\n simulation_cell = SimulationCell(atoms, cm_arr)\n self._simulation_cell = simulation_cell\n return self._simulation_cell\n\n @simulation_cell.setter\n def simulation_cell(self, value: SimulationCell) -> None:\n self._simulation_cell = value\n\n @property\n def relaxations(self) -> List[np.ndarray]:\n if self._relaxations is None:\n start, end = self._position_section_line_numbers\n relaxations = self.lines[start:end]\n relaxations_lst = [r.split()[3:] for r in relaxations]\n relaxations_arr = [\n np.array([True if rr == \"T\" else False\n for rr in r])\n for r in relaxations_lst\n ]\n self._relaxations = relaxations_arr\n return self._relaxations\n\n @relaxations.setter\n def relaxations(self, value: List[np.ndarray]) -> None:\n if len(value) != self.simulation_cell.n_atoms:\n err = \"relaxations length must match number of atoms.\"\n raise ValueError(err)\n self._relaxations = value\n\n @property\n def n_atoms_per_symbol(self) -> List[int]:\n if self._n_atoms_per_symbol is None:\n naps = [int(s) for s in self.lines[5].split()]\n self._n_atoms_per_symbol = naps\n return self._n_atoms_per_symbol\n\n @n_atoms_per_symbol.setter\n def n_atoms_per_symbol(self, value: List[int]) -> None:\n if len(value) != self.simulation_cell.n_symbols:\n err = \"Number of symbols must match existing value.\"\n raise ValueError(err)\n self._n_atoms_per_symbol = value\n\n @property\n def _coordinate_system_line_number(self) -> int:\n if self.lines[6][0] in [\"S\", \"s\"]:\n return 7\n else:\n return 6\n\n @property\n def _position_section_line_numbers(self) -> Tuple[int, int]:\n start = self._coordinate_system_line_number + 1\n end = start + sum(self.n_atoms_per_symbol)\n return (start, end)\n" ]
[ [ "numpy.array", "numpy.fromstring" ] ]
BinWang28/EvalRank-Embedding-Evaluation
[ "454dac5c7345f01993688f33375f637129c285e3" ]
[ "src/models/sent_emb/_bert.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n###\n# Created Date: 2022-03-20 16:44:31\n# Author: Bin Wang\n# -----\n# Copyright (c) 2022 National University of Singapore\n# \n# -----\n# HISTORY:\n# Date&Time \t\t\tBy\tComments\n# ----------\t\t\t---\t----------------------------------------------------------\n###\n\nimport logging\n\nimport math\nimport torch\nimport numpy as np\nfrom tqdm import trange\n\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef embedder_init(self, config):\n ''' initialize for sentence embedding '''\n\n logging.info(\"BERT Model Preparation\")\n self.model_name_or_path = config.model_spec\n self.pooling = config.pooler\n self.cache_dir = './cache'\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, cache_dir=self.cache_dir)\n self.bert_model = AutoModel.from_pretrained(self.model_name_or_path, cache_dir=self.cache_dir).to(self.device)\n\n\n\ndef embedder_infer_all(self, sent_list, normalization, centralization):\n ''' inference package for embedding for all needed sentences '''\n\n\n logging.debug(\"Compute BERT features\")\n\n sent2id = {}\n sents_embs = []\n count = 0\n ex_batch_size = 64\n ex_max_batches = math.ceil(len(sent_list)/float(ex_batch_size))\n \n self.bert_model.eval()\n with torch.no_grad():\n for cur_batch in trange(ex_max_batches, unit=\"batches\", leave=False):\n\n cur_sents = sent_list[cur_batch*ex_batch_size:cur_batch*ex_batch_size+ex_batch_size]\n\n model_inputs = self.tokenizer(\n cur_sents,\n add_special_tokens = True,\n return_tensors = 'pt',\n max_length = 512,\n padding = 'longest',\n truncation = True\n ).to(self.device)\n\n all_hidden_states = self.bert_model(\n input_ids = model_inputs['input_ids'],\n attention_mask = model_inputs['attention_mask'],\n output_hidden_states = True,\n return_dict = True\n ).hidden_states\n\n lengths = model_inputs['attention_mask'].sum(dim=1, keepdim=True) # (bsz, 1)\n\n if self.pooling == 'cls':\n bt_ori_emb = all_hidden_states[-1][:,0]\n elif self.pooling == 'last-avg':\n bt_ori_emb = ((all_hidden_states[-1] * model_inputs['attention_mask'].unsqueeze(-1)).sum(dim=1)).div(lengths) # (bsz, hdim)\n elif self.pooling == 'first-last-avg':\n bt_ori_emb = ((all_hidden_states[1] * model_inputs['attention_mask'].unsqueeze(-1)).sum(dim=1) + \\\n (all_hidden_states[-1] * model_inputs['attention_mask'].unsqueeze(-1)).sum(dim=1)\n ).div(2 * lengths) # (bsz, hdim)\n\n for bt_index in range(len(cur_sents)):\n sent = cur_sents[bt_index]\n if sent not in sent2id:\n sent2id[sent] = count\n count = count + 1\n ori_emb = bt_ori_emb[bt_index].squeeze().cpu().numpy()\n sents_embs.append(ori_emb)\n else:\n continue\n\n sents_embs = np.stack(sents_embs)\n\n self.sent2id = sent2id\n self.sents_embs = sents_embs\n\n if centralization:\n if self.sents_embs is not None:\n self.sents_embs = self.sents_embs - self.sents_embs.mean(axis=0, keepdims=True)\n\n if normalization:\n self.normalizing_sent_vectors()\n" ]
[ [ "torch.no_grad", "numpy.stack", "torch.cuda.is_available" ] ]
Kzra/pykrev
[ "1a328fccded962f309e951c8509b87a82c3d3ae6" ]
[ "pykrev/plotting/kendrick_mass_defect_plot.py" ]
[ "from ..formula import kendrick_mass_defect\nfrom matplotlib import pyplot as plt\ndef kendrick_mass_defect_plot(mz_list, base = 'CH2', rounding = 'even', **kwargs):\n \"\"\" \n\tDocstring for function PyKrev.kendrick_mass_defect_plot.py\n\t====================\n\tThis function takes a list of molecular formula strings and a list or numpy array of mz values and creates a kendrick mass defect plot.\n\tUse\n\t----\n\tkendrick_mass_defect_plot(Y,X)\n \n\tReturns the figure and axes handles and a tuple containing two numpy arrays, the first contains the kendrick mass and the second the kendrick mass defect. \n \n\tParameters\n\t----------\n\tY: A list of molecular formula. \n X: A list or numpy array of mz_values of len(Y). \n\tBase: Atom group used to define the Kendrick mass.\n Rounding: One of 'ceil', 'floor', or 'even', see pk.kendrickMass()\n **kwargs: key word arguments for pyplot.scatter(). \n Info\n\t----------\n Calculation taken from Hughey et al (2001) \n \"Kendrick Mass Defect Spectrum: A Compact Visual Analysis for Ultrahigh-Resolution Broadband Mass Spectra\"\n Note: Rounding calclations may lead to artefacts in complex datasets with many peaks. \n We recommend experimenting with different rounding methods when making kmd plots.\n \"\"\"\n \n kendrickMass, kendrickMassDefect = kendrick_mass_defect(mz_list,base=base,rounding=rounding)\n plt.scatter(kendrickMass,kendrickMassDefect, **kwargs)\n plt.xlabel('Kendrick Mass')\n plt.ylabel('Kendrick Mass Defect')\n fig = plt.gcf()\n ax = plt.gca()\n return fig, ax, (kendrickMass, kendrickMassDefect)\n " ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.gca" ] ]
toandaominh1997/automlkiller
[ "c5f451cfb05d8287bec0d26a9b14e2c1b452861c" ]
[ "automlkiller/models/classification/adaboost_classifier.py" ]
[ "from sklearn.ensemble import AdaBoostClassifier\nfrom automlkiller.models.model_factory import ModelFactory\nfrom automlkiller.utils.distributions import np_list_arange, UniformDistribution, IntUniformDistribution\n@ModelFactory.register('classification-adaboostclassifier')\nclass AdaBoostClassifierContainer():\n def __init__(self, **kwargs):\n tune_grid = {}\n tune_distributions = {}\n tune_grid = {\n \"n_estimators\": np_list_arange(10, 300, 10, inclusive=True),\n \"learning_rate\": np_list_arange(0.001, 0.5, 0.001, inclusive=True),\n \"algorithm\": [\"SAMME\", \"SAMME.R\"],\n }\n tune_distributions = {\n \"n_estimators\": IntUniformDistribution(10, 300),\n \"learning_rate\": UniformDistribution(0.000001, 0.5, log=True),\n }\n\n self.tune_grid = tune_grid\n self.tune_distributions = tune_distributions\n self.estimator = AdaBoostClassifier(**kwargs)\n\n" ]
[ [ "sklearn.ensemble.AdaBoostClassifier" ] ]
scrlnas2019/nas4candle
[ "318959424cc66819c816054a87bd1cb5d426e2e7", "318959424cc66819c816054a87bd1cb5d426e2e7" ]
[ "nas4candle/candle/NT3/models/candle_conv_mlp_baseline.py", "nas4candle/nasapi/benchmark/nas/cifar10/load_data.py" ]
[ "import tensorflow as tf\n\nfrom nas4candle.nasapi.search.nas.model.baseline.util.struct import (create_seq_struct,\n create_struct_full_skipco)\nfrom nas4candle.nasapi.search.nas.model.space.block import Block\nfrom nas4candle.nasapi.search.nas.model.space.cell import Cell\nfrom nas4candle.nasapi.search.nas.model.space.structure import KerasStructure\nfrom nas4candle.nasapi.search.nas.model.space.node import VariableNode, ConstantNode\nfrom nas4candle.nasapi.search.nas.model.space.op.basic import Connect\nfrom nas4candle.nasapi.search.nas.model.space.op.op1d import (Conv1D, Dense, Identity, Activation,\n MaxPooling1D, Dropout, Flatten)\n\n\n\ndef create_cell_conv(input_nodes):\n \"\"\"Create a cell with convolution.\n\n Args:\n input_nodes (list(Node)): a list of input_nodes for this cell.\n\n Returns:\n Cell: the corresponding cell.\n \"\"\"\n cell = Cell(input_nodes)\n\n n1 = ConstantNode(op=Conv1D(filter_size=20, num_filters=128), name='N1')\n cell.graph.add_edge(input_nodes[0], n1) # fixed input connection\n\n n2 = ConstantNode(op=Activation(activation='relu'), name='N2')\n\n n3 = ConstantNode(op=MaxPooling1D(pool_size=1, padding='same'), name='N3')\n\n n4 = ConstantNode(op=Conv1D(filter_size=10, num_filters=128),name='N4')\n\n n5 = ConstantNode(op=Activation(activation='relu'), name='N5')\n\n n6 = ConstantNode(op=MaxPooling1D(pool_size=10, padding='same'), name='N6')\n\n n7 = ConstantNode(op=Flatten(), name='N7')\n\n n8 = ConstantNode(op=Dense(units=200), name='N8')\n\n n9 = ConstantNode(op=Activation(activation='relu'), name='N9')\n\n n10 = ConstantNode(op=Dropout(rate=0.1), name='N10')\n\n n11 = ConstantNode(op=Dense(units=20), name='N11')\n\n n12 = ConstantNode(op=Activation(activation='relu'), name='N12')\n\n n13 = ConstantNode(op=Dropout(rate=0.1), name='N13')\n\n block = Block()\n block.add_node(n1)\n block.add_node(n2)\n block.add_node(n3)\n block.add_node(n4)\n block.add_node(n5)\n block.add_node(n6)\n block.add_node(n7)\n block.add_node(n8)\n block.add_node(n9)\n block.add_node(n10)\n block.add_node(n11)\n block.add_node(n12)\n block.add_node(n13)\n block.add_edge(n1, n2)\n block.add_edge(n2, n3)\n block.add_edge(n3, n4)\n block.add_edge(n4, n5)\n block.add_edge(n5, n6)\n block.add_edge(n6, n7)\n block.add_edge(n7, n8)\n block.add_edge(n8, n9)\n block.add_edge(n9, n10)\n block.add_edge(n10, n11)\n block.add_edge(n11, n12)\n block.add_edge(n12, n13)\n\n cell.add_block(block)\n\n cell.set_outputs()\n return cell\n\ndef create_structure(input_shape=(2,), output_shape=(1,), *args, **kwargs):\n network = KerasStructure(input_shape, output_shape) #, output_op=AddByPadding)\n input_nodes = network.input_nodes\n\n # CELL 1\n cell1 = create_cell_conv(input_nodes)\n network.add_cell(cell1)\n\n return network\n\ndef test_create_structure():\n from random import random, seed\n from nas4candle.nasapi.search.nas.model.space.structure import KerasStructure\n from nas4candle.nasapi.core.model_utils import number_parameters\n from tensorflow.keras.utils import plot_model\n import tensorflow as tf\n # seed(10)\n shapes = [(2, )]\n structure = create_structure(shapes, (1,), 5)\n assert type(structure) is KerasStructure\n\n ops = [random() for i in range(structure.num_nodes)]\n print('num ops: ', len(ops))\n structure.set_ops(ops)\n structure.draw_graphviz('nt3_model.dot')\n\n model = structure.create_model()\n print('depth: ', structure.depth)\n\n model = structure.create_model()\n plot_model(model, to_file='nt3_model.png', show_shapes=True)\n\n model.summary()\n\n # import numpy as np\n # 
x0 = np.zeros((1, *shapes[0]))\n # x1 = np.zeros((1, *shapes[1]))\n # x2 = np.zeros((1, *shapes[2]))\n # inpts = [x0, x1, x2]\n # y = model.predict(inpts)\n\n # for x in inpts:\n # print(f'shape(x): {np.shape(x)}')\n # print(f'shape(y): {np.shape(y)}')\n\n # total_parameters = number_parameters()\n # print('total_parameters: ', total_parameters)\n\n # assert np.shape(y) == (1, 1), f'Wrong output shape {np.shape(y)} should be {(1, 1)}'\n\nif __name__ == '__main__':\n test_create_structure()\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport numpy as np\n\nfrom keras.datasets.cifar import load_batch\nimport keras.backend as K\nfrom keras.utils.data_utils import get_file\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\ndef load_data(dest=None):\n \"\"\"Loads CIFAR10 dataset.\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n \"\"\"\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n dest = HERE+'/DATA'\n if dest is None:\n dest = '/projects/datascience/username/nas4candle.nasapi/benchmark/cifar10Nas/DATA'\n else:\n dest = os.path.abspath(os.path.expanduser(dest))\n\n print(f\"getfile(origin={origin}, dest={dest})\")\n\n path = get_file('cifar-10-batches-py', origin=origin, untar=True,\n cache_subdir=dest)\n\n num_train_samples = 50000\n\n train_X = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n train_y = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (train_X[(i - 1) * 10000:i * 10000, :, :, :],\n train_y[(i - 1) * 10000:i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n test_X, test_y = load_batch(fpath)\n\n train_y = np.reshape(train_y, (len(train_y)))\n test_y = np.reshape(test_y, (len(test_y)))\n\n train_X = np.true_divide(train_X, 255)\n test_X = np.true_divide(test_X, 255)\n\n if K.image_data_format() == 'channels_last':\n train_X = train_X.transpose(0, 2, 3, 1)\n test_X = test_X.transpose(0, 2, 3, 1)\n return (train_X, train_y), (test_X, test_y)\n\nif __name__ == '__main__':\n (train_X, train_y), (test_X, test_Y) = load_data()\n print(train_X[0])\n print(f'train_X shape = {np.shape(train_X)}')\n print(f'train_y shape = {np.shape(train_y)}')\n print(f'test_X shape = {np.shape(test_X)}')\n print(f'test_y shape = {np.shape(test_y)}')\n#load_data('cifar10_data')\n" ]
[ [ "tensorflow.keras.utils.plot_model" ], [ "numpy.true_divide", "numpy.empty", "numpy.shape" ] ]
SaiKrishna1207/Underwater-Image-Segmentation
[ "78def27e577b10e6722c02807bdcfeb7ba53d760" ]
[ "preprocessed_data/UCM/Code/global_histogram_stretching.py" ]
[ "import numpy as np\n\ndef histogram_r(r_array,height, width):\n length = height * width\n R_rray = []\n for i in range(height):\n for j in range(width):\n R_rray.append(r_array[i][j])\n R_rray.sort()\n I_min = int(R_rray[int(length / 500)])\n I_max = int(R_rray[-int(length / 500)])\n array_Global_histogram_stretching = np.zeros((height, width))\n for i in range(0, height):\n for j in range(0, width):\n if r_array[i][j] < I_min:\n # p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = I_min\n elif (r_array[i][j] > I_max):\n p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = 255\n else:\n p_out = int((r_array[i][j] - I_min) * ((255 - I_min) / (I_max - I_min)))+ I_min\n array_Global_histogram_stretching[i][j] = p_out\n return (array_Global_histogram_stretching)\n\ndef histogram_g(r_array,height, width):\n length = height * width\n R_rray = []\n for i in range(height):\n for j in range(width):\n R_rray.append(r_array[i][j])\n R_rray.sort()\n I_min = int(R_rray[int(length / 500)])\n I_max = int(R_rray[-int(length / 500)])\n array_Global_histogram_stretching = np.zeros((height, width))\n for i in range(0, height):\n for j in range(0, width):\n if r_array[i][j] < I_min:\n p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = 0\n elif (r_array[i][j] > I_max):\n p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = 255\n else:\n p_out = int((r_array[i][j] - I_min) * ((255) / (I_max - I_min)) )\n array_Global_histogram_stretching[i][j] = p_out\n return (array_Global_histogram_stretching)\n\ndef histogram_b(r_array,height, width):\n length = height * width\n R_rray = []\n for i in range(height):\n for j in range(width):\n R_rray.append(r_array[i][j])\n R_rray.sort()\n I_min = int(R_rray[int(length / 500)])\n I_max = int(R_rray[-int(length / 500)])\n array_Global_histogram_stretching = np.zeros((height, width))\n for i in range(0, height):\n for j in range(0, width):\n if r_array[i][j] < I_min:\n # p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = 0\n elif (r_array[i][j] > I_max):\n # p_out = r_array[i][j]\n array_Global_histogram_stretching[i][j] = I_max\n else:\n p_out = int((r_array[i][j] - I_min) * ((I_max) / (I_max - I_min)))\n array_Global_histogram_stretching[i][j] = p_out\n return (array_Global_histogram_stretching)\n\ndef stretching(img):\n height = len(img)\n width = len(img[0])\n img[:, :, 2] = histogram_r(img[:, :, 2], height, width)\n img[:, :, 1] = histogram_g(img[:, :, 1], height, width)\n img[:, :, 0] = histogram_b(img[:, :, 0], height, width)\n return img\n\n\n" ]
[ [ "numpy.zeros" ] ]
larsll/sagemaker-python-sdk
[ "4d0c2e67ea32376460a8d21abac4cc2c729d8c45" ]
[ "tests/integ/test_tf_script_mode.py" ]
[ "# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport os\nimport time\n\nimport pytest\n\nfrom sagemaker.tensorflow import TensorFlow\nfrom sagemaker.utils import unique_name_from_base, sagemaker_timestamp\n\nimport tests.integ\nfrom tests.integ import timeout\nfrom tests.integ import kms_utils\nfrom tests.integ.retry import retries\nfrom tests.integ.s3_utils import assert_s3_files_exist\n\nROLE = \"SageMakerRole\"\n\nRESOURCE_PATH = os.path.join(os.path.dirname(__file__), \"..\", \"data\")\nMNIST_RESOURCE_PATH = os.path.join(RESOURCE_PATH, \"tensorflow_mnist\")\nTFS_RESOURCE_PATH = os.path.join(RESOURCE_PATH, \"tfs\", \"tfs-test-entrypoint-with-handler\")\n\nSCRIPT = os.path.join(MNIST_RESOURCE_PATH, \"mnist.py\")\nPARAMETER_SERVER_DISTRIBUTION = {\"parameter_server\": {\"enabled\": True}}\nMPI_DISTRIBUTION = {\"mpi\": {\"enabled\": True}}\nTAGS = [{\"Key\": \"some-key\", \"Value\": \"some-value\"}]\n\n\ndef test_mnist_with_checkpoint_config(sagemaker_session, instance_type, tf_full_version):\n checkpoint_s3_uri = \"s3://{}/checkpoints/tf-{}\".format(\n sagemaker_session.default_bucket(), sagemaker_timestamp()\n )\n checkpoint_local_path = \"/test/checkpoint/path\"\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=instance_type,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=tf_full_version,\n py_version=tests.integ.PYTHON_VERSION,\n metric_definitions=[{\"Name\": \"train:global_steps\", \"Regex\": r\"global_step\\/sec:\\s(.*)\"}],\n checkpoint_s3_uri=checkpoint_s3_uri,\n checkpoint_local_path=checkpoint_local_path,\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n\n training_job_name = unique_name_from_base(\"test-tf-sm-mnist\")\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(inputs=inputs, job_name=training_job_name)\n assert_s3_files_exist(\n sagemaker_session,\n estimator.model_dir,\n [\"graph.pbtxt\", \"model.ckpt-0.index\", \"model.ckpt-0.meta\"],\n )\n df = estimator.training_job_analytics.dataframe()\n assert df.size > 0\n\n expected_training_checkpoint_config = {\n \"S3Uri\": checkpoint_s3_uri,\n \"LocalPath\": checkpoint_local_path,\n }\n actual_training_checkpoint_config = sagemaker_session.sagemaker_client.describe_training_job(\n TrainingJobName=training_job_name\n )[\"CheckpointConfig\"]\n assert actual_training_checkpoint_config == expected_training_checkpoint_config\n\n\ndef test_server_side_encryption(sagemaker_session, tf_full_version):\n with kms_utils.bucket_with_encryption(sagemaker_session, ROLE) as (bucket_with_kms, kms_key):\n output_path = os.path.join(\n bucket_with_kms, \"test-server-side-encryption\", time.strftime(\"%y%m%d-%H%M\")\n )\n\n estimator = TensorFlow(\n entry_point=\"training.py\",\n 
source_dir=TFS_RESOURCE_PATH,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=\"ml.c5.xlarge\",\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=tf_full_version,\n py_version=tests.integ.PYTHON_VERSION,\n code_location=output_path,\n output_path=output_path,\n model_dir=\"/opt/ml/model\",\n output_kms_key=kms_key,\n )\n\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(\n inputs=inputs, job_name=unique_name_from_base(\"test-server-side-encryption\")\n )\n\n endpoint_name = unique_name_from_base(\"test-server-side-encryption\")\n with timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n estimator.deploy(\n initial_instance_count=1,\n instance_type=\"ml.c5.xlarge\",\n endpoint_name=endpoint_name,\n entry_point=os.path.join(TFS_RESOURCE_PATH, \"inference.py\"),\n )\n\n\n@pytest.mark.canary_quick\ndef test_mnist_distributed(sagemaker_session, instance_type, tf_full_version):\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=ROLE,\n train_instance_count=2,\n train_instance_type=instance_type,\n sagemaker_session=sagemaker_session,\n py_version=tests.integ.PYTHON_VERSION,\n script_mode=True,\n framework_version=tf_full_version,\n distributions=PARAMETER_SERVER_DISTRIBUTION,\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/distributed_mnist\"\n )\n\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(inputs=inputs, job_name=unique_name_from_base(\"test-tf-sm-distributed\"))\n assert_s3_files_exist(\n sagemaker_session,\n estimator.model_dir,\n [\"graph.pbtxt\", \"model.ckpt-0.index\", \"model.ckpt-0.meta\"],\n )\n\n\ndef test_mnist_async(sagemaker_session, cpu_instance_type):\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=\"ml.c5.4xlarge\",\n py_version=tests.integ.PYTHON_VERSION,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n # testing py-sdk functionality, no need to run against all TF versions\n framework_version=TensorFlow.LATEST_VERSION,\n tags=TAGS,\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n estimator.fit(inputs=inputs, wait=False, job_name=unique_name_from_base(\"test-tf-sm-async\"))\n training_job_name = estimator.latest_training_job.name\n time.sleep(20)\n endpoint_name = training_job_name\n _assert_training_job_tags_match(\n sagemaker_session.sagemaker_client, estimator.latest_training_job.name, TAGS\n )\n with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n estimator = TensorFlow.attach(\n training_job_name=training_job_name, sagemaker_session=sagemaker_session\n )\n model_name = \"model-mnist-async\"\n predictor = estimator.deploy(\n initial_instance_count=1,\n instance_type=cpu_instance_type,\n endpoint_name=endpoint_name,\n model_name=model_name,\n )\n\n result = predictor.predict(np.zeros(784))\n print(\"predict result: {}\".format(result))\n _assert_endpoint_tags_match(sagemaker_session.sagemaker_client, predictor.endpoint, TAGS)\n _assert_model_tags_match(sagemaker_session.sagemaker_client, model_name, TAGS)\n 
_assert_model_name_match(sagemaker_session.sagemaker_client, endpoint_name, model_name)\n\n\ndef test_deploy_with_input_handlers(sagemaker_session, instance_type, tf_full_version):\n estimator = TensorFlow(\n entry_point=\"training.py\",\n source_dir=TFS_RESOURCE_PATH,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=instance_type,\n py_version=tests.integ.PYTHON_VERSION,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=tf_full_version,\n tags=TAGS,\n )\n\n estimator.fit(job_name=unique_name_from_base(\"test-tf-tfs-deploy\"))\n\n endpoint_name = estimator.latest_training_job.name\n\n with timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n predictor = estimator.deploy(\n initial_instance_count=1,\n instance_type=instance_type,\n endpoint_name=endpoint_name,\n entry_point=os.path.join(TFS_RESOURCE_PATH, \"inference.py\"),\n )\n\n input_data = {\"instances\": [1.0, 2.0, 5.0]}\n expected_result = {\"predictions\": [4.0, 4.5, 6.0]}\n\n result = predictor.predict(input_data)\n assert expected_result == result\n\n\ndef _assert_tags_match(sagemaker_client, resource_arn, tags, retry_count=15):\n # endpoint and training tags might take minutes to propagate.\n for _ in retries(retry_count, \"Getting endpoint tags\", seconds_to_sleep=30):\n actual_tags = sagemaker_client.list_tags(ResourceArn=resource_arn)[\"Tags\"]\n if actual_tags:\n break\n\n assert actual_tags == tags\n\n\ndef _assert_model_tags_match(sagemaker_client, model_name, tags):\n model_description = sagemaker_client.describe_model(ModelName=model_name)\n _assert_tags_match(sagemaker_client, model_description[\"ModelArn\"], tags)\n\n\ndef _assert_endpoint_tags_match(sagemaker_client, endpoint_name, tags):\n endpoint_description = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)\n\n _assert_tags_match(sagemaker_client, endpoint_description[\"EndpointArn\"], tags)\n\n\ndef _assert_training_job_tags_match(sagemaker_client, training_job_name, tags):\n training_job_description = sagemaker_client.describe_training_job(\n TrainingJobName=training_job_name\n )\n _assert_tags_match(sagemaker_client, training_job_description[\"TrainingJobArn\"], tags)\n\n\ndef _assert_model_name_match(sagemaker_client, endpoint_config_name, model_name):\n endpoint_config_description = sagemaker_client.describe_endpoint_config(\n EndpointConfigName=endpoint_config_name\n )\n assert model_name == endpoint_config_description[\"ProductionVariants\"][0][\"ModelName\"]\n" ]
[ [ "numpy.zeros" ] ]
starcraft2-ai/python-env
[ "4504696381b8721b9e9914f8ba8d17241348ecf8", "4504696381b8721b9e9914f8ba8d17241348ecf8" ]
[ "s2py3/lib/python3.6/site-packages/tensorflow/contrib/reduce_slice_ops/ops/gen_reduce_slice_ops.py", "s2py3/lib/python3.6/site-packages/tensorflow/contrib/text/python/ops/gen_skip_gram_ops.py" ]
[ "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: reduce_slice_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('reduce_slice_max')\ndef reduce_slice_max(data, indices, axis, name=None):\n r\"\"\"Dynamically compute the maximum over the first dimension of a tensor according\n\n to start and end indices specified at \"indices\".\n\n For example:\n\n ```prettyprint\n # if 'data' is [[ 1, 20, 3]\n [ 400, 5, 60]\n [ 70, 8, 900]\n [1000,2000,3000]],\n\n and 'indices' is [[0,1]\n [1,1]\n [0,2]],\n\n the output will be [[ 1, 20, 3]\n [ -BIG_VALUE, -BIG_VALUE, -BIG_VALUE]\n [ 400, 20, 60]].\n ```\n\n The data must be at least rank 1. The indices can be of shape (?,2) where the\n first column is start indices and the second column is end indices. The end indices\n are not included in the reduce operation, which means, if you want to do a reduce\n over indices 0,1,2, then you should have start index 0 and end index 3. If end\n index is smaller than or equal to start, the result will be 1. If end index is\n out of bounds, then the reduce operation will automatically stop at the bound, so\n feel free to put a large number as your end of your index if you want to do the\n reduction until the bound. The indices can also be of shape (?), in this case, the\n start index of i will be the element at i, then end index of i will be the element\n at i+1. That is:\n\n ```prettyprint\n indices = [0,5,11,115]\n\n is equivalent to\n\n indices = [ [0,5],\n [5,11],\n [11,115]]\n ```\n\n Args:\n data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n The source of data where the computation will be taken from.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n start, end indices that controls which part to be included.\n axis: A `Tensor` of type `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `data`. 
the computed product values.\n \"\"\"\n _ctx = _context.context()\n if not _ctx.executing_eagerly():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"ReduceSliceMax\", data=data, indices=indices, axis=axis, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\n _execute.record_gradient(\n \"ReduceSliceMax\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._handle, _ctx.device_name, \"ReduceSliceMax\", name,\n _ctx._post_execution_callbacks, data, indices, axis)\n return _result\n except _core._FallbackException:\n return reduce_slice_max_eager_fallback(\n data, indices, axis, name=name)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef reduce_slice_max_eager_fallback(data, indices, axis, name=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function reduce_slice_max\n \"\"\"\n _ctx = _context.context()\n _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\n axis = _ops.convert_to_tensor(axis, _dtypes.int64)\n _inputs_flat = [data, indices, axis]\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\n _result = _execute.execute(b\"ReduceSliceMax\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"ReduceSliceMax\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"ReduceSliceMax\")(None)\n\n\n@tf_export('reduce_slice_min')\ndef reduce_slice_min(data, indices, axis, name=None):\n r\"\"\"Dynamically compute the minimum over the first dimension of a tensor according\n\n to start and end indices specified at 'indices'.\n\n For example:\n\n ```prettyprint\n # if 'data' is [[ 1, 20, 3]\n [ 400, 5, 60]\n [ 70, 8, 900]\n [1000,2000,3000]],\n\n and 'indices' is [[0,1]\n [1,1]\n [0,2]],\n\n the output will be [[ 1, 20, 3]\n [ +BIG_VALUE, +BIG_VALUE, +BIG_VALUE]\n [ 1, 5, 3]].\n ```\n\n The data must be at least rank 1. The indices can be of shape (?,2) where the\n first column is start indices and the second column is end indices. The end indices\n are not included in the reduce operation, which means, if you want to do a reduce\n over indices 0,1,2, then you should have start index 0 and end index 3. If end\n index is smaller than or equal to start, the result will be 1. If end index is\n out of bounds, then the reduce operation will automatically stop at the bound, so\n feel free to put a large number as your end of your index if you want to do the\n reduction until the bound. The indices can also be of shape (?), in this case, the\n start index of i will be the element at i, then end index of i will be the element\n at i+1. That is:\n\n ```prettyprint\n indices = [0,5,11,115]\n\n is equivalent to\n\n indices = [ [0,5],\n [5,11],\n [11,115]]\n ```\n\n Args:\n data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n The source of data where the computation will be taken from.\n indices: A `Tensor`. 
Must be one of the following types: `int32`, `int64`.\n start, end indices that controls which part to be included.\n axis: A `Tensor` of type `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `data`. the computed product values.\n \"\"\"\n _ctx = _context.context()\n if not _ctx.executing_eagerly():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"ReduceSliceMin\", data=data, indices=indices, axis=axis, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\n _execute.record_gradient(\n \"ReduceSliceMin\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._handle, _ctx.device_name, \"ReduceSliceMin\", name,\n _ctx._post_execution_callbacks, data, indices, axis)\n return _result\n except _core._FallbackException:\n return reduce_slice_min_eager_fallback(\n data, indices, axis, name=name)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef reduce_slice_min_eager_fallback(data, indices, axis, name=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function reduce_slice_min\n \"\"\"\n _ctx = _context.context()\n _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\n axis = _ops.convert_to_tensor(axis, _dtypes.int64)\n _inputs_flat = [data, indices, axis]\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\n _result = _execute.execute(b\"ReduceSliceMin\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"ReduceSliceMin\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"ReduceSliceMin\")(None)\n\n\n@tf_export('reduce_slice_prod')\ndef reduce_slice_prod(data, indices, axis, name=None):\n r\"\"\"Dynamically compute the product over the first dimension of a tensor according\n\n to start and end indices specified at 'indices'.\n\n For example:\n\n ```prettyprint\n # if 'data' is [[ 1, 2, 3]\n [ 40, 50, 60]\n [ 700, 800, 900]\n [1000,2000,3000]],\n\n and 'indices' is [[0,1]\n [1,1]\n [0,2]],\n\n the output will be [[ 1, 2, 3]\n [ 1, 1, 1]\n [40,100,180]].\n ```\n\n The data must be at least rank 1. The indices can be of shape (?,2) where the\n first column is start indices and the second column is end indices. The end indices\n are not included in the reduce operation, which means, if you want to do a reduce\n over indices 0,1,2, then you should have start index 0 and end index 3. If end\n index is smaller than or equal to start, the result will be 1. If end index is\n out of bounds, then the reduce operation will automatically stop at the bound, so\n feel free to put a large number as your end of your index if you want to do the\n reduction until the bound. The indices can also be of shape (?), in this case, the\n start index of i will be the element at i, then end index of i will be the element\n at i+1. That is:\n\n ```prettyprint\n indices = [0,5,11,115]\n\n is equivalent to\n\n indices = [ [0,5],\n [5,11],\n [11,115]]\n ```\n\n Args:\n data: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n The source of data where the computation will be taken from.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n start, end indices that controls which part to be included.\n axis: A `Tensor` of type `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `data`. the computed product values.\n \"\"\"\n _ctx = _context.context()\n if not _ctx.executing_eagerly():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"ReduceSliceProd\", data=data, indices=indices, axis=axis, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\n _execute.record_gradient(\n \"ReduceSliceProd\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._handle, _ctx.device_name, \"ReduceSliceProd\", name,\n _ctx._post_execution_callbacks, data, indices, axis)\n return _result\n except _core._FallbackException:\n return reduce_slice_prod_eager_fallback(\n data, indices, axis, name=name)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef reduce_slice_prod_eager_fallback(data, indices, axis, name=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function reduce_slice_prod\n \"\"\"\n _ctx = _context.context()\n _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\n axis = _ops.convert_to_tensor(axis, _dtypes.int64)\n _inputs_flat = [data, indices, axis]\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\n _result = _execute.execute(b\"ReduceSliceProd\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"ReduceSliceProd\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"ReduceSliceProd\")(None)\n\n\n@tf_export('reduce_slice_sum')\ndef reduce_slice_sum(data, indices, axis, name=None):\n r\"\"\"Dynamically sum over the first dimension of a tensor according to start and end\n\n indices specified at 'index'.\n\n For example:\n\n ```prettyprint\n # if 'data' is [[ 1, 2, 3]\n [ 40, 50, 60]\n [ 700, 800, 900]\n [1000,2000,3000]],\n\n and 'indices' is [[0,1]\n [1,1]\n [0,2]],\n\n the output will be [[ 1, 2, 3]\n [ 0, 0, 0]\n [41,52,63]].\n ```\n\n The data must be at least rank 1. The indices must be of shape (?,2) where the\n first column is start indices and the second column is end indices. The end indices\n are not included in the reduce operation, which means, if you want to do a reduce\n over indices 0,1,2, then you should have start index 0 and end index 3. If end\n index is smaller than or equal to start, the result will be zero. If end index is\n out of bounds, then the reduce operation will automatically stop at the bound, so\n feel free to put a large number as your end of your index if you want to do the\n reduction until the bound.\n\n Args:\n data: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n The source of data where the computation will be taken from.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n start, end indices that controls which part to be included.\n axis: A `Tensor` of type `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `data`. the computed sum values.\n \"\"\"\n _ctx = _context.context()\n if not _ctx.executing_eagerly():\n _, _, _op = _op_def_lib._apply_op_helper(\n \"ReduceSliceSum\", data=data, indices=indices, axis=axis, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\n _execute.record_gradient(\n \"ReduceSliceSum\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._handle, _ctx.device_name, \"ReduceSliceSum\", name,\n _ctx._post_execution_callbacks, data, indices, axis)\n return _result\n except _core._FallbackException:\n return reduce_slice_sum_eager_fallback(\n data, indices, axis, name=name)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef reduce_slice_sum_eager_fallback(data, indices, axis, name=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function reduce_slice_sum\n \"\"\"\n _ctx = _context.context()\n _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\n axis = _ops.convert_to_tensor(axis, _dtypes.int64)\n _inputs_flat = [data, indices, axis]\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\n _result = _execute.execute(b\"ReduceSliceSum\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"ReduceSliceSum\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"ReduceSliceSum\")(None)\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"ReduceSliceMax\"\n# input_arg {\n# name: \"data\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"indices\"\n# type_attr: \"Tindices\"\n# }\n# input_arg {\n# name: \"axis\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_COMPLEX64\n# type: DT_INT64\n# type: DT_QINT8\n# type: DT_QUINT8\n# type: DT_QINT32\n# type: DT_BFLOAT16\n# type: DT_UINT16\n# type: DT_COMPLEX128\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# }\n# }\n# }\n# attr {\n# name: \"Tindices\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"ReduceSliceMin\"\n# input_arg {\n# name: \"data\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: 
\"indices\"\n# type_attr: \"Tindices\"\n# }\n# input_arg {\n# name: \"axis\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_COMPLEX64\n# type: DT_INT64\n# type: DT_QINT8\n# type: DT_QUINT8\n# type: DT_QINT32\n# type: DT_BFLOAT16\n# type: DT_UINT16\n# type: DT_COMPLEX128\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# }\n# }\n# }\n# attr {\n# name: \"Tindices\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"ReduceSliceProd\"\n# input_arg {\n# name: \"data\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"indices\"\n# type_attr: \"Tindices\"\n# }\n# input_arg {\n# name: \"axis\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_COMPLEX64\n# type: DT_INT64\n# type: DT_QINT8\n# type: DT_QUINT8\n# type: DT_QINT32\n# type: DT_BFLOAT16\n# type: DT_UINT16\n# type: DT_COMPLEX128\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# }\n# }\n# }\n# attr {\n# name: \"Tindices\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"ReduceSliceSum\"\n# input_arg {\n# name: \"data\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"indices\"\n# type_attr: \"Tindices\"\n# }\n# input_arg {\n# name: \"axis\"\n# type: DT_INT64\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_COMPLEX64\n# type: DT_INT64\n# type: DT_QINT8\n# type: DT_QUINT8\n# type: DT_QINT32\n# type: DT_BFLOAT16\n# type: DT_UINT16\n# type: DT_COMPLEX128\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# }\n# }\n# }\n# attr {\n# name: \"Tindices\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\203\\001\\n\\016ReduceSliceMax\\022\\t\\n\\004data\\\"\\001T\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\010\\n\\004axis\\030\\t\\032\\013\\n\\006output\\\"\\001T\\\" \\n\\001T\\022\\004type:\\025\\n\\0232\\021\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n\\203\\001\\n\\016ReduceSliceMin\\022\\t\\n\\004data\\\"\\001T\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\010\\n\\004axis\\030\\t\\032\\013\\n\\006output\\\"\\001T\\\" \\n\\001T\\022\\004type:\\025\\n\\0232\\021\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n\\204\\001\\n\\017ReduceSliceProd\\022\\t\\n\\004data\\\"\\001T\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\010\\n\\004axis\\030\\t\\032\\013\\n\\006output\\\"\\001T\\\" 
\\n\\001T\\022\\004type:\\025\\n\\0232\\021\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n\\203\\001\\n\\016ReduceSliceSum\\022\\t\\n\\004data\\\"\\001T\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\010\\n\\004axis\\030\\t\\032\\013\\n\\006output\\\"\\001T\\\" \\n\\001T\\022\\004type:\\025\\n\\0232\\021\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\")\n", "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: gen_skip_gram_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_skip_gram_generate_candidates_outputs = [\"tokens\", \"labels\"]\n_SkipGramGenerateCandidatesOutput = _collections.namedtuple(\n \"SkipGramGenerateCandidates\", _skip_gram_generate_candidates_outputs)\n\n\n@tf_export('skip_gram_generate_candidates')\ndef skip_gram_generate_candidates(input_tensor, min_skips, max_skips, start, limit, emit_self_as_target, seed=0, seed2=0, name=None):\n r\"\"\"Generates skip-gram token and label paired Tensors from the input tensor.\n\n See docs for the public-facing skip_gram_sample() Python op for more details.\n\n Args:\n input_tensor: A `Tensor`.\n min_skips: A `Tensor` of type `int32`.\n max_skips: A `Tensor` of type `int32`.\n start: A `Tensor` of type `int32`.\n limit: A `Tensor` of type `int32`.\n emit_self_as_target: A `Tensor` of type `bool`.\n seed: An optional `int`. Defaults to `0`.\n seed2: An optional `int`. Defaults to `0`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (tokens, labels).\n\n tokens: A `Tensor`. Has the same type as `input_tensor`.\n labels: A `Tensor`. 
Has the same type as `input_tensor`.\n \"\"\"\n _ctx = _context.context()\n if not _ctx.executing_eagerly():\n if seed is None:\n seed = 0\n seed = _execute.make_int(seed, \"seed\")\n if seed2 is None:\n seed2 = 0\n seed2 = _execute.make_int(seed2, \"seed2\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SkipGramGenerateCandidates\", input_tensor=input_tensor,\n min_skips=min_skips, max_skips=max_skips, start=start, limit=limit,\n emit_self_as_target=emit_self_as_target, seed=seed, seed2=seed2,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"seed\", _op.get_attr(\"seed\"), \"seed2\",\n _op.get_attr(\"seed2\"))\n _execute.record_gradient(\n \"SkipGramGenerateCandidates\", _inputs_flat, _attrs, _result, name)\n _result = _SkipGramGenerateCandidatesOutput._make(_result)\n return _result\n\n else:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._handle, _ctx.device_name, \"SkipGramGenerateCandidates\", name,\n _ctx._post_execution_callbacks, input_tensor, min_skips, max_skips,\n start, limit, emit_self_as_target, \"seed\", seed, \"seed2\", seed2)\n _result = _SkipGramGenerateCandidatesOutput._make(_result)\n return _result\n except _core._FallbackException:\n return skip_gram_generate_candidates_eager_fallback(\n input_tensor, min_skips, max_skips, start, limit,\n emit_self_as_target, seed=seed, seed2=seed2, name=name)\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n\n\ndef skip_gram_generate_candidates_eager_fallback(input_tensor, min_skips, max_skips, start, limit, emit_self_as_target, seed=0, seed2=0, name=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function skip_gram_generate_candidates\n \"\"\"\n _ctx = _context.context()\n if seed is None:\n seed = 0\n seed = _execute.make_int(seed, \"seed\")\n if seed2 is None:\n seed2 = 0\n seed2 = _execute.make_int(seed2, \"seed2\")\n _attr_T, (input_tensor,) = _execute.args_to_matching_eager([input_tensor], _ctx)\n min_skips = _ops.convert_to_tensor(min_skips, _dtypes.int32)\n max_skips = _ops.convert_to_tensor(max_skips, _dtypes.int32)\n start = _ops.convert_to_tensor(start, _dtypes.int32)\n limit = _ops.convert_to_tensor(limit, _dtypes.int32)\n emit_self_as_target = _ops.convert_to_tensor(emit_self_as_target, _dtypes.bool)\n _inputs_flat = [input_tensor, min_skips, max_skips, start, limit, emit_self_as_target]\n _attrs = (\"T\", _attr_T, \"seed\", seed, \"seed2\", seed2)\n _result = _execute.execute(b\"SkipGramGenerateCandidates\", 2,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"SkipGramGenerateCandidates\", _inputs_flat, _attrs, _result, name)\n _result = _SkipGramGenerateCandidatesOutput._make(_result)\n return _result\n\n_ops.RegisterShape(\"SkipGramGenerateCandidates\")(None)\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"SkipGramGenerateCandidates\"\n# input_arg {\n# name: \"input_tensor\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"min_skips\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"max_skips\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"start\"\n# type: 
DT_INT32\n# }\n# input_arg {\n# name: \"limit\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"emit_self_as_target\"\n# type: DT_BOOL\n# }\n# output_arg {\n# name: \"tokens\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"labels\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# }\n# attr {\n# name: \"seed\"\n# type: \"int\"\n# default_value {\n# i: 0\n# }\n# }\n# attr {\n# name: \"seed2\"\n# type: \"int\"\n# default_value {\n# i: 0\n# }\n# }\n# is_stateful: true\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\307\\001\\n\\032SkipGramGenerateCandidates\\022\\021\\n\\014input_tensor\\\"\\001T\\022\\r\\n\\tmin_skips\\030\\003\\022\\r\\n\\tmax_skips\\030\\003\\022\\t\\n\\005start\\030\\003\\022\\t\\n\\005limit\\030\\003\\022\\027\\n\\023emit_self_as_target\\030\\n\\032\\013\\n\\006tokens\\\"\\001T\\032\\013\\n\\006labels\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\017\\n\\004seed\\022\\003int\\032\\002\\030\\000\\\"\\020\\n\\005seed2\\022\\003int\\032\\002\\030\\000\\210\\001\\001\")\n" ]
[ [ "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.eager.context.context", "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.python.eager.execute.execute", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.execute.record_gradient", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.util.tf_export.tf_export" ], [ "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.eager.context.context", "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.execute.execute", "tensorflow.python.eager.execute.make_int", "tensorflow.python.eager.execute.record_gradient", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.util.tf_export.tf_export" ] ]
ar90n/kkt
[ "e772860b20231e067973478350a4f0edb8bf5db1" ]
[ "examples/titanic_multi_files/script/random_forest.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom titanic_multi_files import io, util\nfrom pathlib import Path\n\ntrain, test = io.load_dataset()\n\ndata = util.merge_dataset(train, test)\ndata[\"Sex\"].replace([\"male\", \"female\"], [0, 1], inplace=True)\ndata[\"Embarked\"].replace([\"C\", \"Q\", \"S\", None], [0, 1, 2, 3], inplace=True)\ndata[\"Fare\"].fillna(np.mean(data[\"Fare\"]), inplace=True)\ndata[\"Age\"].fillna(np.mean(data[\"Age\"]), inplace=True)\ntrain, test = util.split_dataset(data)\n\nfeature_columns = [\n \"Pclass\",\n \"Embarked\",\n \"Fare\",\n \"Parch\",\n \"SibSp\",\n \"Age\",\n \"Sex\",\n]\nx_train = train[feature_columns].values\ny_train = train[\"Survived\"].values\nx_test = test[feature_columns].values\n\nmax_depth = int(os.environ.get(\"RF_MAX_DEPTH\", 2))\ncrf = RandomForestClassifier(max_depth=max_depth, random_state=0)\ncrf.fit(x_train, y_train)\ny_pred = crf.predict(x_test)\n\nsub = io.load_submission()\nsub[\"Survived\"] = y_pred.astype(np.int32)\nio.save_submission(sub)\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.mean" ] ]
samir-joshi/tmtoolkit
[ "42fdd388e606d6d1c45d80d2364dfa42b8408111" ]
[ "examples/bundestag18_tfidf.py" ]
[ "\"\"\"\nExample script that loads and processes the proceedings of the 18th German Bundestag and generates a tf-idf matrix.\nThe data is quite large, consisting of 15,733 documents with 14,355,341 tokens in total. This script shows how to\nhandle large data efficiently by using the parallel processing power of tmtoolkit and sparse matrix calculations\nthat use few memory.\n\nNote that it is highly advisable to run this script section by section (denoted with \"#%%\" or even line by line in an\ninteractive Python interpreter in order to see the effects of each code block.\n\nThe data for the debates comes from offenesparlament.de, see https://github.com/Datenschule/offenesparlament-data.\n\nMarkus Konrad <markus.konrad@wzb.eu>\nJune 2019\n\"\"\"\n\nimport re\nimport pickle\nimport string\nimport random\nimport logging\nfrom pprint import pprint\nfrom zipfile import ZipFile\n\nfrom tmtoolkit.preprocess import TMPreproc\nfrom tmtoolkit.corpus import Corpus\nfrom tmtoolkit.bow.bow_stats import tfidf, sorted_terms_datatable\nfrom tmtoolkit.utils import unpickle_file, pickle_data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\npd.set_option('display.width', 140)\npd.set_option('display.max_columns', 100)\n\n#%% Optional: set up output log for tmtoolkit\n\nlogging.basicConfig(level=logging.INFO)\ntmtoolkit_log = logging.getLogger('tmtoolkit')\ntmtoolkit_log.setLevel(logging.INFO)\ntmtoolkit_log.propagate = True\n\n#%% Load the data inside the zip file\n\nprint('loading data from zip file')\n\nwith ZipFile('data/bt18_full.zip') as bt18zip:\n # there is a pickled pandas data frame inside the zipfile\n # extract it and load it\n bt18pickle = bt18zip.read('bt18_speeches_merged.pickle')\n bt18_data = pickle.loads(bt18pickle)\n\n# we don't need this anymore, remove it to free memory\ndel bt18pickle, bt18zip\n\n#%% Generate document labels\n\n# format of the document labels: <session_number>_<speech_number>\nbt18_data['doc_label'] = ['%s_%s' % (str(sitzung).zfill(3), str(seq).zfill(5))\n for sitzung, seq in zip(bt18_data.sitzung, bt18_data.sequence)]\n\nprint('loaded data frame with %d rows:' % bt18_data.shape[0])\nprint(bt18_data.head())\n\n\n#%% Generate a Corpus object to preprocess the raw, untokenized text data\n\n# we use the column \"doc_label\" as document labels and \"text\" as raw text\ncorpus = Corpus(dict(zip(bt18_data.doc_label, bt18_data.text)))\nprint('created corpus')\n\nprint('document lengths in number of characters:')\npprint(corpus.doc_lengths)\n\n# we don't need this anymore, remove it to free memory\ndel bt18_data\n\n#%% Investigate the set of characters used in the whole corpus\n\n# we can see that there are several \"strange\" characters and unprintable unicode characters which may later cause\n# trouble\nprint('used set of characters used in the whole corpus:')\npprint(corpus.unique_characters)\n\n# lets see which of these are not in Pythons standard set of printable ASCII characters\nprint('used set of characters not in Pythons standard set of printable ASCII characters:')\npprint(corpus.unique_characters - set(string.printable))\n\n#%% Replace some characters in each of document of the corpus\n\n# create a table to replace some characters\n# None denotes characters that should be deleted\nchar_transl_table = {\n '\\x1e': None,\n '\\xad': None,\n '´': None,\n 'ʼ': None,\n '̃': None,\n '̆': None,\n 'ҫ': 'ç', # they look the same but they aren't\n '‘': None,\n '’': None,\n '‚': ',',\n '“': None,\n '”': None,\n '„': None,\n '…': None,\n '\\u202f': 
None,\n '�': None\n}\n\nprint('replacing characters in each document of the corpus')\ncorpus.replace_characters(char_transl_table)\n\nprint('these non-ASCII characters are left:')\npprint(corpus.unique_characters - set(string.printable))\n\n#%% Correct contractions\n\n# some contractions have a stray space in between, like \"EU -Hilfen\" where it should be \"EU-Hilfen\"\n# correct this by applying a custom function with a regular expression (RE) to each document in the corpus\npttrn_contraction_ws = re.compile(r'(\\w+)(\\s+)(-\\w+)')\n\nprint('correcting wrong contractions')\n# in each document text `t`, remove the RE group 2 (the stray white space \"(\\s+)\") for each match `m`\ncorpus.apply(lambda t: pttrn_contraction_ws.sub(lambda m: m.group(1) + m.group(3), t))\n\n#%% Create a TMPreproc object for token processing\n\n# this takes some time because the documents are directly tokenized\nprint('creating TMPreproc object from corpus')\npreproc = TMPreproc(corpus, language='german')\nprint('created: %s' % preproc)\n\n# we don't need this anymore, remove it to free memory\ndel corpus\n\n#%% Calculate the total number of tokens in the whole corpus\n\nprint('total number of tokens in the whole corpus:')\nprint(sum(preproc.doc_lengths.values()))\n\n#%% Have a glimpse at the tokens\n\n# Note that \"preproc.tokens_datatable\" (*table* instead of *frame*) is much faster but the \"datatable\" package is still\n# in early development stages. If you like to have a pandas dataframe instead, use the property \"tokens_dataframe\".\n\nprint('first 50 rows from the tokens dataframe:')\n\ntry:\n import datatable as dt\n has_datatable = True\n print(preproc.tokens_datatable.head(50))\nexcept ImportError: # fallback when \"datatable\" package is not installed\n has_datatable = False\n print(preproc.tokens_dataframe.head(50))\n\n\n#%% Have a look at the vocabulary of the whole corpus\nprint('vocabulary:')\npprint(preproc.vocabulary)\n\nprint('\\nvocabulary contains %d tokens' % len(preproc.vocabulary))\n\n#%% Fix hyphenation problems\n\n# we can see in the above vocabulary that there are several hyphenation problems (e.g. 
\"wiederho-len\"), because of\n# words being hyphenated on line breaks\n# we use a quite \"brutal\" way to fix this by simply removing all hyphens in the tokens\n\npreproc.remove_chars_in_tokens(['-'])\n\nprint('vocabulary:')\npprint(preproc.vocabulary)\n\nprint('\\nvocabulary contains %d tokens' % len(preproc.vocabulary))\n\n\n#%% Display a keywords-in-context (KWIC) table\n\n# the result is returned as *datatable* (because it is much faster to construct)\nprint('keywords-in-context (KWIC) table for keyword \"Merkel\":')\nprint(preproc.get_kwic_table('Merkel'))\n\n#%% Apply Part-of-Speech tagging (POS tagging) and lemmatization to normalize the vocabulary\n\n# this is very computationally extensive and hence takes a long time, even when computed in parallel\n# consider storing / loading the processing state as shown below\npreproc.pos_tag().lemmatize()\n\n#%% Saving / loading state\n\n# at any time you can save the current processing state to disk via `save_state(<path to file>)` and later\n# restore it via `from_state(<path to file>)`\n# this is extremely useful when you have computations that take a long time and after which you want to create\n# \"save points\" in order to load the state and continue experimenting with the data without having to run the\n# whole processing pipeline again\n\n# preproc.save_state('data/bt18_tagged_lemmatized_state.pickle')\n# preproc = TMPreproc.from_state('data/bt18_tagged_lemmatized_state.pickle')\n\n#%% Further token normalization\n\n# convert all tokens to lowercase and apply several \"cleaning\" methods (see `clean_tokens` for details)\nprint('applying further token normalization')\npreproc.tokens_to_lowercase().clean_tokens().remove_tokens(r'^-.+', match_type='regex')\n\nprint('vocabulary:')\npprint(preproc.vocabulary)\n\nprint('\\nvocabulary contains %d tokens' % len(preproc.vocabulary))\n\n# there are still some stray tokens which should be removed:\npreproc.remove_tokens(['#en', \"''\", \"'s\", '+++', '+40', ',50', '...', '.plädieren'])\n\n#%% Let's have a look at the most frequent tokens\n\nprint('retrieving document frequencies for all tokens in the vocabulary')\nvocab_doc_freq = preproc.vocabulary_rel_doc_frequency\nvocab_doc_freq_df = pd.DataFrame({'token': list(vocab_doc_freq.keys()),\n 'freq': list(vocab_doc_freq.values())})\n\nprint('top 50 tokens by relative document frequency:')\nvocab_top = vocab_doc_freq_df.sort_values('freq', ascending=False).head(50)\nprint(vocab_top)\n\n# plot this\nplt.figure()\nvocab_top.plot(x='token', y='freq', kind='bar')\nplt.show()\n\n#%% Further token cleanup\n\n# we can remove tokens above a certain threshold of (relative or absolute) document frequency\npreproc.remove_common_tokens(0.8) # this will only remove \"müssen\"\n\n# since we'll later use tf-idf, common words don't have much influence on the result and can remain\n\n#%% Document lengths (number of tokens per document)\n\ndoc_labels = np.array(list(preproc.doc_lengths.keys()))\ndoc_lengths = np.array(list(preproc.doc_lengths.values()))\n\nprint('range of document lengths: %d tokens minimum, %d tokens maximum' % (np.min(doc_lengths), np.max(doc_lengths)))\nprint('mean document length:', np.mean(doc_lengths))\nprint('mean document length:', np.median(doc_lengths))\n\nplt.figure()\nplt.hist(doc_lengths, bins=100)\nplt.title('Histogram of document lengths')\nplt.xlabel('Number of tokens')\nplt.show()\n\n\n#%% Let's have a look at very short document\n\n# document labels of documents with lesser or equal 30 tokens\ndoc_labels_short = 
doc_labels[doc_lengths <= 30]\n\nprint('%d documents with lesser or equal 30 tokens:' % len(doc_labels_short))\nfor dl in doc_labels_short:\n print(dl)\n pprint(' '.join(preproc.tokens[dl]))\n print('---')\n\n\n#%% Remove very short documents\n\nprint('removing documents with lesser or equal 30 tokens')\npreproc.remove_documents_by_name(doc_labels_short)\n\n\n#%% Another keywords-in-context (KWIC) table\n\nprint('keywords-in-context (KWIC) table for keyword \"merkel\" with normalized tokens:')\nprint(preproc.get_kwic_table('merkel'))\n\n#%% Create a document-term-matrix (DTM)\n\n# this creates a sparse DTM where the matrix rows correspond to the current document labels and the\n# matrix columns correspond to the current vocabulary\n# the calculations take several minutes, even when they're performed in parallel\n\nprint('creating document-term-matrix (DTM)')\ndtm = preproc.dtm\n\nprint('matrix created:')\nprint(repr(dtm))\n\ndoc_labels = preproc.doc_labels\nvocab = np.array(preproc.vocabulary)\n\nprint('number of rows match number of documents (%d)' % len(doc_labels))\nprint('number of columns match vocabulary size (%d)' % len(vocab))\n\n\n#%% Saving / loading a DTM\n\n# again, you may store the DTM along with the document labels and vocabulary to disk to later load it again:\n\n# pickle_data((dtm, doc_labels, vocab), 'data/bt18_dtm.pickle')\n# dtm, doc_labels, vocab = unpickle_file('data/bt18_dtm.pickle')\n\n\n#%% Computing a tf-idf matrix\n\n# we can apply tf-idf to the DTM\n# the result will remain a sparse matrix, hence it doesn't allocate much memory\n\nprint('computing a tf-idf matrix from the DTM')\ntfidf_mat = tfidf(dtm)\nprint('matrix created:')\nprint(repr(tfidf_mat))\n\n#%% Investigating the top tokens of the tf-idf transformed matrix\n\n# this will create a data frame of the 20 most \"informative\" (tf-idf-wise) tokens per document\ntop_tokens = sorted_terms_datatable(tfidf_mat, vocab, doc_labels, top_n=20)\n\nrandom_doc = random.choice(doc_labels)\nprint('20 most \"informative\" (tf-idf high ranked) tokens in randomly chosen document \"%s\":' % random_doc)\n\n\nif has_datatable:\n print(top_tokens[dt.f.doc == random_doc, :])\nelse:\n print(top_tokens[top_tokens.doc == random_doc])\n" ]
[ [ "numpy.max", "numpy.array", "pandas.set_option", "numpy.median", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.min", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "matplotlib.pyplot.show" ] ]
acmlia/jobs
[ "ee13ac3363c8a4fab64e21c8787739f5de4ef385", "ee13ac3363c8a4fab64e21c8787739f5de4ef385", "ee13ac3363c8a4fab64e21c8787739f5de4ef385" ]
[ "tf_regression_sfcprcp_T17B6.py", "tf_regression_sfcprcp_T8.py", "tf_regression_sfcprcp_T10.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 21:31:04 2019\n\n@author: dvdgmf\n\"\"\"\n# https://www.tensorflow.org/tutorials/keras/basic_regression\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.decomposition import PCA\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_yaml\n\n\nprint(tf.__version__)\n\n# --------------------------\n# DROP DATA OUTSIDE INTERVAL\n# --------------------------\ndef keep_interval(keepfrom:0.0, keepto:1.0, dataframe, target_col:str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval] \n return result\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#def tic():\n# global _start_time\n# _start_time = time.time()\n#\n#def tac():\n# t_sec = round(time.time() - _start_time)\n# (t_min, t_sec) = divmod(t_sec, 60)\n# (t_hour, t_min) = divmod(t_min, 60)\n# print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n \n# Fix random seed for reproducibility:\nseed = 7\nnp.random.seed(seed)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Path, file and load DATAFRAME\n\nvrn = 'T17B6'\n\nfile = 'yearly_br_underc1_0956.csv'\npath = '/media/DATA/tmp/datasets/brazil/brazil_qgis/csv/'\nfig_title = 'tf_regression_'+vrn+'_undc1_0956_'\npath_fig = '/media/DATA/tmp/git-repositories/jobs/tf_regression_figures/'\n\ndf_orig = pd.read_csv(os.path.join(path, file), sep=',', decimal='.')\n\n#path = '/home/david/DATA/'\n#file = 'yrly_br_under_c1.csv'\n#path_fig = '/home/david/DATA/'\n#file = 'yrly_br_under_c1_over_c3c4.csv'\n#file_name = os.path.splitext(file)[0]\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#### PRE - PROCESSING:\n# Count the number of pixels by classs:\n\n#colunas = list(df_orig.columns.values)\n#df_orig = df_orig.loc[:,colunas]\n#x, y = df_orig.loc[:,colunas], df_orig.loc[:,['CLASSE']]\n#x_arr = np.asanyarray(x)\n#y_arr = np.asanyarray(y)\n#y_arr = np.ravel(y_arr)\n#print('Original dataset shape %s' % Counter(y_arr))\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#dataset=df_orig.drop(columns=['lat','lon','sfccode', 'T2m','tcwv','skint',\n# 'cnvprcp','10V','10H','18V','18H','23V','36H',\n# '89H','166H','10VH','18VH','SSI','delta_neg',\n# 'delta_pos','MPDI','MPDI_scaled','PCT10','PCT18',\n# 'TagRain', 
'CLASSE'])\n\ndf_input=df_orig.loc[:,['10V','10H','18V','18H','36V','36H','89V','89H',\n '166V','166H','183VH','sfccode','T2m','tcwv', 'PCT36','PCT89','89VH']]\n\ncolunas = ['10V','10H','18V','18H','36V','36H','89V','89H',\n '166V','166H','183VH','sfccode','T2m','tcwv', 'PCT36','PCT89','89VH']\nscaler = StandardScaler()\nnormed_input = scaler.fit_transform(df_input) \ndf_normed_input = pd.DataFrame(normed_input[:],\n columns = colunas)\nancillary=df_normed_input.loc[:,['183VH','sfccode','T2m','tcwv','PCT36','PCT89','89VH']]\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------ \n# Choosing the number of components:\n\nTB1 = df_normed_input.loc[:,['10V','10H','18V','18H']]\nTB2 = df_normed_input.loc[:,['36V','36H','89V','89H','166V','166H']]\n\n#------------------------------------------------------------------------------\n# Verifying the number of components that most contribute: \npca = PCA()\npca1 = pca.fit(TB1)\nplt.plot(np.cumsum(pca1.explained_variance_ratio_))\nplt.xlabel('Number of components for TB1')\nplt.ylabel('Cumulative explained variance');\nplt.savefig(\"PCA_TB1.png\")\n#--- \npca_trans1 = PCA(n_components=2)\npca1=pca_trans1.fit(TB1)\nTB1_transformed = pca_trans1.transform(TB1)\nprint(\"original shape: \", TB1.shape)\nprint(\"transformed shape:\", TB1_transformed.shape)\n#------------------------------------------------------------------------------\npca = PCA()\npca2 = pca.fit(TB2)\nplt.plot(np.cumsum(pca2.explained_variance_ratio_))\nplt.xlabel('Number of components for TB2')\nplt.ylabel('Cumulative explained variance');\nplt.savefig(\"PCA_TB2.png\")\n#---\npca_trans2 = PCA(n_components=2)\npca2=pca_trans2.fit(TB2)\nTB2_transformed = pca_trans2.transform(TB2)\nprint(\"original shape: \", TB2.shape)\nprint(\"transformed shape:\", TB2_transformed.shape)\n#------------------------------------------------------------------------------\n# JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:\n\nPCA1=pd.DataFrame()\n\nPCA1 = pd.DataFrame(TB1_transformed[:],\n columns = ['pca1_1', 'pca_2'])\nPCA2 = pd.DataFrame(TB2_transformed[:],\n columns = ['pca2_1','pca2_2'])\n\ndataset=PCA1.join(PCA2, how='right')\ndataset=dataset.join(ancillary, how='right')\ndataset=dataset.join(df_orig.loc[:,['sfcprcp']], how='right')\n \n#df_orig['sfcprcp']=df_orig[['sfcprcp']].astype(int)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\ndataset = keep_interval(0.1, 100.0, dataset, 'sfcprcp')\n# scale the output between 0 and 1 for the colorbar\n# y = minmax_scale(y_full)\n\n# --------------------------------------\n# Transform pd.DataFrame column to array\n# --------------------------------------\n#x_position = dataset.loc[:,['sfcprcp']]\n#x_array = np.asanyarray(x_position)\n#plt.plot(x_array)\n#plt.show() \n\n#threshold_rain =0.1\n#rain_pixels = np.where((dataset['sfcprcp'] >= threshold_rain))\n#dataset=dataset.iloc[rain_pixels] \n\n# \n# ----------------------------------------\n# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\nn = 0.90\nto_remove = np.random.choice(\n dataset.index,\n size=int(dataset.shape[0]*n),\n replace=False)\ndataset = dataset.drop(to_remove)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split the data into train and test\n# Now split the 
dataset into a training set and a test set.\n# We will use the test set in the final evaluation of our model.\n\ntrain_dataset = dataset.sample(frac=0.8,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the data:\n# Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\ncolunas = list(dataset.columns.values)\n#sns.pairplot(df_orig[colunas], diag_kind=\"kde\")\n#sns.pairplot(train_dataset[colunas], diag_kind=\"kde\")\n#sns.pairplot(test_dataset[colunas], diag_kind=\"kde\")\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Also look at the overall statistics:\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"sfcprcp\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split features from labels:\n# Separate the target value, or \"label\", from the features. This label is the value that you will train the model to predict.\n\ny_train = train_dataset.pop('sfcprcp')\ny_test = test_dataset.pop('sfcprcp')\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Normalize the data:\n# Look again at the train_stats block above and note how different the ranges \n# of each feature are.\n\n# It is good practice to normalize features that use different scales and ranges.\n# Although the model might converge without feature normalization, it makes \n# training more difficult, and it makes the resulting model \n# dependent on the choice of units used in the input. 
\n\n#def norm(x):\n# return (x - train_stats['mean']) / train_stats['std']\n#\n#normed_train_data = norm(train_dataset)\n#normed_test_data = norm(test_dataset)\n\n\n#scaler=QuantileTransformer(output_distribution='uniform')\nscaler = StandardScaler()\nnormed_train_data = scaler.fit_transform(train_dataset)\nnormed_test_data = scaler.fit_transform(test_dataset)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Build the model:\n\n#def build_model():\n# model = keras.Sequential([\n# layers.Dense(24, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n# layers.Dense(12, activation=tf.nn.relu),\n# layers.Dense(1)\n# ])\n# optimizer = tf.keras.optimizers.Adam(0.001)\n# model.compile(loss='mean_squared_error',\n# optimizer=optimizer,\n# metrics=['mean_absolute_error', 'mean_squared_error'])\n# return model\n\ndef build_model():\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=[len(train_dataset.keys())] ))\n model.add(Dense(11, activation='relu'))\n model.add(Dense(33, activation='relu'))\n model.add(Dense(15, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mean_absolute_error', 'mean_squared_error']) \n return model\n\nmodel = build_model()\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the model:\n# Use the .summary method to print a simple description of the model\n\nmodel.summary()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Now try out the model. 
\n# Take a batch of 10 examples from the training\n# data and call model.predict on it.\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It seems to be working, and it produces a result \n# of the expected shape and type.\n\n# Train the model:\n# Train the model for 1000 epochs, and record the training\n# and validation accuracy in the history object.\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[PrintDot()])\nprint(history.history.keys())\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Visualize the model's training progress using the stats\n# stored in the history object.\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n \n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(path_fig+fig_name)\n \nplot_history(history)\n\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Ploting again, but with the EarlyStopping apllied:\n\ndef plot_history_EarlyStopping(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,5])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,30])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_EarlyStopping.png\"\n 
plt.savefig(path_fig+fig_name)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n \nmodel = build_model()\n\n# The patience parameter is the amount of epochs to check for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history_EarlyStopping(history)\n\n# The graph shows that on the validation set, the average error \n# is usually around +/- 2 MPG. Is this good? \n# We'll leave that decision up to you.\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Let's see how well the model generalizes by using \n# the test set, which we did not use when training the model. \n# This tells us how well we can expect the model to predict \n# when we use it in the real world.\n\nloss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n\n# Make predictions\n# Finally, predict SFCPRCP values using data in the testing set:\n\ntest_predictions = model.predict(normed_test_data).flatten()\n\nplt.figure()\nplt.scatter(y_test, test_predictions)\nplt.xlabel('True Values [sfcprcp]')\nplt.ylabel('Predictions [sfcprcp]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nfig_name = fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\nfig = plt.figure()\nax = plt.gca()\nax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')\nax.set_yscale('log')\nax.set_xscale('log')\nax.set_xlabel('True Values [sfcprcp]')\nax.set_ylabel('Predictions [sfcprcp]')\nplt.plot([-100, 100], [-100, 100])\nfig_name = fig_title + \"_plot_scatter_LOG_y_test_vs_y_pred.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It looks like our model predicts reasonably well. 
\n# Let's take a look at the error distribution.\n\nerror = test_predictions - y_test\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [sfcprcp]\")\nplt.ylabel(\"Count\")\nfig_name = fig_title + \"_prediction_error.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\n# Saving a model\n#if __name__ == '__main__':\n# _start_time = time.time()\n#\n# tic()\n\n# serialize model to YAML\nmodel_yaml = model.to_yaml()\nwith open('tf_regression_'+vrn+'.yaml', 'w') as yaml_file:\n yaml_file.write(model_yaml)\n# serialize weights to HDF5\nmodel.save_weights('tf_regression_'+vrn+'.h5')\nprint(\"Saved model to disk\")\n# \n## later...\n# \n## load YAML and create model\n#yaml_file = open('tf_regression_T17.yaml', 'r')\n#loaded_model_yaml = yaml_file.read()\n#yaml_file.close()\n#loaded_model = model_from_yaml(loaded_model_yaml)\n## load weights into new model\n#loaded_model.load_weights(\"tf_regression_T17.h5\")\n#print(\"Loaded model from disk\")\n# \n## evaluate loaded model on test data\n#loaded_model.compile(loss='mean_squared_error',\n# optimizer='adam',\n# metrics=['mean_absolute_error', 'mean_squared_error'])\n#score = loaded_model.evaluate(normed_test_data, y_test, verbose=0)\n#print(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]))\n# \n# training_model = history\n# #grid_result = training_model.run_TuningRegressionPrecipitation()\n# joblib.dump(training_model, 'teste.pkl')\n## loaded_model = joblib.load('/media/DATA/tmp/git-repositories/jobs/model_trained_regression_precipitation_W1.pkl')\n# \n# \n# \n# tac()\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\n# It's not quite gaussian, but we might expect that because \n# the number of samples is very small.\n\n# Conclusion:\n\n# This notebook introduced a few techniques to handle a regression problem.\n\n# >> Mean Squared Error (MSE) is a common loss function used for regression problems (different loss functions are used for classification problems).\n\n# >> Similarly, evaluation metrics used for regression differ from classification. 
A common regression metric is Mean Absolute Error (MAE).\n\n# >> When numeric input data features have values with different ranges, each feature should be scaled independently to the same range.\n\n# >> If there is not much training data, one technique is to prefer a small network with few hidden layers to avoid overfitting.\n\n# >> Early stopping is a useful technique to prevent overfitting.\n\n#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 21:31:04 2019\n\n@author: dvdgmf\n\"\"\"\n# https://www.tensorflow.org/tutorials/keras/basic_regression\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_yaml\n\n\nprint(tf.__version__)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#def tic():\n# global _start_time\n# _start_time = time.time()\n#\n#def tac():\n# t_sec = round(time.time() - _start_time)\n# (t_min, t_sec) = divmod(t_sec, 60)\n# (t_hour, t_min) = divmod(t_min, 60)\n# print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n \n# Fix random seed for reproducibility:\nseed = 7\nnp.random.seed(seed)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Path, file and load DATAFRAME\n\nfile = 'yearly_br_underc1_0956.csv'\npath = '/media/DATA/tmp/datasets/brazil/brazil_qgis/csv/'\nfig_title = 'tf_regression_T8_undc1_0956_'\npath_fig = '/media/DATA/tmp/git-repositories/jobs/tf_regression_figures/'\n\ndf_orig = pd.read_csv(os.path.join(path, file), 
sep=',', decimal='.')\n\n#path = '/home/david/DATA/'\n#file = 'yrly_br_under_c1.csv'\n#path_fig = '/home/david/DATA/'\n#file = 'yrly_br_under_c1_over_c3c4.csv'\n#file_name = os.path.splitext(file)[0]\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Count the number of pixels by classs:\n\ncolunas = list(df_orig.columns.values)\ndf_orig = df_orig.loc[:,colunas]\nx, y = df_orig.loc[:,colunas], df_orig.loc[:,['CLASSE']]\nx_arr = np.asanyarray(x)\ny_arr = np.asanyarray(y)\ny_arr = np.ravel(y_arr)\nprint('Original dataset shape %s' % Counter(y_arr))\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\ndataset=df_orig.drop(columns=['lat','lon','sfccode', 'T2m','tcwv','skint',\n 'cnvprcp','10V','10H','18V','18H','23V','36H',\n '89H','166H','10VH','18VH','SSI','delta_neg',\n 'delta_pos','MPDI','MPDI_scaled','PCT10','PCT18',\n 'TagRain', 'CLASSE'])\n#df_orig['sfcprcp']=df_orig[['sfcprcp']].astype(int)\n\nthreshold_rain =0.1\nrain_pixels = np.where((dataset['sfcprcp'] >= threshold_rain))\ndataset=dataset.iloc[rain_pixels]\n \n# ----------------------------------------\n# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\nn = 0.90\nto_remove = np.random.choice(\n dataset.index,\n size=int(dataset.shape[0]*n),\n replace=False)\ndataset = dataset.drop(to_remove)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split the data into train and test\n# Now split the dataset into a training set and a test set.\n# We will use the test set in the final evaluation of our model.\n\ntrain_dataset = dataset.sample(frac=0.8,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the data:\n# Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\ncolunas = list(dataset.columns.values)\n#sns.pairplot(df_orig[colunas], diag_kind=\"kde\")\n#sns.pairplot(train_dataset[colunas], diag_kind=\"kde\")\n#sns.pairplot(test_dataset[colunas], diag_kind=\"kde\")\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Also look at the overall statistics:\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"sfcprcp\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split features from labels:\n# Separate the target value, or \"label\", from the features. 
This label is the value that you will train the model to predict.\n\ny_train = train_dataset.pop('sfcprcp')\ny_test = test_dataset.pop('sfcprcp')\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Normalize the data:\n# Look again at the train_stats block above and note how different the ranges \n# of each feature are.\n\n# It is good practice to normalize features that use different scales and ranges.\n# Although the model might converge without feature normalization, it makes \n# training more difficult, and it makes the resulting model \n# dependent on the choice of units used in the input. \n\n#def norm(x):\n# return (x - train_stats['mean']) / train_stats['std']\n#\n#normed_train_data = norm(train_dataset)\n#normed_test_data = norm(test_dataset)\n\n\n#scaler=QuantileTransformer(output_distribution='uniform')\nscaler = StandardScaler()\nnormed_train_data = scaler.fit_transform(train_dataset)\nnormed_test_data = scaler.fit_transform(test_dataset)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Build the model:\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(24, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(12, activation=tf.nn.relu),\n layers.Dense(1)\n ])\n optimizer = tf.keras.optimizers.Adagrad(0.01)\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n#def build_model():\n# model = Sequential()\n# model.add(GaussianDropout(0.01, input_shape=[len(train_dataset.keys())] ))\n# model.add(Dense(24, activation='relu'))\n# model.add(Dense(12, activation='relu'))\n# model.add(Dense(1))\n# model.compile(loss='mean_squared_error',\n# optimizer='adam',\n# metrics=['mean_absolute_error', 'mean_squared_error']) \n# return model\n\nmodel = build_model()\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the model:\n# Use the .summary method to print a simple description of the model\n\nmodel.summary()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Now try out the model. 
\n# Take a batch of 10 examples from the training\n# data and call model.predict on it.\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It seems to be working, and it produces a result \n# of the expected shape and type.\n\n# Train the model:\n# Train the model for 1000 epochs, and record the training\n# and validation accuracy in the history object.\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[PrintDot()])\nprint(history.history.keys())\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Visualize the model's training progress using the stats\n# stored in the history object.\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n \n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(path_fig+fig_name)\n \nplot_history(history)\n\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Ploting again, but with the EarlyStopping apllied:\n\ndef plot_history_EarlyStopping(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,5])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,30])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_EarlyStopping.png\"\n 
plt.savefig(path_fig+fig_name)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n \nmodel = build_model()\n\n# The patience parameter is the amount of epochs to check for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history_EarlyStopping(history)\n\n# The graph shows that on the validation set, the average error \n# is usually around +/- 2 MPG. Is this good? \n# We'll leave that decision up to you.\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Let's see how well the model generalizes by using \n# the test set, which we did not use when training the model. \n# This tells us how well we can expect the model to predict \n# when we use it in the real world.\n\nloss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n\n# Make predictions\n# Finally, predict SFCPRCP values using data in the testing set:\n\ntest_predictions = model.predict(normed_test_data).flatten()\n\nplt.figure()\nplt.scatter(y_test, test_predictions)\nplt.xlabel('True Values [sfcprcp]')\nplt.ylabel('Predictions [sfcprcp]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nfig_name = fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It looks like our model predicts reasonably well. 
\n# Let's take a look at the error distribution.\n\nerror = test_predictions - y_test\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [sfcprcp]\")\nplt.ylabel(\"Count\")\nfig_name = fig_title + \"_prediction_error.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Saving a model\n#if __name__ == '__main__':\n# _start_time = time.time()\n#\n# tic()\n\n# serialize model to YAML\nmodel_yaml = model.to_yaml()\nwith open(\"tf_regression_T8.yaml\", \"w\") as yaml_file:\n yaml_file.write(model_yaml)\n# serialize weights to HDF5\nmodel.save_weights(\"tf_regression_T8.h5\")\nprint(\"Saved model to disk\")\n# \n## later...\n# \n## load YAML and create model\n#yaml_file = open('model.yaml', 'r')\n#loaded_model_yaml = yaml_file.read()\n#yaml_file.close()\n#loaded_model = model_from_yaml(loaded_model_yaml)\n## load weights into new model\n#loaded_model.load_weights(\"model.h5\")\n#print(\"Loaded model from disk\")\n# \n## evaluate loaded model on test data\n#loaded_model.compile(loss='mean_squared_error',\n# optimizer='adam',\n# metrics=['mean_absolute_error', 'mean_squared_error'])\n#score = loaded_model.evaluate(normed_test_data, y_test, verbose=0)\n#print(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]))\n# \n# training_model = history\n# #grid_result = training_model.run_TuningRegressionPrecipitation()\n# joblib.dump(training_model, 'teste.pkl')\n## loaded_model = joblib.load('/media/DATA/tmp/git-repositories/jobs/model_trained_regression_precipitation_W1.pkl')\n# \n# \n# \n# tac()\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\n# It's not quite gaussian, but we might expect that because \n# the number of samples is very small.\n\n# Conclusion:\n\n# This notebook introduced a few techniques to handle a regression problem.\n\n# >> Mean Squared Error (MSE) is a common loss function used for regression problems (different loss functions are used for classification problems).\n\n# >> Similarly, evaluation metrics used for regression differ from classification. 
A common regression metric is Mean Absolute Error (MAE).\n\n# >> When numeric input data features have values with different ranges, each feature should be scaled independently to the same range.\n\n# >> If there is not much training data, one technique is to prefer a small network with few hidden layers to avoid overfitting.\n\n# >> Early stopping is a useful technique to prevent overfitting.\n\n#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 21:31:04 2019\n\n@author: dvdgmf\n\"\"\"\n# https://www.tensorflow.org/tutorials/keras/basic_regression\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_yaml\n\n\nprint(tf.__version__)\n\n# --------------------------\n# DROP DATA OUTSIDE INTERVAL\n# --------------------------\ndef keep_interval(keepfrom:0.0, keepto:1.0, dataframe, target_col:str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval] \n return result\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#def tic():\n# global _start_time\n# _start_time = time.time()\n#\n#def tac():\n# t_sec = round(time.time() - _start_time)\n# (t_min, t_sec) = divmod(t_sec, 60)\n# (t_hour, t_min) = divmod(t_min, 60)\n# print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n \n# Fix random seed for reproducibility:\nseed = 
7\nnp.random.seed(seed)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Path, file and load DATAFRAME\n\nfile = 'yearly_br_underc1_0956.csv'\npath = '/media/DATA/tmp/datasets/brazil/brazil_qgis/csv/'\nfig_title = 'tf_regression_T10_undc1_0956_'\npath_fig = '/media/DATA/tmp/git-repositories/jobs/tf_regression_figures/'\n\ndf_orig = pd.read_csv(os.path.join(path, file), sep=',', decimal='.')\n\n#path = '/home/david/DATA/'\n#file = 'yrly_br_under_c1.csv'\n#path_fig = '/home/david/DATA/'\n#file = 'yrly_br_under_c1_over_c3c4.csv'\n#file_name = os.path.splitext(file)[0]\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Count the number of pixels by classs:\n\ncolunas = list(df_orig.columns.values)\ndf_orig = df_orig.loc[:,colunas]\nx, y = df_orig.loc[:,colunas], df_orig.loc[:,['CLASSE']]\nx_arr = np.asanyarray(x)\ny_arr = np.asanyarray(y)\ny_arr = np.ravel(y_arr)\nprint('Original dataset shape %s' % Counter(y_arr))\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\ndataset=df_orig.drop(columns=['lat','lon','sfccode', 'T2m','tcwv','skint',\n 'cnvprcp','10V','10H','18V','18H','23V','36H',\n '89H','166H','10VH','18VH','SSI','delta_neg',\n 'delta_pos','MPDI','MPDI_scaled','PCT10','PCT18',\n 'TagRain', 'CLASSE'])\n \n#df_orig['sfcprcp']=df_orig[['sfcprcp']].astype(int)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\ndataset = keep_interval(20.0, 40.0, dataset, 'sfcprcp')\n# scale the output between 0 and 1 for the colorbar\n# y = minmax_scale(y_full)\n\n# --------------------------------------\n# Transform pd.DataFrame column to array\n# --------------------------------------\n#x_position = dataset.loc[:,['sfcprcp']]\n#x_array = np.asanyarray(x_position)\n#plt.plot(x_array)\n#plt.show() \n \n#\n#threshold_rain =0.1\n#rain_pixels = np.where((dataset['sfcprcp'] >= threshold_rain))\n#dataset=dataset.iloc[rain_pixels]\n# \n# ----------------------------------------\n# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\nn = 0.90\nto_remove = np.random.choice(\n dataset.index,\n size=int(dataset.shape[0]*n),\n replace=False)\ndataset = dataset.drop(to_remove)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split the data into train and test\n# Now split the dataset into a training set and a test set.\n# We will use the test set in the final evaluation of our model.\n\ntrain_dataset = dataset.sample(frac=0.8,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the data:\n# Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\ncolunas = list(dataset.columns.values)\n#sns.pairplot(df_orig[colunas], diag_kind=\"kde\")\n#sns.pairplot(train_dataset[colunas], diag_kind=\"kde\")\n#sns.pairplot(test_dataset[colunas], 
diag_kind=\"kde\")\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Also look at the overall statistics:\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"sfcprcp\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Split features from labels:\n# Separate the target value, or \"label\", from the features. This label is the value that you will train the model to predict.\n\ny_train = train_dataset.pop('sfcprcp')\ny_test = test_dataset.pop('sfcprcp')\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Normalize the data:\n# Look again at the train_stats block above and note how different the ranges \n# of each feature are.\n\n# It is good practice to normalize features that use different scales and ranges.\n# Although the model might converge without feature normalization, it makes \n# training more difficult, and it makes the resulting model \n# dependent on the choice of units used in the input. \n\n#def norm(x):\n# return (x - train_stats['mean']) / train_stats['std']\n#\n#normed_train_data = norm(train_dataset)\n#normed_test_data = norm(test_dataset)\n\n\n#scaler=QuantileTransformer(output_distribution='uniform')\nscaler = StandardScaler()\nnormed_train_data = scaler.fit_transform(train_dataset)\nnormed_test_data = scaler.fit_transform(test_dataset)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Build the model:\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(24, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(12, activation=tf.nn.relu),\n layers.Dense(1)\n ])\n optimizer = tf.keras.optimizers.Adam(0.001)\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n#def build_model():\n# model = Sequential()\n# model.add(GaussianDropout(0.01, input_shape=[len(train_dataset.keys())] ))\n# model.add(Dense(24, activation='relu'))\n# model.add(Dense(12, activation='relu'))\n# model.add(Dense(1))\n# model.compile(loss='mean_squared_error',\n# optimizer='adam',\n# metrics=['mean_absolute_error', 'mean_squared_error']) \n# return model\n\nmodel = build_model()\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Inspect the model:\n# Use the .summary method to print a simple description of the model\n\nmodel.summary()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Now try out the model. 
\n# Take a batch of 10 examples from the training\n# data and call model.predict on it.\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It seems to be working, and it produces a result \n# of the expected shape and type.\n\n# Train the model:\n# Train the model for 1000 epochs, and record the training\n# and validation accuracy in the history object.\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[PrintDot()])\nprint(history.history.keys())\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Visualize the model's training progress using the stats\n# stored in the history object.\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n \n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(path_fig+fig_name)\n \nplot_history(history)\n\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Ploting again, but with the EarlyStopping apllied:\n\ndef plot_history_EarlyStopping(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_absolute_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,5])\n plt.legend()\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n ylim_max = hist.val_mean_squared_error.max()+10\n plt.ylim([0,ylim_max])\n #plt.ylim([0,30])\n plt.legend()\n #plt.show()\n fig_name = fig_title + \"_error_per_epochs_EarlyStopping.png\"\n 
plt.savefig(path_fig+fig_name)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n \nmodel = build_model()\n\n# The patience parameter is the amount of epochs to check for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history_EarlyStopping(history)\n\n# The graph shows that on the validation set, the average error \n# is usually around +/- 2 MPG. Is this good? \n# We'll leave that decision up to you.\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Let's see how well the model generalizes by using \n# the test set, which we did not use when training the model. \n# This tells us how well we can expect the model to predict \n# when we use it in the real world.\n\nloss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n\n# Make predictions\n# Finally, predict SFCPRCP values using data in the testing set:\n\ntest_predictions = model.predict(normed_test_data).flatten()\n\nplt.figure()\nplt.scatter(y_test, test_predictions)\nplt.xlabel('True Values [sfcprcp]')\nplt.ylabel('Predictions [sfcprcp]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nfig_name = fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n \n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# It looks like our model predicts reasonably well. 
\n# Let's take a look at the error distribution.\n\nerror = test_predictions - y_test\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [sfcprcp]\")\nplt.ylabel(\"Count\")\nfig_name = fig_title + \"_prediction_error.png\"\nplt.savefig(path_fig+fig_name)\nplt.clf()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n# Saving a model\n#if __name__ == '__main__':\n# _start_time = time.time()\n#\n# tic()\n\n# serialize model to YAML\nmodel_yaml = model.to_yaml()\nwith open(\"tf_regression_T10.yaml\", \"w\") as yaml_file:\n yaml_file.write(model_yaml)\n# serialize weights to HDF5\nmodel.save_weights(\"tf_regression_T10.h5\")\nprint(\"Saved model to disk\")\n# \n## later...\n# \n## load YAML and create model\n#yaml_file = open('model.yaml', 'r')\n#loaded_model_yaml = yaml_file.read()\n#yaml_file.close()\n#loaded_model = model_from_yaml(loaded_model_yaml)\n## load weights into new model\n#loaded_model.load_weights(\"model.h5\")\n#print(\"Loaded model from disk\")\n# \n## evaluate loaded model on test data\n#loaded_model.compile(loss='mean_squared_error',\n# optimizer='adam',\n# metrics=['mean_absolute_error', 'mean_squared_error'])\n#score = loaded_model.evaluate(normed_test_data, y_test, verbose=0)\n#print(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]))\n# \n# training_model = history\n# #grid_result = training_model.run_TuningRegressionPrecipitation()\n# joblib.dump(training_model, 'teste.pkl')\n## loaded_model = joblib.load('/media/DATA/tmp/git-repositories/jobs/model_trained_regression_precipitation_W1.pkl')\n# \n# \n# \n# tac()\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\n# It's not quite gaussian, but we might expect that because \n# the number of samples is very small.\n\n# Conclusion:\n\n# This notebook introduced a few techniques to handle a regression problem.\n\n# >> Mean Squared Error (MSE) is a common loss function used for regression problems (different loss functions are used for classification problems).\n\n# >> Similarly, evaluation metrics used for regression differ from classification. 
A common regression metric is Mean Absolute Error (MAE).\n\n# >> When numeric input data features have values with different ranges, each feature should be scaled independently to the same range.\n\n# >> If there is not much training data, one technique is to prefer a small network with few hidden layers to avoid overfitting.\n\n# >> Early stopping is a useful technique to prevent overfitting.\n\n#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n" ]
[ [ "matplotlib.pyplot.xlim", "numpy.where", "numpy.cumsum", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gca", "sklearn.decomposition.PCA", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "matplotlib.pyplot.clf", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "tensorflow.keras.callbacks.EarlyStopping" ], [ "matplotlib.pyplot.xlim", "tensorflow.keras.layers.Dense", "numpy.where", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "tensorflow.keras.optimizers.Adagrad", "matplotlib.pyplot.clf", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.ravel", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "numpy.asanyarray", "tensorflow.keras.callbacks.EarlyStopping" ], [ "matplotlib.pyplot.xlim", "tensorflow.keras.layers.Dense", "numpy.where", "pandas.DataFrame", "matplotlib.pyplot.savefig", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "matplotlib.pyplot.clf", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.ravel", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "numpy.asanyarray", "tensorflow.keras.callbacks.EarlyStopping" ] ]
wangdecheng/QAStrategy
[ "d970242ea61cff2f1a6f69545dc7f65e8efd1672" ]
[ "GolemQ/portfolio/by_es_fof.py" ]
[ "#\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# 克隆自聚宽文章:https://www.joinquant.com/post/24542\n# 标题:FOF养老成长基金-v2.0\n# 原作者:富在知足\nimport numpy as np\nimport talib\nimport pandas\nimport scipy as sp\nimport scipy.optimize\nimport datetime as dt\nfrom scipy import linalg as sla\nfrom scipy import spatial\n\nfrom collections import namedtuple\n#Context = namedtuple(\"_Context\",['tradeRatio',\n# 'positionDict',\n# 'position',\n# 'initPriceDict',\n# 'transactionRecord',\n# 'pool',]) #Point为返回的类,_Point为返回类的类名\ncontext = dict(['doneTrade'])\n\ndef initialize(context):\n \"\"\"\n 初始化 context = indices\n \"\"\"\n #set_benchmark('000300.XSHG')\n # 设置买卖手续费,万三,最小 5 元\n #set_commission(PerTrade(buy_cost=0.0003, sell_cost=0.0003, min_cost=5))\n #set_slippage(FixedSlippage(0.002))\n #set_option('use_real_price', True)\n # enable_profile()\n # 关闭部分\n #log.set_level('order', 'error')\n\n # 交易系数\n context['tradeRatio'] = 0\n\n # 交易收益率\n context['positionDict'] = np.nan\n\n # 仓位(未使用)\n context['position'] = np.nan\n\n # 需要进行盘整巩固\n context['initPriceDict'] = np.nan\n\n # 上次交易日所获取的平均值\n context['transactionRecord'] = np.nan\n\n return context\n\n\ndef handle_data(context, data):\n \"\"\"\n 新Bar处理\n \"\"\"\n\n\t# 初始化交易数据\n initializeStockDict(context)\n # 进行仓位调整\n rebalance(context)\n # 获取买卖决策\n message = \"\"\n for stock in context.pool:\n if context.tradeRatio[stock] > 0:\n message += buyOrSellCheck(context,stock, 3, 1.4)\n\t# 此段代码仅用于发微信,可以跳过\n if message != \"\":\n send_message(message)\n # 每周进行一次调仓操作\n if getLastestTransactTime(context):\n tradeStockDict(context, context.tradeRatio)\n if context.doneTrade:\n print((\"\\n\" + context.ratioMessage))\n\n\n# initialize parameters\ndef initializeStockDict(context):\n ## 每天注入400元资金\n #inout_cash(400, pindex=0)\n\n # 是否进行了交易操作\n context.doneTrade = False\n\n # 需要进行过渡交易\n context.Transitional = True\n\n # 定义调仓周期\n context.weekDay = { 4 }\n\n # 置信区间\n context.confidenceLevel = 0.02\n\n # 定义参考周期\n context.referenceCycle = 252\n\n # 涨跌幅度\n context.maxRange = 30\n\n # 巩固幅度\n context.consolidation = 5\n\n # 持仓数量\n context.stockCount = 5\n\n # 入选参考\n context.referStock = '000002.XSHG'\n\n # 入选条件\n context.minPosition = 0\n\n # 持仓位数\n context.Multiple = 1\n\n # 最小持仓比例\n context.minRatio = 0.10 * context.Multiple\n\n # 最大持仓比例\n context.maxRatio = 0.30 * context.Multiple\n\n # 最小交易金额\n context.minAmount = 2000\n\n # 网格交易率(%)\n context.netRate = 3\n\n # 定义交易标的\n 
context.stockDict = {}\n\n # 分析标的\n context.trackStock = ''\n\n # 其余仓位\n context.bondStock = '163210' # 诺安纯债定开债A\n\n # 权重系数:1~10\n context.stockDict['515520.XSHG'] = 10 # 价值100\n context.stockDict['161907.XSHE'] = 10 # 中证红利\n context.stockDict['512890.XSHG'] = 10 # 红利低波\n context.stockDict['515300.XSHG'] = 10 # 红利300\n context.stockDict['510050.XSHG'] = 10 # 上证50\n context.stockDict['159916.XSHE'] = 10 # 深证60\n context.stockDict['512910.XSHG'] = 10 # 中证100\n context.stockDict['510310.XSHG'] = 10 # 沪深300\n context.stockDict['512260.XSHG'] = 10 # 中证500低波\n context.stockDict['512090.XSHG'] = 10 # MSCI\n context.stockDict['515000.XSHG'] = 10 # 中证科技\n context.stockDict['512040.XSHG'] = 10 # 中证价值\n context.stockDict['510900.XSHG'] = 10 # 恒生国企\n context.stockDict['501021.XSHG'] = 10 # 香港中小\n context.stockDict['513050.XSHG'] = 10 # 中概互联\n context.stockDict['518880.XSHG'] = 4 # 黄金\n\n # 上市不足参考周期的剔除掉\n context.stockDict = delNewStock(context, context.stockDict)\n\n # 整理能交易的标的\n context.pool = list(context.stockDict.keys())\n\n # 持仓数量\n if (len(context.pool) < context.stockCount):\n context.stockCount = len(context.pool)\n\n # 统计交易资料\n for stock in context.pool:\n if stock not in context.initPriceDict:\n context.initPriceDict[stock] = 0\n if stock not in context.positionDict:\n context.positionDict[stock] = 0\n if stock not in context.transactionRecord:\n context.transactionRecord[stock] = 0\n # 初始化交易记录\n context.tradeRecord = \"\"\n # 初始化权重记录\n context.ratioMessage = \"\"\n \ndef getStockRSIRatio(stock):\n try:\n # 根据周来计算出RSI系数\n if ratio > 0:\n nRSI = 60\n nRSIAvg = 5\n his = attribute_history(stock, 300, '1d', 'close', skip_paused=True, df=False, fq='pre')\n closeArray = his['close']\n # 计算RSI #\n rsiArray = talib.RSI(closeArray,nRSI)\n # RSI均值 #\n rsiAvgArrayS = talib.MA(rsiArray,nRSIAvg)\n rsiAvgArrayL = talib.MA(rsiArray,nRSI)\n rsiRatio = 100 - np.round(math.tan(math.radians(rsiAvgArrayS[-1] - (50.0 if math.isnan(rsiAvgArrayL[-1]) else rsiAvgArrayL[-1]))) * 50,0)\n if rsiRatio < 0:\n rsiRatio = 0\n return rsiRatio\n else:\n return 0\n except:\n return 50\n\ndef getStockRSI(stock):\n # 根据周来计算出RSI系数\n # 计算RSI所用天数\n try:\n nRSI = 7\n his = attribute_history(stock, 30, \"1d\", (\"close\",\"high\",\"low\"), skip_paused=True, df=False, fq=\"pre\")\n closeArray = his[\"close\"]\n rsiArray = talib.RSI(closeArray, nRSI)\n return np.round(rsiArray[-1],2)\n except Exception as err:\n return 100\n\ndef getLastestTransactTime(context):\n\n # 定义调仓周期\n # 只会有月初进行操作\n lastestDate = datetime.datetime(2000, 1, 1)\n # 获取最后一次交易日期\n for stock in list(context.portfolio.positions.keys()):\n if (context.portfolio.positions[stock].transact_time > lastestDate):\n lastestDate = context.portfolio.positions[stock].transact_time\n if (context.current_dt - lastestDate).days >= 30 and context.current_dt.isoweekday() in context.weekDay:\n return True\n\n return False\n\ndef getStockName(stock):\n return get_security_info(stock).display_name\n\ndef drawCloseValue(stock):\n his = attribute_history(stock, 1, '1d','close',df=False, skip_paused=False)\n record(C=his['close'][0])\n\ndef variance(stock):\n # 计算平均涨跌幅\n his = attribute_history(stock, 120, '1d','close',df=False, skip_paused=False)\n trList = []\n for i in range(len(his['close'])):\n if i > 0:\n trList.append(abs(his['close'][i - 1] - his['close'][i]) / his['close'][i - 1] * 100)\n trArray = np.array(trList)\n trMean = trArray.mean()\n return np.round(trMean,1) if not isnan(trMean) else 0\n\ndef getAvgMoney(stock):\n # 计算平均交易额\n his = attribute_history(stock, 120, 
'1d','volume',df=False, skip_paused=False)\n trMean = his['money'].mean()\n return np.round(trMean,1) if not isnan(trMean) else 0\n\n\ndef delNewStock(context, stockDict):\n \"\"\"\n\t剔除上市时间较短和平均交易额过低的产品。\n \"\"\"\n for stock in list(stockDict.keys()):\n stockData = context.ohlc.loc[(), :]\n if ():\n avgMoney = getAvgMoney(stock)\n if avgMoney >= 2000000:\n tmpDict[stock] = stockDict[stock]\n return tmpDict\n\n# 每天开盘前用于判断某一只etf今日该买还是该卖的函数 #\n# 此函数输入为一个股票代码,应卖出时输出-1,应买进时输出1 #\ndef buyOrSellCheck(context, stock, nATRValue, nstdValue):\n message = \"\"\n try:\n # 计算RSI所用天数\n nRSI = 7\n nRSIAvg = 14\n nATR = 21\n # 取得近90天的历史行情数据\n deltaDate = context.current_dt.date() - datetime.timedelta(90)\n if get_security_info(stock).start_date > deltaDate:\n return message\n his = attribute_history(stock, 120, '1d', ('close','high','low'), skip_paused=True, df=False, fq='pre')\n closeArray = his['close']\n # 计算长线是60天(月)\n emaArray = talib.MA(closeArray,60)\n # 计算RSI #\n rsiArray = talib.RSI(closeArray,nRSI)\n # RSI均值 #\n rsiAvgArray = talib.MA(rsiArray,nRSIAvg)\n # RSI标准差 #\n rsiStdArray = talib.STDDEV(rsiArray,nRSIAvg)\n # ATR #\n trList = []\n for i in range(len(closeArray)):\n if i > 0:\n trList.append(max([(his['high'][i] - his['low'][i]), \n abs(his['close'][i - 1] - his['high'][i]),\n abs(his['close'][i - 1] - his['low'][i])]))\n trArray = np.array(trList)\n atrAvgArray = talib.MA(trArray,nATR)\n ATR = nATRValue * atrAvgArray[-1]\n # 买入的阈值 #\n buyThreshold = rsiAvgArray[-1] - nstdValue * rsiStdArray[-1]\n if buyThreshold > 30:\n buyThreshold = 30\n # 卖出的阈值 #\n sellThreshold = rsiAvgArray[-1] + nstdValue * rsiStdArray[-1]\n if sellThreshold < 70:\n sellThreshold = 70\n # 获取溢价率 #\n premiumRate = getPremiumRate(context, stock)\n #record(RA=sellThreshold,RB=rsiArray[-1],RC=buyThreshold)\n #record(TA=closeArray[-2]+ATR,TB=closeArray[-1],TC=closeArray[-2]-ATR)\n # 当天出现超过3%的跌幅时,禁止任何操盘\n if stopLoss(stock):\n message = getStockName(stock) + \" : \" + \"禁止操作!\\n\"\n elif premiumRate >= 0.5:\n message = getStockName(stock) + \" : \" + \"溢价[\" + str(premiumRate) + \"]!\\n\"\n elif premiumRate <= -1.0:\n message = getStockName(stock) + \" : \" + \"折价[\" + str(premiumRate) + \"]!\\n\"\n # 如果RSI高于卖出阈值,则卖出股票\n elif rsiArray[-1] > sellThreshold:\n message = getStockName(stock) + \" : \" + \"RSI[\" + str(np.round(rsiArray[-1],2)) + \">\" + str(np.round(sellThreshold,2)) + \"]卖出!\\n\"\n # 如果RSI低于买入阈值,则买入股票\n elif rsiArray[-1] < buyThreshold:\n message = getStockName(stock) + \" : \" + \"RSI[\" + str(np.round(rsiArray[-1],2)) + \"<\" + str(np.round(buyThreshold,2)) + \"]买入!\\n\"\n # 如果ATR高于卖出阈值,则卖出股票\n elif closeArray[-1] > closeArray[-2] + ATR:\n message = getStockName(stock) + \" : \" + \"ATR[\" + str(np.round(closeArray[-1],2)) + \">\" + str(np.round(emaArray[-1],2)) + \"]卖出!\\n\"\n # 如果ATR低于买入阈值,则买入股票\n elif closeArray[-1] < closeArray[-2] - ATR:\n message = getStockName(stock) + \" : \" + \"ATR[\" + str(np.round(closeArray[-1],2)) + \"<\" + str(np.round(emaArray[-1],2)) + \"]买入!\\n\"\n except:\n message = \"\"\n return message\n\ndef stopLoss(stock, lag=2, loss=2, more=4):\n # 当跌幅大于2%时禁止当天交易,以观望的方式等待下一个交易日,防止股灾出现时仍连接交易 #\n hisArray = attribute_history(stock,lag,'1d',('open', 'close', 'high', 'low', 'volume'),skip_paused=True)\n closeArray = hisArray['close'].values\n rate = abs((closeArray[-1] - closeArray[-2]) / closeArray[-2] * 100)\n if (rate > loss and rate < more):\n return True\n else:\n return False\n\ndef getPremiumRate(context, stock):\n # 计算基金当前的溢价情况 #\n try:\n now = context.current_dt\n start_date = 
now + datetime.timedelta(days=-10)\n end_date = now + datetime.timedelta(days=-1)\n unitPriceArray = get_extras('unit_net_value', stock, start_date=start_date, end_date=end_date, df=False)\n unitPrice = unitPriceArray[stock][-1]\n his = attribute_history(stock, 5, '1d', 'close', skip_paused=True, df=False, fq='pre')\n closePrice = his['close'][-1]\n return np.round((closePrice - unitPrice) / unitPrice * 100, 2)\n except:\n return 0\n\ndef rebalance(context):\n # 重新调整仓位\n tradeRatio = caltradeStockRatio(context)\n context.tradeRatio = tradeRatio\n\ndef caltradeStockRatio(context):\n \n def getGrowthRate(stock, n=21):\n \"\"\"\n 获取股票n日以来涨幅,根据当前价计算 / 这个东西按理说是ROC\n \"\"\"\n lc = attribute_history(stock, n, '1d', ('close'), True)['close'][0]\n c = attribute_history(stock, 1, '1d', ('close'), True)['close'][0]\n if not isnan(lc) and not isnan(c) and lc != 0:\n return (c - lc) / lc * 100\n else:\n return 0\n \n def calstockRiskVaR(stock):\n\t\t# 风险价值(VaR)\n portfolio_VaR = 0.0000001\n dailyReturns = fun_getdailyreturn(stock, '1d', context.referenceCycle)\n portfolio_VaR = 1 * context.confidenceLevel * np.std(dailyReturns) * 100\n if isnan(portfolio_VaR):\n portfolio_VaR = 0.0000001\n return 1 #portfolio_VaR\n\t\t\n def calstockRiskES(stock):\n # 期望损失(ES)\n portfolio_ES = 0\n dailyReturns = fun_getdailyreturn(stock, '1d', context.referenceCycle)\n dailyReturns_sort = sorted(dailyReturns) \n count = 0\n sum_value = 0\n for i in range(len(dailyReturns_sort)):\n if i < (context.referenceCycle * context.confidenceLevel):\n sum_value += dailyReturns_sort[i]\n count += 1\n if count == 0:\n portfolio_ES = 0\n else:\n portfolio_ES = -(sum_value / (context.referenceCycle * context.confidenceLevel))\n if isnan(portfolio_ES):\n portfolio_ES = 0\n return portfolio_ES\n\n def fun_getdailyreturn(stock, freq, lag):\n stockHis = history(lag, freq, 'close', stock, df=True)\n #dailyReturns =\n #stockHis.resample('D',how='last').pct_change().fillna(value=0,\n #method=None, axis=0).values\n dailyReturns = stockHis.resample('D').last().pct_change().fillna(value=0, method=None, axis=0).values\n return dailyReturns\n\n def fun_caltraderatio(tradeRatio, stock, position, total_position):\n if stock in tradeRatio:\n tradeRatio[stock] += np.round((position / total_position), 3)\n else:\n tradeRatio[stock] = np.round((position / total_position), 3)\n tradeRatio[stock] = tradeRatio[stock] * 100 // context.Multiple * context.Multiple / 100\n return tradeRatio\n\n # 计算所有标的的仓位比例\n max_ES = -1\n max_VaR = -1\n ESDict = {}\n VaRDict = {}\n grDict = {}\n for stock in context.pool:\n ES = calstockRiskES(stock)\n if ES > max_ES:\n max_ES = ES\n ESDict[stock] = ES\n VaR = calstockRiskVaR(stock)\n if VaR > max_VaR:\n max_VaR = VaR\n VaRDict[stock] = VaR\n grDict[stock] = getGrowthRate(stock)\n\n # 计算入选条件\n referES = calstockRiskES(context.referStock)\n referVar = calstockRiskVaR(context.referStock)\n referGR = getGrowthRate(context.referStock)\n referPosition = np.round((max_ES / referES) * (max_VaR / referVar) * power(1.02, referGR) * 100, 3)\n if context.minPosition == 0:\n context.minPosition = referPosition\n else:\n context.minPosition = (referPosition + context.minPosition) / 2\n # 计算总仓位\n positionDict = {}\n for stock in context.pool:\n if ESDict[stock] == 0:\n positionDict[stock] = 0\n else:\n stockRatio = context.stockDict[stock]\n positionDict[stock] = np.round((max_ES / ESDict[stock]) * (max_VaR / VaRDict[stock]) * power(1.02, grDict[stock]) * stockRatio * 10,3)\n # 与上次交易的平均值再进行一次平均值计算\n if context.transactionRecord[stock] 
== 0:\n context.transactionRecord[stock] = positionDict[stock]\n else:\n positionDict[stock] = positionDict[stock] * 0.8 + context.transactionRecord[stock] * 0.2\n context.transactionRecord[stock] = positionDict[stock]\n positionDictSorted = sorted(list(positionDict.items()), key=lambda d: d[1], reverse = True)\n # 针对边际标的进行如果持仓则继续持仓\n stockIn = \"\"\n stockOut = \"\"\n doChange = False\n if len(positionDictSorted) > context.stockCount and context.Transitional:\n stockIn = positionDictSorted[(context.stockCount - 1)][0]\n stockOut = positionDictSorted[context.stockCount][0]\n doChange = stockIn not in context.portfolio.positions and stockOut in context.portfolio.positions\n # 如果需要进行边际交换操作时,仍需要判断交易的标的是否满足过滤条件\n if doChange:\n if np.round(positionDict[stockIn] - context.minPosition, 2) < 0 or np.round(positionDict[stockOut] - context.minPosition, 2) < 0:\n doChange = False\n # 把排前列的标的选择出来\n positionDict.clear()\n index = 1\n for (key,value) in positionDictSorted:\n if context.initPriceDict[key] == 0 and abs(grDict[key]) >= context.maxRange:\n # 近期涨跌幅度过大需要盘整巩固\n context.initPriceDict[key] = 1\n elif context.initPriceDict[key] == 1 and abs(grDict[key]) < context.consolidation:\n # 盘整巩固解除封闭\n context.initPriceDict[key] = 0\n # 涨跌幅过大需要等待一个周期后再解解封闭\n if context.initPriceDict[key] == 1:\n positionDict[key] = 0\n else:\n # 针对边际标的进行如果持仓则继续持仓\n if doChange and (key == stockIn or key == stockOut):\n if key == stockIn:\n positionDict[key] = 0\n elif key == stockOut:\n positionDict[key] = value\n elif index <= context.stockCount:\n positionDict[key] = value\n else:\n positionDict[key] = 0\n index += 1\n total_position = 0\n for stock in context.pool:\n total_position += positionDict[stock]\n if total_position == 0:\n total_position = 1\n # 计算所有标的的系数\n ratio = {}\n profitDict = {}\n for stock in context.pool:\n # 未超过入选条件时不进行交易\n stockPosition = positionDict[stock]\n # 如果RSI出现超卖情况时,不进行卖出操作\n RSI = getStockRSI(stock)\n if RSI > 30 and np.round(stockPosition - context.minPosition, 2) < 0:\n stockPosition = 0\n ratio = fun_caltraderatio(ratio, stock, stockPosition, total_position)\n # 计算持仓收益率\n if stock in context.portfolio.positions:\n profitDict[stock] = (context.portfolio.positions[stock].price - context.portfolio.positions[stock].avg_cost) / context.portfolio.positions[stock].avg_cost * 100\n else:\n profitDict[stock] = 0\n # 踢去持仓比例低于要求的标换\n if context.trackStock != \"\":\n record(T=1)\n drawCloseValue(context.trackStock)\n for stock in context.pool:\n if ratio[stock] < context.minRatio and ratio[stock] > 0:\n ratio[stock] = 0\n elif ratio[stock] > context.maxRatio:\n ratio[stock] = context.maxRatio\n if context.trackStock == stock:\n if ratio[stock] > 0:\n record(T=1.5)\n else:\n record(T=1)\n sumRatio = 0\n index = 1\n adjustment = np.round(context.stockCount * 1.0 / 2 / 100, 2)\n for (key,value) in positionDictSorted:\n if ratio[key] > 0:\n ratio[key] = adjustment + ratio[key]\n adjustment -= 0.01\n try:\n context.ratioMessage += \"%2d.%s:%3d/%3d(%2d%%/%1.1f%%/%1.1f%%/%1.1f%%/%1.2f%%) %s\\n\" % (index,\n key,\n positionDict[key],\n value,\n ratio[key] * 100,\n variance(key),\n grDict[key],\n context.positionDict[key] + profitDict[key],\n getPremiumRate(context, key),\n getStockName(key))\n except:\n context.ratioMessage += \"%2d.%s\" % (index,key)\n sumRatio += ratio[key]\n index += 1\n if index > (context.stockCount + 2):\n break\n # 增加债券\n ratio[context.bondStock] = 0\n context.positionDict[context.bondStock] = 1\n try:\n context.ratioMessage += \"合计:%3d%%,%3d,\" % (sumRatio * 
100,context.minPosition)\n if (1 - sumRatio) > 0.2:\n ratio[context.bondStock] = (1 - sumRatio)\n except:\n context.ratioMessage += \"合计:%3d%%,%3d,\" % (0,context.minPosition)\n context.ratioMessage += \"累计金额:%d,可用资金:%d,总资产:%d\\n\" % (context.portfolio.inout_cash,context.portfolio.available_cash,context.portfolio.total_value)\n # 计算当前仓位\n if context.trackStock == '':\n record(P=(sumRatio * 100))\n record(B=(ratio[context.bondStock] * 100))\n return ratio\n\ndef tradeStockDict(context, buyDict):\n\n def tradeStock(context, stock, ratio):\n total_value = context.portfolio.total_value\n curPrice = history(1,'1d', 'close', stock, df=False)[stock][-1]\n curValue = 0\n if stock in context.portfolio.positions:\n curValue = context.portfolio.positions[stock].total_amount * curPrice\n quota = total_value * ratio\n # 平仓后记录实际收益率\n if quota == 0 and curValue > 0:\n if stock in context.portfolio.positions:\n context.positionDict[stock] += np.round((curPrice - context.portfolio.positions[stock].avg_cost) / context.portfolio.positions[stock].avg_cost * 100, 2)\n else:\n context.positionDict[stock] = curPrice\n deltaValue = np.round(abs(quota - curValue) / 1000, 0) * 1000\n if deltaValue >= context.minAmount or (quota == 0 and curValue > 0):\n order_target_value(stock, quota)\n context.doneTrade = True\n\n buylist = list(buyDict.keys())\n hStocks = history(1, '1d', 'close', buylist, df=False)\n myholdstock = list(context.portfolio.positions.keys())\n portfolioValue = context.portfolio.portfolio_value\n\n # 已有仓位\n holdDict = {}\n hholdstocks = history(1, '1d', 'close', myholdstock, df=False)\n for stock in myholdstock:\n tmpW = np.round((context.portfolio.positions[stock].total_amount * hholdstocks[stock]) / portfolioValue, 2)\n holdDict[stock] = float(tmpW)\n\n # 对已有仓位做排序\n tmpDict = {}\n for stock in holdDict:\n if stock in buyDict:\n tmpDict[stock] = np.round((buyDict[stock] - holdDict[stock]), 2)\n else:\n tmpDict[stock] = -999999\n tradeOrder = sorted(list(tmpDict.items()), key=lambda d:d[1], reverse=False)\n\n # 先卖掉持仓减少的标的\n tmpList = []\n for idx in tradeOrder:\n stock = idx[0]\n if stock in buyDict:\n tradeStock(context, stock, buyDict[stock])\n else:\n tradeStock(context, stock, 0)\n tmpList.append(stock)\n\n # 交易其他股票\n for i in range(len(buylist)):\n stock = buylist[i]\n if len(tmpList) != 0 :\n if stock not in tmpList:\n tradeStock(context, stock, buyDict[stock])\n else:\n tradeStock(context, stock, buyDict[stock])\n\nif __name__ == '__main__':\n initialize(context)\n data = []\n handle_data(context, data)" ]
[ [ "numpy.round", "numpy.std", "numpy.array" ] ]
anuritabose/Advanced-Algorithm-FFT-RSA_Encryption
[ "748f571d85639db8ae090e0d10a3c3186bb11ba2" ]
[ "rsa.py" ]
[ "import numpy as np\r\nimport random\r\nimport cmath\r\nimport math\r\nimport copy\r\nimport time\r\n\r\n\r\n#generate two large odd numbers of a specific bit size\r\ndef generateLargeOdd(bitSize):\r\n a = random.getrandbits(128) | 1\r\n #print(a)\r\n return a\r\n\r\n\r\n#Primality check using Fermat Theorem to identify base 2 pseudoprimes\r\ndef checkPseudoprimeFermat(num):\r\n base = 2\r\n if (checkPrimeModularExponentiation(num-1, base, num)) == 1:\r\n return True #returns true if prime\r\n return False #returns false if composite\r\n\r\n\r\n#Primality check using Modular Exponentiation to the base 2\r\ndef checkPrimeModularExponentiation(num2, base, num):\r\n res = 1 \r\n base%=num\r\n if (base == 0) :\r\n res = 0\r\n return res\r\n while (num2 > 0) :\r\n #print(\"base:\", base)\r\n if ((int(num2) & 1) == 1) :\r\n res = (res * base) % num\r\n base = (base * base) % num\r\n num2 = int(num2) >> 1 # num2 = num/2\r\n return res #if res is 1 or n-1 it is prime\r\n\r\n\r\n\r\n#Helper function for Miller Rabin test\r\ndef millerHelper(d, n):\r\n a = 2 + random.randint(1, n - 4)\r\n #print(\"base:\", a)\r\n x = checkPrimeModularExponentiation(d, a, n)\r\n if (x == 1 or x == n - 1):\r\n return True\r\n while (d != n - 1):\r\n x = (x * x) % n\r\n d *= 2\r\n if (x == 1):\r\n return False\r\n if (x == n - 1):\r\n return True\r\n return False\r\n\r\n \r\n\r\n#Primality check using Miller Rabin test\r\ndef checkPrimeMillerRabin(n):\r\n k = 4 #no. of iterations\r\n if (n <= 1 or n == 4):\r\n return False\r\n if (n <= 3):\r\n return True\r\n d = n - 1\r\n while (d % 2 == 0):\r\n d //= 2\r\n for i in range(k): #Iterating k times\r\n if not millerHelper(d, n):\r\n return False\r\n return True\r\n\r\n\r\n\r\n\r\n#Primality check using Trial Division\r\ndef checkPrimeTrialDivision(a):\r\n for i in range(2, math.ceil(pow(a, 0.5))):\r\n if a%i == 0: \r\n return False\r\n return True\r\n\r\n\r\n\r\n#Find relative prime of a number\r\ndef relativePrime(p,q):\r\n phi = (p-1)*(q-1)\r\n for e in range(3, phi, 2):\r\n if phi%e != 0:\r\n return e\r\n\r\n\r\n\r\n#Extended Euclid\r\ndef egcd(a, b):\r\n s = 0; old_s = 1\r\n t = 1; old_t = 0\r\n r = b; old_r = a\r\n while r != 0:\r\n quotient = old_r // r\r\n old_r, r = r, old_r - quotient * r\r\n old_s, s = s, old_s - quotient * s\r\n old_t, t = t, old_t - quotient * t\r\n return old_r, old_s, old_t # return gcd, x, y\r\n\r\n\r\n\r\n#To find modular multiplicative inverse of e \r\ndef modularMultiplicativeInverse(e, p, q):\r\n phi = (p-1)*(q-1)\r\n gcd, x, y = egcd(e, phi)\r\n if x < 0:\r\n x += phi\r\n return x\r\n\r\n\r\n\r\ndef rsaEncrypt(M, e, n):\r\n C = []\r\n for m in M:\r\n C_inner = []\r\n for i in m:\r\n ex = checkPrimeModularExponentiation(e, i, n)\r\n #ex = (pow(m, e))%n\r\n C_inner.append(ex)\r\n C.append(C_inner)\r\n return C\r\n\r\n\r\ndef rsaDecrypt(C, d, n):\r\n MD = []\r\n for c in C:\r\n MD_inner = []\r\n for i in c:\r\n de = checkPrimeModularExponentiation(d, i, n)\r\n #de = (pow(c, d))%n\r\n MD_inner.append(chr(de))\r\n MD_in = \"\".join(MD_inner) \r\n MD.append(MD_in)\r\n return MD\r\n\r\n\r\n\r\ndef rsaHelper(M):\r\n count = 0\r\n #a = 6967\r\n M_original = copy.deepcopy(M)\r\n M_ascii = []\r\n for i in range(len(M)):\r\n M[i] = str(M[i])\r\n ma = [ord(c) for c in M[i]]\r\n M_ascii.append(ma)\r\n largePrimes = []\r\n while(count<2):\r\n oddNum = generateLargeOdd(128)\r\n if count==1 and oddNum == largePrimes[0]: continue\r\n if checkPrimeModularExponentiation(oddNum-1, 2, oddNum)!=1: \r\n continue\r\n if not checkPseudoprimeFermat(oddNum): 
\r\n if not checkPrimeMillerRabin(oddNum):\r\n continue #continue searching if number is composite\r\n #if not checkPrimeTrialDivisionLoop(oddNum):\r\n #continue \r\n count+=1\r\n largePrimes.append(oddNum) \r\n\r\n\r\n\r\n a, b = largePrimes[0], largePrimes[1]\r\n #print(\"The two large prime numbers: \", a, b)\r\n #print(\"Original message M: \", M)\r\n n = a*b\r\n e = relativePrime(a, b)\r\n #print(\"e: \",e)\r\n d = modularMultiplicativeInverse(e, a, b)\r\n #print(\"d: \", d)\r\n C = rsaEncrypt(M_ascii, e, n)\r\n #print(\"CypherText C: \", C)\r\n MD = rsaDecrypt(C, d, n)\r\n #print(\"Decrypted message MD: \",MD)\r\n \r\n return MD\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n VC = [5,7, 8, 10]\r\n #VC = [0j, (2+1.1102230246251565e-16j), (1.4997597826618576e-32-2.4492935982947064e-16j), (2+4.440892098500626e-16j)]\r\n VCD = rsaHelper(VC)\r\n print(np.allclose(VC, VCD))\r\n\r\n \r\n\r\n" ]
[ [ "numpy.allclose" ] ]
fossabot/ludwig
[ "d4ac229bbff89842fc3302cdafbd94c77c1cc069" ]
[ "ludwig/modules/loss_modules.py" ]
[ "# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow.python.ops.losses.losses_impl import Reduction\n\nfrom ludwig.constants import *\nfrom ludwig.utils.tf_utils import sequence_length_2D, sequence_length_3D\n\n\n#\n# Custom classes to support Tensorflow 2\n#\nclass BWCEWLoss(tf.keras.losses.Loss):\n def __init__(\n self,\n positive_class_weight=1,\n robust_lambda=0,\n confidence_penalty=0\n ):\n super(BWCEWLoss, self).__init__()\n\n self.positive_class_weight = positive_class_weight\n self.robust_lambda = robust_lambda\n self.confidence_penalty = confidence_penalty\n\n def call(self, y_true, y_pred):\n logits = y_pred[LOGITS]\n\n # weighted cross entropy\n train_loss = tf.nn.weighted_cross_entropy_with_logits(\n labels=tf.cast(y_true, tf.float32),\n logits=logits,\n pos_weight=self.positive_class_weight\n )\n\n # robust lambda\n if self.robust_lambda > 0:\n train_loss = ((1 - self.robust_lambda) * train_loss +\n self.robust_lambda / 2)\n\n train_mean_loss = tf.reduce_mean(\n train_loss\n )\n\n # confidence penalty\n if self.confidence_penalty > 0:\n probabilities = tf.nn.sigmoid(logits)\n mean_penalty = mean_confidence_penalty(probabilities, 2)\n train_mean_loss += self.confidence_penalty * mean_penalty\n\n return train_mean_loss\n\n\nclass SoftmaxCrossEntropyLoss(tf.keras.losses.Loss):\n def __init__(\n self,\n num_classes=0,\n feature_loss=None,\n name=None\n ):\n super(SoftmaxCrossEntropyLoss, self).__init__(name=name)\n self.num_classes = num_classes\n self.feature_loss = feature_loss\n\n def call(self, y, y_pred):\n vector_labels = tf.one_hot(\n tf.cast(y, dtype=tf.int64),\n self.num_classes\n )\n\n loss = weighted_softmax_cross_entropy(\n y_pred[LOGITS],\n vector_labels,\n **self.feature_loss\n )\n\n return loss\n\n\n# todo tf2: wait for fix to https://github.com/tensorflow/tensorflow/issues/41792\nclass SampledSoftmaxCrossEntropyLoss(tf.keras.losses.Loss):\n def __init__(\n self,\n decoder_obj=None,\n num_classes=0,\n feature_loss=None,\n name=None\n ):\n super(SampledSoftmaxCrossEntropyLoss, self).__init__(name=name)\n\n self.decoder_obj = decoder_obj\n self.num_classes = num_classes\n self.feature_loss = feature_loss\n\n def call(self, y, y_pred):\n decoder_weights = self.decoder_obj.get_weights()[0]\n decoder_biases = self.decoder_obj.get_weights()[1]\n\n loss = sampled_softmax_cross_entropy(\n y,\n y_pred[LAST_HIDDEN],\n num_classes=self.num_classes,\n decoder_weights=decoder_weights,\n decoder_biases=decoder_biases,\n **self.feature_loss\n )\n\n return loss\n\n\nclass SigmoidCrossEntropyLoss(tf.keras.losses.Loss):\n def __init__(\n self,\n name=None\n ):\n super(SigmoidCrossEntropyLoss, self).__init__(name=name)\n\n def call(self, y, y_pred):\n loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.cast(y, tf.float32),\n 
logits=y_pred[LOGITS]\n )\n\n return loss\n\n\nclass SequenceLoss(tf.keras.losses.Loss):\n def __init__(self, name=None, **kwargs):\n super(SequenceLoss, self).__init__(name=name)\n self.loss_function = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True,\n reduction='none'\n )\n\n def call(self, y_true, y_pred):\n # y_true: shape [batch_size, sequence_size]\n # y_pred: shape [batch_size, sequence_size, num_classes]\n\n y_pred = y_pred[LOGITS]\n y_true = tf.convert_to_tensor(y_true, dtype=tf.int64)\n\n # pad the shorter sequence\n if y_true.shape[1] > y_pred.shape[1]:\n pad = tf.zeros(\n [\n y_pred.shape[0],\n y_true.shape[1] - y_pred.shape[1],\n y_pred.shape[2]\n ],\n dtype=y_pred.dtype)\n y_pred = tf.concat([y_pred, pad], axis=1)\n elif y_pred.shape[1] > y_true.shape[1]:\n pad = tf.zeros(\n [\n y_true.shape[0],\n y_pred.shape[1] - y_true.shape[1],\n ],\n dtype=y_true.dtype\n )\n y_true = tf.concat([y_true, pad], axis=1)\n\n longest_sequence_length = tf.maximum(sequence_length_2D(y_true),\n sequence_length_3D(y_pred))\n longest_sequence_length += 1 # for EOS\n longest_sequence_length = tf.minimum(longest_sequence_length,\n y_true.shape[1])\n mask = tf.sequence_mask(\n longest_sequence_length,\n maxlen=y_true.shape[1],\n dtype=tf.float32\n )\n # compute loss based on valid time steps\n loss = self.loss_function(y_true, y_pred)\n loss = loss * mask\n loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)\n return loss\n\n\n# end of custom classes\n\n\ndef softmax_cross_entropy_with_class_weighting(logits, one_hot_labels,\n class_weights,\n labels_smoothing=0.0):\n class_weights_const = tf.expand_dims(\n tf.constant(class_weights, dtype=tf.float32), 0)\n sample_weights = tf.reduce_sum(\n tf.multiply(one_hot_labels, class_weights_const), 1)\n return tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels,\n logits=logits,\n label_smoothing=labels_smoothing,\n weights=sample_weights,\n reduction=tf.losses.Reduction.NONE)\n\n\ndef sigmoid_cross_entropy_with_class_weighting(logits, multi_class_labels,\n class_weights,\n labels_smoothing=0.0):\n class_weights_const = tf.expand_dims(\n tf.constant(class_weights, dtype=tf.float32), 0)\n sample_weights = tf.reduce_sum(\n tf.multiply(multi_class_labels, class_weights_const), 1)\n return tf.losses.sigmoid_cross_entropy(\n multi_class_labels=multi_class_labels,\n logits=logits,\n label_smoothing=labels_smoothing,\n weights=sample_weights,\n reduction=tf.losses.Reduction.NONE)\n\n\ndef mean_confidence_penalty(probabilities, num_classes):\n max_entropy = tf.constant(np.log(num_classes), dtype=tf.float32)\n # clipping needed for avoiding log(0) = -inf\n entropy_per_class = tf.maximum(- probabilities *\n tf.log(\n tf.clip_by_value(probabilities, 1e-10,\n 1)), 0)\n entropy = tf.reduce_sum(entropy_per_class, -1)\n penalty = (max_entropy - entropy) / max_entropy\n return tf.reduce_mean(penalty)\n\n\ndef seq2seq_sequence_loss(targets, targets_sequence_length, logits,\n softmax_function=None):\n batch_max_targets_sequence_length = tf.shape(targets)[1]\n batch_max_logits_sequence_length = tf.shape(logits)[1]\n difference = tf.maximum(0,\n batch_max_targets_sequence_length - batch_max_logits_sequence_length)\n padded_logits = tf.pad(logits, [[0, 0], [0, difference], [0, 0]])\n padded_logits = padded_logits[:, :batch_max_targets_sequence_length, :]\n\n with tf.variable_scope('sequence_loss'):\n sequence_loss = tfa.seq2seq.sequence_loss(\n padded_logits,\n targets,\n tf.sequence_mask(targets_sequence_length,\n batch_max_targets_sequence_length,\n 
dtype=tf.float32),\n average_across_timesteps=True,\n average_across_batch=False,\n softmax_loss_function=softmax_function\n )\n\n # batch_max_seq_length = tf.shape(logits)[1]\n # unpadded_targets = targets[:, :tf.shape(logits)[1]]\n # with tf.variable_scope('sequence_loss'):\n # sequence_loss = tfa.seq2seq.sequence_loss(\n # logits,\n # unpadded_targets,\n # tf.sequence_mask(targets_sequence_length, batch_max_seq_length, dtype=tf.float32),\n # average_across_timesteps=True,\n # average_across_batch=False,\n # softmax_loss_function=softmax_function\n # )\n\n return sequence_loss\n\n\n# manual implementation of sequence loss\ndef cross_entropy_sequence_loss(logits, targets, sequence_length):\n \"\"\"Calculates the per-example cross-entropy loss for a sequence of logits and\n masks out all losses passed the sequence length.\n Args:\n logits: Logits of shape `[B, T, vocab_size]`\n targets: Target classes of shape `[B, T]`\n sequence_length: An int32 tensor of shape `[B]` corresponding\n to the length of each input\n Returns:\n A tensor of shape [T, B] that contains the loss per example, per time step.\n \"\"\"\n with tf.variable_scope('sequence_loss'):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=targets)\n # Mask out the losses we don't care about\n loss_mask = tf.sequence_mask(\n tf.cast(sequence_length, tf.int32),\n tf.cast(tf.shape(targets)[1], tf.int32)\n )\n losses = losses * tf.cast(loss_mask, tf.float32)\n return losses\n\n\ndef sampled_softmax_cross_entropy(\n labels,\n last_hidden,\n num_classes=1,\n decoder_weights=None,\n decoder_biases=None,\n sampler=None,\n negative_samples=0,\n class_counts=0,\n distortion=1,\n unique=False,\n **kwargs\n):\n labels = tf.cast(\n tf.expand_dims(labels, -1),\n tf.int64\n )\n sampled_values = sample_values_from_classes(labels, sampler, num_classes,\n negative_samples, unique,\n class_counts, distortion)\n train_loss = tf.nn.sampled_softmax_loss(\n weights=tf.transpose(decoder_weights),\n biases=decoder_biases,\n labels=labels,\n inputs=last_hidden,\n num_sampled=negative_samples,\n num_classes=num_classes,\n sampled_values=sampled_values)\n\n return train_loss\n\n\ndef sequence_sampled_softmax_cross_entropy(targets, targets_sequence_length,\n eval_logits, train_logits,\n class_weights,\n class_biases, loss,\n num_classes):\n batch_max_targets_sequence_length = tf.shape(targets)[1]\n\n batch_max_train_logits_sequence_length = tf.shape(train_logits)[1]\n difference_train = batch_max_targets_sequence_length - batch_max_train_logits_sequence_length\n padded_train_logits = tf.pad(train_logits,\n [[0, 0], [0, difference_train], [0, 0]])\n\n batch_max_eval_logits_sequence_length = tf.shape(eval_logits)[1]\n difference_eval = batch_max_targets_sequence_length - batch_max_eval_logits_sequence_length\n padded_eval_logits = tf.pad(eval_logits,\n [[0, 0], [0, difference_eval], [0, 0]])\n\n # batch_max_seq_length = tf.shape(train_logits)[1]\n # unpadded_targets = targets[:, :batch_max_seq_length]\n # output_exp = tf.cast(tf.reshape(unpadded_targets, [-1, 1]), tf.int64)\n output_exp = tf.cast(tf.reshape(targets, [-1, 1]), tf.int64)\n sampled_values = sample_values_from_classes(output_exp, loss['sampler'],\n num_classes,\n loss['negative_samples'],\n loss['unique'],\n loss['class_counts'],\n loss['distortion'])\n\n def _sampled_loss(labels, logits):\n labels = tf.cast(labels, tf.int64)\n labels = tf.reshape(labels, [-1, 1])\n logits = tf.cast(logits, tf.float32)\n\n return tf.cast(\n 
tf.nn.sampled_softmax_loss(weights=tf.transpose(class_weights),\n biases=class_biases,\n labels=labels,\n inputs=logits,\n num_sampled=loss['negative_samples'],\n num_classes=num_classes,\n sampled_values=sampled_values),\n tf.float32)\n\n train_loss = tfa.seq2seq.sequence_loss(\n padded_train_logits,\n targets,\n tf.sequence_mask(targets_sequence_length,\n batch_max_targets_sequence_length, dtype=tf.float32),\n average_across_timesteps=True,\n average_across_batch=False,\n softmax_loss_function=_sampled_loss\n )\n\n # batch_max_seq_length_eval = tf.shape(eval_logits)[1]\n # unpadded_targets_eval = targets[:, :batch_max_seq_length_eval]\n\n eval_loss = tfa.seq2seq.sequence_loss(\n padded_eval_logits,\n targets,\n tf.sequence_mask(targets_sequence_length,\n batch_max_targets_sequence_length, dtype=tf.float32),\n average_across_timesteps=True,\n average_across_batch=False\n )\n\n return train_loss, eval_loss\n\n\ndef weighted_softmax_cross_entropy(\n logits,\n vector_labels,\n class_weights=1,\n labels_smoothing=0,\n **kwargs\n):\n use_class_weights = not isinstance(class_weights, (int, float))\n if use_class_weights:\n train_loss = softmax_cross_entropy_with_class_weighting(\n logits,\n vector_labels,\n class_weights,\n labels_smoothing\n )\n else:\n train_loss = tf.keras.losses.categorical_crossentropy(\n y_true=vector_labels,\n y_pred=logits,\n from_logits=True,\n label_smoothing=labels_smoothing\n )\n return train_loss\n\n\ndef loss_multilabel(logits, vector_labels, loss):\n # input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`\n # output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.\n # let `x = logits`, `z = labels`. The logistic loss is:z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n use_class_weights = not isinstance(loss['class_weights'], (int, float))\n if use_class_weights:\n train_loss = sigmoid_cross_entropy_with_class_weighting(\n logits,\n vector_labels,\n loss['class_weights'],\n loss['labels_smoothing']\n )\n else:\n train_loss = tf.losses.sigmoid_cross_entropy(\n multi_class_labels=vector_labels,\n logits=logits,\n label_smoothing=loss[\n 'labels_smoothing'],\n reduction=Reduction.NONE)\n return train_loss\n\n\ndef absolute_loss(y, y_hat):\n return tf.abs(tf.subtract(y, y_hat))\n\n\ndef squared_loss(y, y_hat):\n return tf.square(tf.subtract(y, y_hat))\n\n\ndef mean_absolute_error(y, y_hat, weight=1.0):\n return tf.reduce_mean(tf.multiply(absolute_loss(y, y_hat), weight))\n\n\ndef mean_squared_error(y, y_hat, weight=1.0):\n return tf.reduce_mean(tf.multiply(squared_loss(y, y_hat), weight))\n\n\ndef sample_values_from_classes(labels, sampler, num_classes, negative_samples,\n unique, class_counts, distortion):\n \"\"\"returns sampled_values using the chosen sampler\"\"\"\n if sampler == 'fixed_unigram':\n sampled_values = tf.random.fixed_unigram_candidate_sampler(\n true_classes=labels,\n num_true=1,\n num_sampled=negative_samples,\n unique=unique,\n range_max=num_classes,\n unigrams=class_counts,\n distortion=distortion\n )\n elif sampler == 'uniform':\n sampled_values = tf.random.uniform_candidate_sampler(\n true_classes=labels,\n num_true=1,\n num_sampled=negative_samples,\n unique=unique,\n range_max=num_classes\n )\n elif sampler == 'log_uniform':\n sampled_values = tf.random.log_uniform_candidate_sampler(\n true_classes=labels,\n num_true=1,\n num_sampled=negative_samples,\n unique=unique,\n range_max=num_classes\n )\n elif sampler == 'learned_unigram':\n sampled_values = 
tf.random.fixed_unigram_candidate_sampler(\n true_classes=labels,\n num_true=1,\n num_sampled=negative_samples,\n unique=unique,\n range_max=num_classes,\n unigrams=class_counts,\n distortion=distortion\n )\n else:\n raise ValueError('Unsupported sampler {}'.format(sampler))\n return sampled_values\n" ]
[ [ "tensorflow.keras.losses.categorical_crossentropy", "tensorflow.reshape", "tensorflow.clip_by_value", "tensorflow.random.uniform_candidate_sampler", "tensorflow.cast", "tensorflow.shape", "tensorflow.concat", "numpy.log", "tensorflow.subtract", "tensorflow.transpose", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.pad", "tensorflow.losses.sigmoid_cross_entropy", "tensorflow.random.fixed_unigram_candidate_sampler", "tensorflow.nn.sigmoid", "tensorflow.zeros", "tensorflow.minimum", "tensorflow.expand_dims", "tensorflow.losses.softmax_cross_entropy", "tensorflow.reduce_sum", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.convert_to_tensor", "tensorflow.sequence_mask", "tensorflow.multiply", "tensorflow.random.log_uniform_candidate_sampler", "tensorflow.maximum", "tensorflow.reduce_mean" ] ]
rmarshall10/Insight_App
[ "f0276e79d0ceaf2787762a1602d62a19db1c1800" ]
[ "app/routes.py" ]
[ "from flask import render_template\nfrom flask import request, Response, make_response\nfrom app import app\nfrom app import video_tracker\nfrom flask import send_from_directory\nimport os\nfrom werkzeug import secure_filename\nimport posenet\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n#@app.route('/')\n@app.route('/index')\ndef index():\n\treturn render_template(\"index.html\", title = 'Home', user = { 'nickname': 'rockstar!' })\n\n@app.route('/')\n@app.route('/upload')\ndef upload_file():\n\treturn render_template('upload.html')\n\ndef allowed_extension(filename):\n\t'''This function can be implemented to check if the uploaded file is a video file'''\n\tif not \".\" in filename:\n\t\treturn False\n\text = filename.rsplit(\".\",1)[1]\n\n\tif ext.upper() in app.config['ALLOWED_EXTENSIONS']:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\t\n@app.route('/uploader', methods = ['GET', 'POST'])\ndef uploaded_file():\n\t'''If a video is uploaded, process the video and display output'''\n\tif request.method == 'POST':\n\t\tf = request.files['file']\n\t\t# if not allowed_extension(f.filename):\n\t\t# \treturn\n\n\t\t# save the file\n\t\tf.save(secure_filename(f.filename))\n\t\n\t\t# load the soccer ball detection model\n\t\tmodel_path = \"app/static/\"\n\t\tmodel_name = \"frozen_inference_graph.pb\"\n\t\tmodel_text = \"graph_text.pbtxt\"\n\t\tnet = video_tracker.loading_model(model_path + model_name, model_path + model_text)\n\t\t\n\t\t# load pose model\n\t\tsess = tf.Session()\n\t\tmodel_cfg, model_outputs = posenet.load_model(101, sess)\n\t\toutput_stride = model_cfg['output_stride']\n\n\t\t#OPTIONALLY can output the following, then show output as pre-loaded gif\n\t\t#(video_bytes, bounces, body_part_bounces, body_part_sequence) = video_tracker.run_video(f.filename, net, sess, output_stride, model_outputs)\n\t\t#return Response(video_tracker.display_video(video_bytes), mimetype='multipart/x-mixed-replace; boundary=frame')\n\t\t\n\t\t# Show output video as it is processed in real time\n\t\treturn Response(video_tracker.run_video(f.filename, net, sess, output_stride, model_outputs), mimetype='multipart/x-mixed-replace; boundary=frame')\n\t\t\t\n\ndef example_file(link):\n\t'''If user clicks on an example link, process the static video file'''\n\n\tif link == 1:\n\t\tvideo_name = \"ball_test6.mp4\"\n\telif link == 2:\n\t\tvideo_name = \"ball_test4.mp4\"\n\telif link ==3:\n\t\tvideo_name = \"ball_test13.mp4\"\n\n\t#load the soccer ball detection model\n\tmodel_path = \"app/static/\"\n\tmodel_name = \"frozen_inference_graph.pb\"\n\tmodel_text = \"graph_text.pbtxt\"\n\tfilename = model_path + video_name\n\n\tnet = video_tracker.loading_model(model_path + model_name, model_path + model_text)\n\t\n\t#load the pose model\n\tsess = tf.Session()\n\tmodel_cfg, model_outputs = posenet.load_model(101, sess)\n\toutput_stride = model_cfg['output_stride']\n\n\t#OPTIONALLY can output the following, then show output as pre-loaded gif\n\t#(video_bytes, bounces, body_part_bounces, body_part_sequence) = video_tracker.run_video(f.filename, net, sess, output_stride, model_outputs)\n\t#return Response(video_tracker.display_video(video_bytes), mimetype='multipart/x-mixed-replace; boundary=frame')\n\t\n\treturn Response(video_tracker.run_video(filename, net, sess, output_stride, model_outputs), mimetype='multipart/x-mixed-replace; boundary=frame')\n\t\n\n\n@app.route('/example_1')\ndef example1():\n\treturn example_file(1)\n\n@app.route('/example_2')\ndef example2():\n\treturn 
example_file(2)\n\n@app.route('/example_3')\ndef example3():\n\treturn example_file(3)\n\n@app.route('/about')\ndef about():\t\n\treturn \"Insight Fellows project\"\n\n@app.route('/contact')\ndef contact():\n\treturn \"Email: ryanmarshall89@gmail.com\"" ]
[ [ "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.Session" ] ]
githubartema/efficientnet-lite-keras
[ "a8bda33caf9ecc69bde8f2ea2689ac24e7da7c02" ]
[ "test_efficientnet_lite/test_tfmot_qat_possibility.py" ]
[ "from typing import Callable, Tuple\n\nimport tensorflow as tf\nimport tensorflow_model_optimization as tfmot\nfrom absl.testing import absltest, parameterized\n\nfrom test_efficientnet_lite.test_model import TEST_PARAMS\n\n# Disable GPU\ntf.config.set_visible_devices([], \"GPU\")\n\n\nclass TestEfficientNetLiteQATWrap(parameterized.TestCase):\n def setUp(self):\n tf.keras.backend.clear_session()\n\n @parameterized.named_parameters(TEST_PARAMS)\n def test_qat_wrapper(self, model_fn: Callable, input_shape: Tuple[int, int]):\n model = model_fn(weights=None, input_shape=input_shape + (3,))\n tfmot.quantization.keras.quantize_model(model)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "tensorflow.keras.backend.clear_session", "tensorflow.config.set_visible_devices" ] ]
IuAyala/Self-Driving-Cars-Course
[ "3171a556726e1a67d43a8fae4512c66d3715cf1c" ]
[ "3_road_sign_classification/preprocessing.py" ]
[ "import os\nfrom pathlib import Path\nimport xml.dom.minidom\nimport cv2\nfrom skimage.transform import rescale, resize\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\nimport numpy as np\nimport pickle\n\n# Dataset --> https://www.kaggle.com/andrewmvd/road-sign-detection\n# Classes (4) --> speedlimit, trafficlight, crosswalk, stop\n\n# Prameters\nSEED = 123 # for reprducibility\nDATASET_FOLDER = Path(__file__).parent / \"original_dataset\"\nANNOTATIONS_FOLDER = DATASET_FOLDER / \"annotations\"\nIMAGES_FOLDER = DATASET_FOLDER / \"images\"\nMODIFIED_DATASET_FOLDER = Path(__file__).parent / \"modified_dataset\"\nSTANDARD_SHAPE = (100, 100)\nMIN_SIDE = 20 # if one side is smaller than this, the image will be discarded\n\n\ndef crop_image(image, x_min, x_max, y_min, y_max):\n \"\"\"Crops a square shaped part of the image, ensuring that the image is centered as long as the image is big enough for the square crop\n\n Args:\n image (np.ndarray): image as a 3D numpy array\n x_min (int): initial column of the crop\n x_max (int): final column of the crop\n y_min (int): initial row of the crop\n y_max (int): final row of the crop\n\n Raises:\n Exception: if the image is not big enough raises an exception\n\n Returns:\n np.ndarray: square crop of the input image\n \"\"\"\n x_range = x_max - x_min # width axis\n y_range = y_max - y_min # height axis\n\n rows, cols = image.shape[:2]\n while x_range >= rows or y_range >= cols:\n if x_range >= rows:\n x_range -= 1\n x_max -= 1\n if y_range >= cols:\n y_range -= 1\n y_max -= 1\n\n if y_range > x_range: # y_range greater\n x_middle = (x_min + x_max) / 2\n x_min = int(x_middle - y_range / 2)\n x_max = int(x_min + y_range)\n elif y_range < x_range: # x_range greater\n y_middle = (y_min + y_max) / 2\n y_min = int(y_middle - x_range / 2)\n y_max = int(y_min + x_range)\n\n count = 0\n while (\n x_min < 0\n or y_min < 0\n or x_max > image.shape[1] - 1\n or y_max > image.shape[0] - 1\n ):\n if x_min < 0:\n x_min += 1\n x_max += 1\n if y_min < 0:\n y_min += 1\n y_max += 1\n if x_max > image.shape[1] - 1:\n x_min -= 1\n x_max -= 1\n if y_max > image.shape[0] - 1:\n y_min -= 1\n y_max -= 1\n\n count += 1\n if count > 1000:\n raise Exception(\n \"Stuck in while loop!!\"\n ) # TODO: needs improving - smarter behaviour\n\n new_image = image[y_min:y_max, x_min:x_max, :]\n\n return new_image\n\n\ndef read_dataset(overwrite=False, standar_size=True, store_images=True):\n # Annotation files list\n annotation_files = [f for f in ANNOTATIONS_FOLDER.iterdir() if f.is_file()]\n\n cropped_images_folder = MODIFIED_DATASET_FOLDER / \"cropped_images\"\n if overwrite and cropped_images_folder.exists():\n os.remove(cropped_images_folder)\n cropped_images_folder.mkdir(parents=True, exist_ok=True)\n\n X = []\n y = []\n too_small_count = 0\n # Create features for each element\n count = 0\n for annotation_file in annotation_files:\n doc = xml.dom.minidom.parse(str(annotation_file))\n folder = doc.getElementsByTagName(\"folder\")[0].firstChild.nodeValue\n # Image name\n filename = doc.getElementsByTagName(\"filename\")[0].firstChild.nodeValue\n\n # Load image\n image_path = IMAGES_FOLDER / filename\n image_uint8 = cv2.imread(str(image_path)) # range 0-255, encoded in uint8\n image_uint8 = cv2.cvtColor(image_uint8, cv2.COLOR_BGR2RGB)\n\n # Normalize image\n image = image_uint8 / 255 # range 0-1, encoded float64\n\n # Get element name and bounding box\n names = doc.getElementsByTagName(\"name\")\n bndboxs = doc.getElementsByTagName(\"bndbox\")\n label = 
doc.getElementsByTagName(\"name\")[0].firstChild.nodeValue\n\n for name, bndbox in zip(names, bndboxs):\n label = name.firstChild.nodeValue\n xmin = int(bndbox.getElementsByTagName(\"xmin\")[0].firstChild.nodeValue)\n ymin = int(bndbox.getElementsByTagName(\"ymin\")[0].firstChild.nodeValue)\n xmax = int(bndbox.getElementsByTagName(\"xmax\")[0].firstChild.nodeValue)\n ymax = int(bndbox.getElementsByTagName(\"ymax\")[0].firstChild.nodeValue)\n\n if min(xmax - xmin, ymax - ymin) < MIN_SIDE:\n too_small_count += 1\n continue\n\n # Crop image\n new_image = crop_image(image, xmin, xmax, ymin, ymax)\n # new_image = image[ymin:ymax, xmin:xmax, :]\n if standar_size:\n new_image = resize(new_image, STANDARD_SHAPE, anti_aliasing=False)\n\n # Add elements to dataset\n X.append(new_image)\n y.append(label)\n\n # Save image\n if store_images:\n im = Image.fromarray((new_image * 255).astype(np.uint8))\n image_path = cropped_images_folder / f\"image_{count}.png\"\n im.save(image_path)\n\n count += 1\n\n print(\"Number images skipped - too small:\", too_small_count)\n\n return X, y\n\n\ndef get_dataset(recompute=False):\n # Check if already has been generated\n dataset_file = MODIFIED_DATASET_FOLDER / \"dataset.pickle\"\n if dataset_file.exists() and not recompute:\n print(\"INFO: modified dataset already created\")\n with open(dataset_file, \"rb\") as file:\n X, y = pickle.load(file)\n else:\n X, y = read_dataset()\n # Save dataset\n with open(dataset_file, \"wb\") as file:\n # A new file will be created\n pickle.dump([X, y], file)\n\n # Split dataset\n X_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.2,\n shuffle=True,\n random_state=SEED,\n )\n\n return X_train, X_test, y_train, y_test\n\n\ndef dict_of_classes(X_train, y_train):\n output = {}\n\n for img, label in zip(X_train, y_train):\n if label not in output:\n output[label] = []\n\n output[label].append(img)\n\n return output\n\n\nif __name__ == \"__main__\":\n X_train, X_test, y_train, y_test = get_dataset(recompute=True)\n\n print(\"Classes\", set(y_train))\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
anchoranalysis/anchor-python-training
[ "f2089540b381cba8b2188a15797912f984d39f6c" ]
[ "src/anchor_python_training/visualize/reconstruction.py" ]
[ "\"\"\"Plots reconstruction of images against their original forms.\"\"\"\nimport torch\nimport torch.nn as nn\nfrom typing import Iterable\nimport numpy as np\n\n\nfrom .comparison import plot_images_two_rows\n\n\ndef plot_reconstruction_on_first_batch(\n loader: torch.utils.data.DataLoader, model_reconstructing: nn.Module\n) -> None:\n \"\"\"Plots the first batch of reconstructed images against the original images.\n\n :param loader: a loader from which the first batch of images is loaded.\n :param model_reconstructing: a model which reconstructs an image (produces an image of identical structure to\n the input image, when passed an input-image).\n \"\"\"\n\n first_batch = _retrieve_first_input_batch_from_loader(loader)\n\n with torch.no_grad():\n reconstruction = model_reconstructing(first_batch)\n\n plot_images_two_rows(\n _images_from_tensor(first_batch),\n _images_from_tensor(reconstruction),\n len(first_batch),\n )\n\n\ndef _retrieve_first_input_batch_from_loader(\n loader: torch.utils.data.DataLoader,\n) -> torch.Tensor:\n \"\"\"Retrieves the input-images from the first batch in a loader.\"\"\"\n for batch_features in loader:\n return batch_features[0]\n raise ValueError\n\n\ndef _images_from_tensor(images: torch.Tensor) -> Iterable[np.ndarray]:\n \"\"\"Converts a tensor to an iterable of images, converting each image to an appropriately sized numpy array.\"\"\"\n for index in range(images.size()[0]):\n # Convert from PyTorch RGB format (3, y, x) to Numpy expected RGB format (y, x, 3)\n yield images[index].permute(1, 2, 0).numpy()\n" ]
[ [ "torch.no_grad" ] ]