repo_name (string, lengths 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
jpolchlo/rasterframes | ["1ec7e9e65a4ac7f8f17863d64a4884c35cd24403"] | ["pyrasterframes/src/main/python/pyrasterframes/__init__.py"] | [
"#\n# This software is licensed under the Apache 2 license, quoted below.\n#\n# Copyright 2019 Astraea, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# [http://www.apache.org/licenses/LICENSE-2.0]\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n\n\"\"\"\nModule initialization for PyRasterFrames. This is where much of the cool stuff is\nappended to PySpark classes.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession, DataFrame, DataFrameReader, DataFrameWriter\nfrom pyspark.sql.column import _to_java_column\nfrom geomesa_pyspark import types # <-- required to ensure Shapely UDTs get registered.\n\n# Import RasterFrameLayer types and functions\nfrom .rf_context import RFContext\nfrom .version import __version__\nfrom .rf_types import RasterFrameLayer, TileExploder, TileUDT, RasterSourceUDT\nimport geomesa_pyspark.types # enable vector integrations\n\n__all__ = ['RasterFrameLayer', 'TileExploder']\n\n\ndef _rf_init(spark_session):\n \"\"\" Adds RasterFrames functionality to PySpark session.\"\"\"\n if not hasattr(spark_session, \"rasterframes\"):\n spark_session.rasterframes = RFContext(spark_session)\n spark_session.sparkContext._rf_context = spark_session.rasterframes\n\n return spark_session\n\n\ndef _kryo_init(builder):\n \"\"\"Registers Kryo Serializers for better performance.\"\"\"\n # NB: These methods need to be kept up-to-date wit those in `org.locationtech.rasterframes.extensions.KryoMethods`\n builder \\\n .config(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\") \\\n .config(\"spark.kryo.registrator\", \"org.locationtech.rasterframes.util.RFKryoRegistrator\") \\\n .config(\"spark.kryoserializer.buffer.max\", \"500m\")\n return builder\n\ndef _convert_df(df, sp_key=None, metadata=None):\n ctx = SparkContext._active_spark_context._rf_context\n\n if sp_key is None:\n return RasterFrameLayer(ctx._jrfctx.asLayer(df._jdf), ctx._spark_session)\n else:\n import json\n return RasterFrameLayer(ctx._jrfctx.asLayer(\n df._jdf, _to_java_column(sp_key), json.dumps(metadata)), ctx._spark_session)\n\n\ndef _raster_join(df, other, left_extent=None, left_crs=None, right_extent=None, right_crs=None, join_exprs=None):\n ctx = SparkContext._active_spark_context._rf_context\n if join_exprs is not None:\n assert left_extent is not None and left_crs is not None and right_extent is not None and right_crs is not None\n # Note the order of arguments here.\n cols = [join_exprs, left_extent, left_crs, right_extent, right_crs]\n jdf = ctx._jrfctx.rasterJoin(df._jdf, other._jdf, *[_to_java_column(c) for c in cols])\n\n elif left_extent is not None:\n assert left_crs is not None and right_extent is not None and right_crs is not None\n cols = [left_extent, left_crs, right_extent, right_crs]\n jdf = ctx._jrfctx.rasterJoin(df._jdf, other._jdf, *[_to_java_column(c) for c in cols])\n\n else:\n jdf = ctx._jrfctx.rasterJoin(df._jdf, other._jdf)\n\n return RasterFrameLayer(jdf, ctx._spark_session)\n\n\ndef _layer_reader(df_reader, format_key, path, **options):\n \"\"\" 
Loads the file of the given type at the given path.\"\"\"\n df = df_reader.format(format_key).load(path, **options)\n return _convert_df(df)\n\n\ndef _aliased_reader(df_reader, format_key, path, **options):\n \"\"\" Loads the file of the given type at the given path.\"\"\"\n return df_reader.format(format_key).load(path, **options)\n\n\ndef _aliased_writer(df_writer, format_key, path, **options):\n \"\"\" Saves the dataframe to a file of the given type at the given path.\"\"\"\n return df_writer.format(format_key).save(path, **options)\n\n\ndef _raster_reader(\n df_reader,\n source=None,\n catalog_col_names=None,\n band_indexes=None,\n tile_dimensions=(256, 256),\n lazy_tiles=True,\n **options):\n \"\"\"\n Returns a Spark DataFrame from raster data files specified by URIs.\n Each row in the returned DataFrame will contain a column with struct of (CRS, Extent, Tile) for each item in\n `catalog_col_names`.\n Multiple bands from the same raster file are spread across rows of the DataFrame. See `band_indexes` param.\n If bands from a scene are stored in separate files, provide a DataFrame to the `source` parameter.\n\n For more details and example usage, consult https://rasterframes.io/raster-read.html\n\n :param source: a string, list of strings, list of lists of strings, a Pandas DataFrame or a Spark DataFrame giving URIs to the raster data to read.\n :param catalog_col_names: required if `source` is a DataFrame or CSV string. It is a list of strings giving the names of columns containing URIs to read.\n :param band_indexes: list of integers indicating which bands, zero-based, to read from the raster files specified; default is to read only the first band.\n :param tile_dimensions: tuple or list of two indicating the default tile dimension as (columns, rows).\n :param lazy_tiles: If true (default) only generate minimal references to tile contents; if false, fetch tile cell values.\n :param options: Additional keyword arguments to pass to the Spark DataSource.\n \"\"\"\n\n from pandas import DataFrame as PdDataFrame\n\n if 'catalog' in options:\n source = options['catalog'] # maintain back compatibility with 0.8.0\n\n def to_csv(comp):\n if isinstance(comp, str):\n return comp\n else:\n return ','.join(str(v) for v in comp)\n\n def temp_name():\n \"\"\" Create a random name for a temporary view \"\"\"\n import uuid\n return str(uuid.uuid4()).replace('-', '')\n\n if band_indexes is None:\n band_indexes = [0]\n\n options.update({\n \"band_indexes\": to_csv(band_indexes),\n \"tile_dimensions\": to_csv(tile_dimensions),\n \"lazy_tiles\": lazy_tiles\n })\n\n # Parse the `source` argument\n path = None # to pass into `path` param\n if isinstance(source, list):\n if all([isinstance(i, str) for i in source]):\n path = None\n catalog = None\n options.update(dict(paths='\\n'.join([str(i) for i in source]))) # pass in \"uri1\\nuri2\\nuri3\\n...\"\n if all([isinstance(i, list) for i in source]):\n # list of lists; we will rely on pandas to:\n # - coerce all data to str (possibly using objects' __str__ or __repr__)\n # - ensure data is not \"ragged\": all sublists are same len\n path = None\n catalog_col_names = ['proj_raster_{}'.format(i) for i in range(len(source[0]))] # assign these names\n catalog = PdDataFrame(source,\n columns=catalog_col_names,\n dtype=str,\n )\n elif isinstance(source, str):\n if '\\n' in source or '\\r' in source:\n # then the `source` string is a catalog as a CSV (header is required)\n path = None\n catalog = source\n else:\n # interpret source as a single URI string\n path = 
source\n catalog = None\n else:\n # user has passed in some other type, we will try to interpret as a catalog\n catalog = source\n\n if catalog is not None:\n if catalog_col_names is None:\n raise Exception(\"'catalog_col_names' required when DataFrame 'catalog' specified\")\n\n if isinstance(catalog, str):\n options.update({\n \"catalog_csv\": catalog,\n \"catalog_col_names\": to_csv(catalog_col_names)\n })\n elif isinstance(catalog, DataFrame):\n # check catalog_col_names\n assert all([c in catalog.columns for c in catalog_col_names]), \\\n \"All items in catalog_col_names must be the name of a column in the catalog DataFrame.\"\n # Create a random view name\n tmp_name = temp_name()\n catalog.createOrReplaceTempView(tmp_name)\n options.update({\n \"catalog_table\": tmp_name,\n \"catalog_col_names\": to_csv(catalog_col_names)\n })\n elif isinstance(catalog, PdDataFrame):\n # check catalog_col_names\n assert all([c in catalog.columns for c in catalog_col_names]), \\\n \"All items in catalog_col_names must be the name of a column in the catalog DataFrame.\"\n\n # Handle to active spark session\n session = SparkContext._active_spark_context._rf_context._spark_session\n # Create a random view name\n tmp_name = temp_name()\n spark_catalog = session.createDataFrame(catalog)\n spark_catalog.createOrReplaceTempView(tmp_name)\n options.update({\n \"catalog_table\": tmp_name,\n \"catalog_col_names\": to_csv(catalog_col_names)\n })\n\n return df_reader \\\n .format(\"raster\") \\\n .load(path, **options)\n\n\ndef _geotiff_writer(\n df_writer,\n path=None,\n crs=None,\n raster_dimensions=None,\n **options):\n\n def set_dims(parts):\n parts = [int(p) for p in parts]\n assert len(parts) == 2, \"Expected dimensions specification to have exactly two components\"\n assert all([p > 0 for p in parts]), \"Expected all components in dimensions to be positive integers\"\n options.update({\n \"imageWidth\": parts[0],\n \"imageHeight\": parts[1]\n })\n parts = [int(p) for p in parts]\n assert all([p > 0 for p in parts]), 'nice message'\n\n if raster_dimensions is not None:\n if isinstance(raster_dimensions, (list, tuple)):\n set_dims(raster_dimensions)\n elif isinstance(raster_dimensions, str):\n set_dims(raster_dimensions.split(','))\n\n if crs is not None:\n options.update({\n \"crs\": crs\n })\n\n return _aliased_writer(df_writer, \"geotiff\", path, **options)\n\n\n# Patch RasterFrames initialization method on SparkSession to mirror Scala approach\nSparkSession.withRasterFrames = _rf_init\n\n# Patch Kryo serialization initialization method on SparkSession.Builder to mirror Scala approach\nSparkSession.Builder.withKryoSerialization = _kryo_init\n\n# Add the 'asLayer' method to pyspark DataFrame\nDataFrame.as_layer = _convert_df\n\n# Add `raster_join` method to pyspark DataFrame\nDataFrame.raster_join = _raster_join\n\n# Add DataSource convenience methods to the DataFrameReader\nDataFrameReader.raster = _raster_reader\nDataFrameReader.geojson = lambda df_reader, path: _aliased_reader(df_reader, \"geojson\", path)\nDataFrameReader.geotiff = lambda df_reader, path: _layer_reader(df_reader, \"geotiff\", path)\nDataFrameWriter.geotiff = _geotiff_writer\nDataFrameReader.geotrellis = lambda df_reader, path: _layer_reader(df_reader, \"geotrellis\", path)\nDataFrameReader.geotrellis_catalog = lambda df_reader, path: _aliased_reader(df_reader, \"geotrellis-catalog\", path)\nDataFrameWriter.geotrellis = lambda df_writer, path: _aliased_writer(df_writer, \"geotrellis\", path)\n"
] | [["pandas.DataFrame"]] |
ajmarcus/tfx | ["28ac2be5ace31ca733f6292495f8be83484a1730", "28ac2be5ace31ca733f6292495f8be83484a1730"] | ["tfx/components/trainer/fn_args_utils_test.py", "tfx/examples/penguin/penguin_pipeline_kubeflow_e2e_test.py"] | [
"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.trainer.fn_args_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nfrom tfx.components.trainer import fn_args_utils\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\nfrom tfx.types import standard_component_specs\nfrom tfx.utils import proto_utils\n\n\nclass FnArgsUtilsTest(tf.test.TestCase):\n\n def testGetCommonFnArgs(self):\n source_data_dir = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), 'testdata')\n\n # Create input dict.\n examples = standard_artifacts.Examples()\n examples.uri = os.path.join(source_data_dir,\n 'transform/transformed_examples')\n examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])\n\n transform_output = standard_artifacts.TransformGraph()\n transform_output.uri = os.path.join(source_data_dir,\n 'transform/transform_graph')\n\n schema = standard_artifacts.Schema()\n schema.uri = os.path.join(source_data_dir, 'schema_gen')\n\n base_model = standard_artifacts.Model()\n base_model.uri = os.path.join(source_data_dir, 'trainer/previous')\n\n input_dict = {\n standard_component_specs.EXAMPLES_KEY: [examples],\n standard_component_specs.TRANSFORM_GRAPH_KEY: [transform_output],\n standard_component_specs.SCHEMA_KEY: [schema],\n standard_component_specs.BASE_MODEL_KEY: [base_model],\n }\n\n # Create exec properties skeleton.\n exec_properties = {\n 'train_args':\n proto_utils.proto_to_json(trainer_pb2.TrainArgs(num_steps=1000)),\n 'eval_args':\n proto_utils.proto_to_json(trainer_pb2.EvalArgs(num_steps=500)),\n }\n\n fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,\n 'tempdir')\n self.assertEqual(fn_args.working_dir, 'tempdir')\n self.assertEqual(fn_args.train_steps, 1000)\n self.assertEqual(fn_args.eval_steps, 500)\n self.assertLen(fn_args.train_files, 1)\n self.assertEqual(fn_args.train_files[0],\n os.path.join(examples.uri, 'Split-train', '*'))\n self.assertLen(fn_args.eval_files, 1)\n self.assertEqual(fn_args.eval_files[0],\n os.path.join(examples.uri, 'Split-eval', '*'))\n self.assertEqual(fn_args.schema_path,\n os.path.join(schema.uri, 'schema.pbtxt'))\n # Depending on execution environment, the base model may have been stored\n # at .../Format-Servo/... or .../Format-Serving/... directory patterns.\n self.assertRegexpMatches(\n fn_args.base_model,\n os.path.join(base_model.uri,\n r'Format-(Servo|Serving)/export/chicago-taxi/\\d+'))\n self.assertEqual(fn_args.transform_graph_path, transform_output.uri)\n self.assertIsInstance(fn_args.data_accessor, fn_args_utils.DataAccessor)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"E2E tests for tfx.examples.penguin.penguin_pipeline_kubeflow.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tfx.dsl.io import fileio\nfrom tfx.examples.penguin import penguin_pipeline_kubeflow\nfrom tfx.orchestration import test_utils\nfrom tfx.orchestration.kubeflow import test_utils as kubeflow_test_utils\n\n\nclass PenguinPipelineKubeflowTest(kubeflow_test_utils.BaseKubeflowTest):\n\n def testEndToEndPipelineRun(self):\n \"\"\"End-to-end test for pipeline with RuntimeParameter.\"\"\"\n pipeline_name = 'kubeflow-e2e-test-parameter-{}'.format(\n test_utils.random_id())\n kubeflow_pipeline = penguin_pipeline_kubeflow.create_pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=self._pipeline_root(pipeline_name),\n data_root=self._penguin_data_root,\n module_file=self._penguin_dependency_file,\n enable_tuning=False,\n ai_platform_training_args=penguin_pipeline_kubeflow\n ._ai_platform_training_args,\n ai_platform_serving_args=penguin_pipeline_kubeflow\n ._ai_platform_serving_args,\n beam_pipeline_args=penguin_pipeline_kubeflow\n ._beam_pipeline_args_by_runner['DirectRunner'],\n use_aip_component=False,\n serving_model_dir=self._serving_model_dir)\n\n parameters = {\n 'train-args': '{\"num_steps\": 100}',\n 'eval-args': '{\"num_steps\": 50}',\n }\n self._compile_and_run_pipeline(\n pipeline=kubeflow_pipeline, parameters=parameters)\n self.assertTrue(fileio.exists(self._serving_model_dir))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [["tensorflow.test.main"], ["tensorflow.test.main"]] |
congw112358/RAL2022_Code | ["5e275cb8b389be745fee1285f58732456146f7af"] | ["train_realMulti-DA-Loss_classification.py"] | [
"import os\nimport sys\nimport torch\nimport numpy as np\n\nimport datetime\nimport logging\nimport provider\nimport importlib\nimport shutil\nimport argparse\n\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom data_utils.PCDLoader import *\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\n\ndef parse_args():\n '''PARAMETERS'''\n parser = argparse.ArgumentParser('training')\n parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')\n parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')\n parser.add_argument('--batch_size', type=int, default=8, help='batch size in training')\n parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')\n parser.add_argument('--num_category', default=12, type=int, help='training on real dataset')\n parser.add_argument('--epoch', default=20, type=int, help='number of epoch in training')\n parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training')\n parser.add_argument('--num_point', type=int, default=1024, help='Point Number')\n parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')\n parser.add_argument('--log_dir', type=str, default=None, help='experiment root')\n parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')\n parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')\n parser.add_argument('--process_data', action='store_true', default=False, help='save data offline')\n parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampiling')\n parser.add_argument('--num_sparse_point', type=int, default=50, help='Point Number for domain loss')\n parser.add_argument('--random_choose_sparse', type=bool, default=False, help='Random select num_sparse_point from [10,20,30,40,50]')\n parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3')\n parser.add_argument('--DA_method', type=str, default=\"multi_coral_mmd\", help='choose the DA loss function')\n parser.add_argument('--alpha', type=float, default=10, help='set the value of classification loss')\n parser.add_argument('--lamda', type=float, default=10, help='set the value of CORAL loss')\n parser.add_argument('--beta', type=float, default=10, help='set the value of MMD loss')\n return parser.parse_args()\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef inplace_relu(m):\n classname = m.__class__.__name__\n if classname.find('ReLU') != -1:\n m.inplace=True\n\n\ndef test(model, loader, num_class=12):\n mean_correct = []\n class_acc = np.zeros((num_class, 3))\n classifier = model.eval()\n\n for j, data in tqdm(enumerate(loader), total=len(loader)):\n\n if not args.use_cpu:\n points, target = data['pointcloud'].to(device).float(), data['category'].to(device)\n\n points = points.transpose(2, 1)\n pred, _ = classifier(points)\n pred_choice = pred.data.max(1)[1]\n\n for cat in np.unique(target.cpu()):\n classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()\n class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])\n class_acc[cat, 1] += 1\n\n correct = pred_choice.eq(target.long().data).cpu().sum()\n mean_correct.append(correct.item() / float(points.size()[0]))\n\n class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 
1]\n class_acc = np.mean(class_acc[:, 2])\n instance_acc = np.mean(mean_correct)\n\n return instance_acc, class_acc\n\n\ndef main(args):\n def log_string(str):\n logger.info(str)\n print(str)\n\n '''HYPER PARAMETER'''\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n '''CREATE DIR'''\n timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))\n exp_dir = Path('./log/')\n exp_dir.mkdir(exist_ok=True)\n exp_dir = exp_dir.joinpath('classification')\n exp_dir.mkdir(exist_ok=True)\n if args.log_dir is None:\n exp_dir = exp_dir.joinpath(timestr)\n else:\n exp_dir = exp_dir.joinpath(args.log_dir)\n exp_dir.mkdir(exist_ok=True)\n checkpoints_dir = exp_dir.joinpath('checkpoints/')\n checkpoints_dir.mkdir(exist_ok=True)\n log_dir = exp_dir.joinpath('logs/')\n log_dir.mkdir(exist_ok=True)\n\n '''LOG'''\n args = parse_args()\n logger = logging.getLogger(\"Model\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n log_string('PARAMETER ...')\n log_string(args)\n\n '''DATA LOADING'''\n log_string('Load dataset ...')\n visual_data_path = 'data/visual_data_pcd/'\n tactile_data_path = 'data/tactile_pcd_10_sampled_21.02/'\n\n\n\n train_dataset = PCDPointCloudData(visual_data_path,\n folder='Train',\n sample_method='Voxel',\n num_point=args.num_point,\n sample=True,\n rotation=False,\n est_normal=args.use_normals)\n\n test_dataset = PCDPointCloudData(visual_data_path,\n folder='Test',\n sample_method='Voxel',\n num_point=args.num_point,\n sample=True,\n rotation=False,\n est_normal=args.use_normals)\n\n\n if args.random_choose_sparse is True:\n raise NotImplementedError(\"Function Not Implemented\") # Not Implement\n # domain_adaptation_dataset = PCDPointCloudData(tactile_data_path, folder='Train',\n # random_num=True,\n # list_num_point=[10,20,30,40,50])\n else:\n domain_adaptation_dataset = PCDPointCloudData(tactile_data_path,\n folder='Train',\n sample_method='Voxel',\n num_point=args.num_sparse_point,\n sample=True,\n rotation=False,\n est_normal=args.use_normals)\n\n trainDataLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)\n domainAdaptationDataLoader = torch.utils.data.DataLoader(domain_adaptation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)\n testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10)\n\n '''Output middle layers'''\n activation = {}\n def get_activation(name):\n def hook(model, input, output):\n activation [name] = output.detach()\n return hook\n\n '''MODEL LOADING'''\n num_class = args.num_category\n model = importlib.import_module(args.model)\n shutil.copy('./models/%s.py' % args.model, str(exp_dir))\n shutil.copy('models/pointnet_cls.py', str(exp_dir))\n shutil.copy('data_utils/PCDLoader.py', str(exp_dir))\n shutil.copy('./train_realMulti-DA-Loss_classification.py', str(exp_dir))\n\n classifier = model.get_model(num_class, normal_channel=args.use_normals)\n criterion = model.get_loss()\n if args.DA_method == \"coral\":\n criterion_DA = model.get_coral_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)\n elif args.DA_method == \"mmd\":\n criterion_DA = model.get_mmd_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)\n elif args.DA_method == 
\"coral_mmd\":\n criterion_DA = model.get_coral_mmd_loss(DA_alpha=args.alpha, DA_beta=args.beta,\n DA_lamda=args.lamda)\n elif args.DA_method == \"multi_coral_mmd\":\n criterion_DA = model.get_multiLayer_loss(DA_alpha=args.alpha, DA_beta=args.beta,\n DA_lamda=args.lamda)\n else:\n raise NameError(\"Wrong input for DA method name!\")\n\n classifier.apply(inplace_relu)\n\n if not args.use_cpu:\n classifier = classifier.cuda()\n criterion = criterion.cuda()\n criterion_DA = criterion_DA.cuda()\n\n # Load pretrained model with real dataset\n try:\n checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')\n start_epoch = checkpoint['epoch']\n classifier.load_state_dict(checkpoint['model_state_dict'])\n log_string('Use pretrain model')\n except:\n log_string('No existing model, starting training from scratch...')\n start_epoch = 0\n\n try:\n min_loss = checkpoint['loss']\n log_string('Loading model with DA loss %f' % min_loss)\n except:\n log_string('No DA loss found in the model')\n min_loss = 10000.0\n\n\n if args.optimizer == 'Adam':\n optimizer = torch.optim.Adam(\n classifier.parameters(),\n lr=args.learning_rate,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=args.decay_rate\n )\n else:\n optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)\n global_epoch = 0\n global_step = 0\n best_instance_acc = 0.0\n best_class_acc = 0.0\n running_loss = 0.0\n\n '''TRANING'''\n logger.info('Start training...')\n end_epoch = start_epoch + args.epoch\n print(\"start epoch: \", start_epoch)\n print(\"end epoch: \", end_epoch)\n for epoch in range(start_epoch, end_epoch):\n log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, end_epoch))\n mean_correct = []\n # Freeze Conv\n for name, param in classifier.named_parameters():\n if \"feat\" in name:\n param.requires_grad = False\n\n\n scheduler.step()\n for batch_id, (data, data_DA) in tqdm(\n enumerate(zip(trainDataLoader,domainAdaptationDataLoader), 0),\n total=len(trainDataLoader),\n smoothing=0.9):\n\n optimizer.zero_grad()\n points, target = data['pointcloud'].to(device).float(), data['category'].to(device)\n points_DA = data_DA['pointcloud'].to(device).float()\n\n points = points.data.cpu().numpy()\n points = provider.random_point_dropout(points)\n points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])\n points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])\n points = torch.Tensor(points)\n points = points.transpose(2, 1)\n\n points_DA = points_DA.data.cpu().numpy()\n points_DA = provider.random_point_dropout(points_DA)\n points_DA[:, :, 0:3] = provider.random_scale_point_cloud(points_DA[:, :, 0:3])\n points_DA[:, :, 0:3] = provider.shift_point_cloud(points_DA[:, :, 0:3])\n points_DA = torch.Tensor(points_DA)\n points_DA = points_DA.transpose(2, 1)\n\n if not args.use_cpu:\n points, target = points.cuda(), target.cuda()\n points_DA = points_DA.cuda()\n\n pred, trans_feat = classifier(points)\n\n # Multi-layer Loss\n ###############################################################################################\n # FC1\n classifier.fc1.register_forward_hook(get_activation('fc1'))\n output_dense_1 = classifier(points)\n feature_dense_1 = activation['fc1']\n # print(feature_dense_1.size())\n\n classifier.fc1.register_forward_hook(get_activation('fc1'))\n output_DA_1 = classifier(points_DA)\n feature_DA_1 = activation['fc1']\n # print(feature_DA_1.size())\n\n # FC2\n 
classifier.fc2.register_forward_hook(get_activation('fc2'))\n output_dense_2 = classifier(points)\n feature_dense_2 = activation['fc2']\n # print(feature_dense_2.size())\n\n classifier.fc2.register_forward_hook(get_activation('fc2'))\n output_DA_2 = classifier(points_DA)\n feature_DA_2 = activation['fc2']\n # print(feature_DA_2.size())\n\n # change the loss here for testing!!!\n\n DA_loss, loss = criterion_DA(pred, target.long(), trans_feat,\n feature_dense_1, feature_DA_1, feature_dense_2, feature_DA_2)\n ################################################################################################\n pred_choice = pred.data.max(1)[1]\n\n correct = pred_choice.eq(target.long().data).cpu().sum()\n mean_correct.append(correct.item() / float(points.size()[0]))\n\n loss.backward()\n optimizer.step()\n global_step += 1\n\n # Print the loss\n running_loss += DA_loss.item()\n if batch_id % 100 == 99:\n # log_string(\"fc1 {}\".format(classifier.fc1.weight.grad))\n # log_string(\"fc2 {}\".format(classifier.fc2.weight.grad))\n # log_string(\"fc3 {}\".format(classifier.fc3.weight.grad))\n # print(\"Training loss {} \".format(loss.item()/100))\n calculate_loss = running_loss/100\n log_string(\"Running DA loss {} \".format(calculate_loss))\n\n if calculate_loss < min_loss:\n min_loss = calculate_loss\n logger.info('Save model...')\n savepath = str(checkpoints_dir) + '/best_model.pth'\n log_string('Saving at %s' % savepath)\n state = {\n 'epoch': epoch,\n # 'instance_acc': instance_acc,\n # 'class_acc': class_acc,\n 'loss': calculate_loss,\n 'model_state_dict': classifier.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }\n torch.save(state, savepath)\n running_loss = 0.0\n\n train_instance_acc = np.mean(mean_correct)\n log_string('Train Instance Accuracy: %f' % train_instance_acc)\n\n with torch.no_grad():\n instance_acc, class_acc = test(classifier.eval(), testDataLoader, num_class=num_class)\n\n if (instance_acc >= best_instance_acc):\n best_instance_acc = instance_acc\n best_epoch = epoch + 1\n\n if (class_acc >= best_class_acc):\n best_class_acc = class_acc\n log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))\n log_string('Best Instance Accuracy: %f, Class Accuracy: %f' % (best_instance_acc, best_class_acc))\n\n if (instance_acc >= best_instance_acc):\n # logger.info('Save model...')\n # # print(\"This is a better model, but the model will not be saved\")\n logger.info('Model will not be saved with vision validation')\n # savepath = str(checkpoints_dir) + '/best_model.pth'\n # log_string('Saving at %s' % savepath)\n # state = {\n # 'epoch': best_epoch,\n # 'instance_acc': instance_acc,\n # 'class_acc': class_acc,\n # 'model_state_dict': classifier.state_dict(),\n # 'optimizer_state_dict': optimizer.state_dict(),\n # }\n # torch.save(state, savepath)\n global_epoch += 1\n\n logger.info('End of training...')\n\n\nif __name__ == '__main__':\n # torch.cuda.empty_cache()\n args = parse_args()\n main(args)\n"
] | [["torch.optim.lr_scheduler.StepLR", "numpy.zeros", "torch.no_grad", "torch.save", "numpy.mean", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.Tensor"]] |
Richoor/HEFT | ["8422bfc5e9abf132c409a0ae299cbde29eb6e5fc"] | ["venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py"] | [
"# -*- coding: utf-8 -*-\n# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>\n# All rights reserved.\n# BSD license.\n#\n# Author: ysitu <ysitu@users.noreply.github.com>\n\"\"\"\nAlgebraic connectivity and Fiedler vectors of undirected graphs.\n\"\"\"\nfrom functools import partial\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\nfrom networkx.utils import reverse_cuthill_mckee_ordering\n\ntry:\n from numpy import array, asmatrix, asarray, dot, ndarray, ones, sqrt, zeros\n from numpy.linalg import norm, qr\n from numpy.random import normal\n from scipy.linalg import eigh, inv\n from scipy.sparse import csc_matrix, spdiags\n from scipy.sparse.linalg import eigsh, lobpcg\n __all__ = ['algebraic_connectivity', 'fiedler_vector', 'spectral_ordering']\nexcept ImportError:\n __all__ = []\n\ntry:\n from scipy.linalg.blas import dasum, daxpy, ddot\nexcept ImportError:\n if __all__:\n # Make sure the imports succeeded.\n # Use minimal replacements if BLAS is unavailable from SciPy.\n dasum = partial(norm, ord=1)\n ddot = dot\n\n def daxpy(x, y, a):\n y += a * x\n return y\n\n\nclass _PCGSolver(object):\n \"\"\"Preconditioned conjugate gradient method.\n\n To solve Ax = b:\n M = A.diagonal() # or some other preconditioner\n solver = _PCGSolver(lambda x: A * x, lambda x: M * x)\n x = solver.solve(b)\n\n The inputs A and M are functions which compute\n matrix multiplication on the argument.\n A - multiply by the matrix A in Ax=b\n M - multiply by M, the preconditioner surragate for A\n\n Warning: There is no limit on number of iterations.\n \"\"\"\n\n def __init__(self, A, M):\n self._A = A\n self._M = M or (lambda x: x.copy())\n\n def solve(self, B, tol):\n B = asarray(B)\n X = ndarray(B.shape, order='F')\n for j in range(B.shape[1]):\n X[:, j] = self._solve(B[:, j], tol)\n return X\n\n def _solve(self, b, tol):\n A = self._A\n M = self._M\n tol *= dasum(b)\n # Initialize.\n x = zeros(b.shape)\n r = b.copy()\n z = M(r)\n rz = ddot(r, z)\n p = z.copy()\n # Iterate.\n while True:\n Ap = A(p)\n alpha = rz / ddot(p, Ap)\n x = daxpy(p, x, a=alpha)\n r = daxpy(Ap, r, a=-alpha)\n if dasum(r) < tol:\n return x\n z = M(r)\n beta = ddot(r, z)\n beta, rz = beta / rz, beta\n p = daxpy(p, z, a=beta)\n\n\nclass _CholeskySolver(object):\n \"\"\"Cholesky factorization.\n\n To solve Ax = b:\n solver = _CholeskySolver(A)\n x = solver.solve(b)\n\n optional argument `tol` on solve method is ignored but included\n to match _PCGsolver API.\n \"\"\"\n\n def __init__(self, A):\n if not self._cholesky:\n raise nx.NetworkXError('Cholesky solver unavailable.')\n self._chol = self._cholesky(A)\n\n def solve(self, B, tol=None):\n return self._chol(B)\n\n try:\n from scikits.sparse.cholmod import cholesky\n _cholesky = cholesky\n except ImportError:\n _cholesky = None\n\n\nclass _LUSolver(object):\n \"\"\"LU factorization.\n\n To solve Ax = b:\n solver = _LUSolver(A)\n x = solver.solve(b)\n\n optional argument `tol` on solve method is ignored but included\n to match _PCGsolver API.\n \"\"\"\n\n def __init__(self, A):\n if not self._splu:\n raise nx.NetworkXError('LU solver unavailable.')\n self._LU = self._splu(A)\n\n def solve(self, B, tol=None):\n B = asarray(B)\n X = ndarray(B.shape, order='F')\n for j in range(B.shape[1]):\n X[:, j] = self._LU.solve(B[:, j])\n return X\n\n try:\n from scipy.sparse.linalg import splu\n _splu = partial(splu, permc_spec='MMD_AT_PLUS_A', diag_pivot_thresh=0.,\n options={'Equil': True, 'SymmetricMode': True})\n except ImportError:\n _splu = None\n\n\ndef 
_preprocess_graph(G, weight):\n \"\"\"Compute edge weights and eliminate zero-weight edges.\n \"\"\"\n if G.is_directed():\n H = nx.MultiGraph()\n H.add_nodes_from(G)\n H.add_weighted_edges_from(((u, v, e.get(weight, 1.))\n for u, v, e in G.edges(data=True)\n if u != v), weight=weight)\n G = H\n if not G.is_multigraph():\n edges = ((u, v, abs(e.get(weight, 1.)))\n for u, v, e in G.edges(data=True) if u != v)\n else:\n edges = ((u, v, sum(abs(e.get(weight, 1.)) for e in G[u][v].values()))\n for u, v in G.edges() if u != v)\n H = nx.Graph()\n H.add_nodes_from(G)\n H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)\n return H\n\n\ndef _rcm_estimate(G, nodelist):\n \"\"\"Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.\n \"\"\"\n G = G.subgraph(nodelist)\n order = reverse_cuthill_mckee_ordering(G)\n n = len(nodelist)\n index = dict(zip(nodelist, range(n)))\n x = ndarray(n, dtype=float)\n for i, u in enumerate(order):\n x[index[u]] = i\n x -= (n - 1) / 2.\n return x\n\n\ndef _tracemin_fiedler(L, X, normalized, tol, method):\n \"\"\"Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph. This function starts with the Laplacian L, not the Graph.\n\n Parameters\n ----------\n L : Laplacian of a possibly weighted or normalized, but undirected graph\n\n X : Initial guess for a solution. Usually a matrix of random numbers.\n This function allows more than one column in X to identify more than\n one eigenvector if desired.\n\n normalized : bool\n Whether the normalized Laplacian matrix is used.\n\n tol : float\n Tolerance of relative residual in eigenvalue computation.\n Warning: There is no limit on number of iterations.\n\n method : string\n Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'.\n Otherwise exception is raised.\n\n Returns\n -------\n sigma, X : Two NumPy arrays of floats.\n The lowest eigenvalues and corresponding eigenvectors of L.\n The size of input X determines the size of these outputs.\n As this is for Fiedler vectors, the zero eigenvalue (and\n constant eigenvector) are avoided.\n \"\"\"\n n = X.shape[0]\n\n if normalized:\n # Form the normalized Laplacian matrix and determine the eigenvector of\n # its nullspace.\n e = sqrt(L.diagonal())\n D = spdiags(1. / e, [0], n, n, format='csr')\n L = D * L * D\n e *= 1. / norm(e, 2)\n\n if normalized:\n def project(X):\n \"\"\"Make X orthogonal to the nullspace of L.\n \"\"\"\n X = asarray(X)\n for j in range(X.shape[1]):\n X[:, j] -= dot(X[:, j], e) * e\n else:\n def project(X):\n \"\"\"Make X orthogonal to the nullspace of L.\n \"\"\"\n X = asarray(X)\n for j in range(X.shape[1]):\n X[:, j] -= X[:, j].sum() / n\n\n if method == 'tracemin_pcg':\n D = L.diagonal().astype(float)\n solver = _PCGSolver(lambda x: L * x, lambda x: D * x)\n elif method == 'tracemin_chol' or method == 'tracemin_lu':\n # Convert A to CSC to suppress SparseEfficiencyWarning.\n A = csc_matrix(L, dtype=float, copy=True)\n # Force A to be nonsingular. Since A is the Laplacian matrix of a\n # connected graph, its rank deficiency is one, and thus one diagonal\n # element needs to modified. 
Changing to infinity forces a zero in the\n # corresponding element in the solution.\n i = (A.indptr[1:] - A.indptr[:-1]).argmax()\n A[i, i] = float('inf')\n if method == 'tracemin_chol':\n solver = _CholeskySolver(A)\n else:\n solver = _LUSolver(A)\n else:\n raise nx.NetworkXError('Unknown linear system solver: ' + method)\n\n # Initialize.\n Lnorm = abs(L).sum(axis=1).flatten().max()\n project(X)\n W = asmatrix(ndarray(X.shape, order='F'))\n\n while True:\n # Orthonormalize X.\n X = qr(X)[0]\n # Compute interation matrix H.\n W[:, :] = L * X\n H = X.T * W\n sigma, Y = eigh(H, overwrite_a=True)\n # Compute the Ritz vectors.\n X *= Y\n # Test for convergence exploiting the fact that L * X == W * Y.\n res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm\n if res < tol:\n break\n # Compute X = L \\ X / (X' * (L \\ X)).\n # L \\ X can have an arbitrary projection on the nullspace of L,\n # which will be eliminated.\n W[:, :] = solver.solve(X, tol)\n X = (inv(W.T * X) * W.T).T # Preserves Fortran storage order.\n project(X)\n\n return sigma, asarray(X)\n\n\ndef _get_fiedler_func(method):\n \"\"\"Return a function that solves the Fiedler eigenvalue problem.\n \"\"\"\n if method == \"tracemin\": # old style keyword <v2.1\n method = \"tracemin_pcg\"\n if method in (\"tracemin_pcg\", \"tracemin_chol\", \"tracemin_lu\"):\n def find_fiedler(L, x, normalized, tol):\n q = 1 if method == 'tracemin_pcg' else min(4, L.shape[0] - 1)\n X = asmatrix(normal(size=(q, L.shape[0]))).T\n sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)\n return sigma[0], X[:, 0]\n elif method == 'lanczos' or method == 'lobpcg':\n def find_fiedler(L, x, normalized, tol):\n L = csc_matrix(L, dtype=float)\n n = L.shape[0]\n if normalized:\n D = spdiags(1. / sqrt(L.diagonal()), [0], n, n, format='csc')\n L = D * L * D\n if method == 'lanczos' or n < 10:\n # Avoid LOBPCG when n < 10 due to\n # https://github.com/scipy/scipy/issues/3592\n # https://github.com/scipy/scipy/pull/3594\n sigma, X = eigsh(L, 2, which='SM', tol=tol,\n return_eigenvectors=True)\n return sigma[1], X[:, 1]\n else:\n X = asarray(asmatrix(x).T)\n M = spdiags(1. / L.diagonal(), [0], n, n)\n Y = ones(n)\n if normalized:\n Y /= D.diagonal()\n sigma, X = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol,\n maxiter=n, largest=False)\n return sigma[0], X[:, 0]\n else:\n raise nx.NetworkXError(\"unknown method '%s'.\" % method)\n\n return find_fiedler\n\n\n@not_implemented_for('directed')\ndef algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-8,\n method='tracemin_pcg'):\n \"\"\"Return the algebraic connectivity of an undirected graph.\n\n The algebraic connectivity of a connected undirected graph is the second\n smallest eigenvalue of its Laplacian matrix.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. 
The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n algebraic_connectivity : float\n Algebraic connectivity.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n \"\"\"\n if len(G) < 2:\n raise nx.NetworkXError('graph has less than two nodes.')\n G = _preprocess_graph(G, weight)\n if not nx.is_connected(G):\n return 0.\n\n L = nx.laplacian_matrix(G)\n if L.shape[0] == 2:\n return 2. * L[0, 0] if not normalized else 2.\n\n find_fiedler = _get_fiedler_func(method)\n x = None if method != 'lobpcg' else _rcm_estimate(G, G)\n sigma, fiedler = find_fiedler(L, x, normalized, tol)\n return sigma\n\n\n@not_implemented_for('directed')\ndef fiedler_vector(G, weight='weight', normalized=False, tol=1e-8,\n method='tracemin_pcg'):\n \"\"\"Return the Fiedler vector of a connected undirected graph.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n fiedler_vector : NumPy array of floats.\n Fiedler vector.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes or is not connected.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. 
Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n \"\"\"\n if len(G) < 2:\n raise nx.NetworkXError('graph has less than two nodes.')\n G = _preprocess_graph(G, weight)\n if not nx.is_connected(G):\n raise nx.NetworkXError('graph is not connected.')\n\n if len(G) == 2:\n return array([1., -1.])\n\n find_fiedler = _get_fiedler_func(method)\n L = nx.laplacian_matrix(G)\n x = None if method != 'lobpcg' else _rcm_estimate(G, G)\n sigma, fiedler = find_fiedler(L, x, normalized, tol)\n return fiedler\n\n\ndef spectral_ordering(G, weight='weight', normalized=False, tol=1e-8,\n method='tracemin_pcg'):\n \"\"\"Compute the spectral_ordering of a graph.\n\n The spectral ordering of a graph is an ordering of its nodes where nodes\n in the same weakly connected components appear contiguous and ordered by\n their corresponding elements in the Fiedler vector of the component.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n spectral_ordering : NumPy array of floats.\n Spectral ordering of nodes.\n\n Raises\n ------\n NetworkXError\n If G is empty.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n \"\"\"\n if len(G) == 0:\n raise nx.NetworkXError('graph is empty.')\n G = _preprocess_graph(G, weight)\n\n find_fiedler = _get_fiedler_func(method)\n order = []\n for component in nx.connected_components(G):\n size = len(component)\n if size > 2:\n L = nx.laplacian_matrix(G, component)\n x = None if method != 'lobpcg' else _rcm_estimate(G, component)\n sigma, fiedler = find_fiedler(L, x, normalized, tol)\n sort_info = zip(fiedler, range(size), component)\n order.extend(u for x, c, u in sorted(sort_info))\n else:\n order.extend(component)\n\n return order\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n import scipy.sparse\n except ImportError:\n raise SkipTest('SciPy not available.')\n"
] | [["numpy.random.normal", "numpy.array", "numpy.linalg.norm", "numpy.dot", "numpy.asarray", "numpy.zeros", "scipy.sparse.spdiags", "scipy.linalg.eigh", "scipy.linalg.blas.daxpy", "scipy.sparse.csc_matrix", "numpy.ones", "scipy.linalg.blas.ddot", "numpy.linalg.qr", "numpy.ndarray", "scipy.linalg.inv", "scipy.sparse.linalg.eigsh", "numpy.asmatrix"]] |
mschart/prednet_FORK | ["6baebee177e0047edfc66fe9cfbf8f8efca52422"] | ["kitti_evaluate.py"] | [
"'''\nEvaluate trained PredNet on KITTI sequences.\nCalculates mean-squared error and plots predictions.\n'''\n\nimport os\nimport numpy as np\nfrom six.moves import cPickle\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom keras import backend as K\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Input, Dense, Flatten\n\nfrom prednet import PredNet\nfrom data_utils import SequenceGenerator\nfrom kitti_settings import *\n\n\nn_plot = 40\nbatch_size = 10\nnt = 10\n\nweights_file = os.path.join(WEIGHTS_DIR, 'tensorflow_weights/prednet_kitti_weights.hdf5')\njson_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')\ntest_file = os.path.join(DATA_DIR, 'X_test.hkl')\ntest_sources = os.path.join(DATA_DIR, 'sources_test.hkl')\n\n# Load trained model\nf = open(json_file, 'r')\njson_string = f.read()\nf.close()\ntrain_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})\ntrain_model.load_weights(weights_file)\n\n# Create testing model (to output predictions)\nlayer_config = train_model.layers[1].get_config()\nlayer_config['output_mode'] = 'prediction'\ndata_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']\ntest_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)\ninput_shape = list(train_model.layers[0].batch_input_shape[1:])\ninput_shape[0] = nt\ninputs = Input(shape=tuple(input_shape))\npredictions = test_prednet(inputs)\ntest_model = Model(inputs=inputs, outputs=predictions)\n\ntest_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)\nX_test = test_generator.create_all()\nX_hat = test_model.predict(X_test, batch_size)\nif data_format == 'channels_first':\n X_test = np.transpose(X_test, (0, 1, 3, 4, 2))\n X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))\n\n# Compare MSE of PredNet predictions vs. using last frame. Write results to prediction_scores.txt\nmse_model = np.mean( (X_test[:, 1:] - X_hat[:, 1:])**2 ) # look at all timesteps except the first\nmse_prev = np.mean( (X_test[:, :-1] - X_test[:, 1:])**2 )\nif not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)\nf = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')\nf.write(\"Model MSE: %f\\n\" % mse_model)\nf.write(\"Previous Frame MSE: %f\" % mse_prev)\nf.close()\n\n# Plot some predictions\naspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]\nplt.figure(figsize = (nt, 2*aspect_ratio))\ngs = gridspec.GridSpec(2, nt)\ngs.update(wspace=0., hspace=0.)\nplot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')\nif not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)\nplot_idx = np.random.permutation(X_test.shape[0])[:n_plot]\nfor i in plot_idx:\n for t in range(nt):\n plt.subplot(gs[t])\n plt.imshow(X_test[i,t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')\n if t==0: plt.ylabel('Actual', fontsize=10)\n\n plt.subplot(gs[t + nt])\n plt.imshow(X_hat[i,t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')\n if t==0: plt.ylabel('Predicted', fontsize=10)\n\n plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')\n plt.clf()\n"
] | [["matplotlib.use", "numpy.random.permutation", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.tick_params", "numpy.transpose", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.imshow", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.subplot"]] |
filemaster/qiskit-terra | ["8672c407a5a0e34405315f82d5ad5847916e857e"] | ["qiskit/tools/qcvv/tomography.py"] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name\n\n\"\"\"\nQuantum Tomography Module\n\nDescription:\n This module contains functions for performing quantum state and quantum\n process tomography. This includes:\n - Functions for generating a set of circuits to\n extract tomographically complete sets of measurement data.\n - Functions for generating a tomography data set from the\n results after the circuits have been executed on a backend.\n - Functions for reconstructing a quantum state, or quantum process\n (Choi-matrix) from tomography data sets.\n\nReconstruction Methods:\n Currently implemented reconstruction methods are\n - Linear inversion by weighted least-squares fitting.\n - Fast maximum likelihood reconstruction using ref [1].\n\nReferences:\n [1] J Smolin, JM Gambetta, G Smith, Phys. Rev. Lett. 108, 070502 (2012).\n Open access: arXiv:1106.5458 [quant-ph].\n\nWorkflow:\n The basic functions for performing state and tomography experiments are:\n - `tomography_set`, `state_tomography_set`, and `process_tomography_set`\n all generates data structures for tomography experiments.\n - `create_tomography_circuits` generates the quantum circuits specified\n in a `tomography_set` for performing state tomography of the output\n - `tomography_data` extracts the results after executing the tomography\n circuits and returns it in a data structure used by fitters for state\n reconstruction.\n - `fit_tomography_data` reconstructs a density matrix or Choi-matrix from\n the a set of tomography data.\n\"\"\"\n\nimport logging\nfrom functools import reduce\nfrom itertools import product\nfrom re import match\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\nfrom qiskit import QiskitError\nfrom qiskit.tools.qi.qi import vectorize, devectorize, outer\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\n# Tomography Bases\n###############################################################################\n\n\nclass TomographyBasis(dict):\n \"\"\"\n Dictionary subsclass that includes methods for adding gates to circuits.\n\n A TomographyBasis is a dictionary where the keys index a measurement\n and the values are a list of projectors associated to that measurement.\n It also includes two optional methods `prep_gate` and `meas_gate`:\n - `prep_gate` adds gates to a circuit to prepare the corresponding\n basis projector from an initial ground state.\n - `meas_gate` adds gates to a circuit to transform the default\n Z-measurement into a measurement in the basis.\n With the exception of built in bases, these functions do nothing unless\n they are specified by the user. They may be set by the data members\n `prep_fun` and `meas_fun`. 
We illustrate this with an example.\n\n Example:\n A measurement in the Pauli-X basis has two outcomes corresponding to\n the projectors:\n `Xp = [[0.5, 0.5], [0.5, 0.5]]`\n `Xm = [[0.5, -0.5], [-0.5, 0.5]]`\n We can express this as a basis by\n `BX = TomographyBasis( {'X': [Xp, Xm]} )`\n To specifiy the gates to prepare and measure in this basis we :\n ```\n def BX_prep_fun(circuit, qreg, op):\n bas, proj = op\n if bas == \"X\":\n if proj == 0:\n circuit.u2(0., np.pi, qreg) # apply H\n else: # proj == 1\n circuit.u2(np.pi, np.pi, qreg) # apply H.X\n def BX_prep_fun(circuit, qreg, op):\n if op == \"X\":\n circuit.u2(0., np.pi, qreg) # apply H\n ```\n We can then attach these functions to the basis using:\n `BX.prep_fun = BX_prep_fun`\n `BX.meas_fun = BX_meas_fun`.\n\n Generating function:\n A generating function `tomography_basis` exists to create bases in a\n single step. Using the above example this can be done by:\n ```\n BX = tomography_basis({'X': [Xp, Xm]},\n prep_fun=BX_prep_fun,\n meas_fun=BX_meas_fun)\n ```\n \"\"\"\n\n prep_fun = None\n meas_fun = None\n\n def prep_gate(self, circuit, qreg, op):\n \"\"\"\n Add state preparation gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add a preparation to.\n qreg (tuple(QuantumRegister,int)): quantum register to apply\n preparation to.\n op (tuple(str, int)): the basis label and index for the\n preparation op.\n \"\"\"\n if self.prep_fun is None:\n pass\n else:\n self.prep_fun(circuit, qreg, op)\n\n def meas_gate(self, circuit, qreg, op):\n \"\"\"\n Add measurement gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add measurement to.\n qreg (tuple(QuantumRegister,int)): quantum register being measured.\n op (str): the basis label for the measurement.\n \"\"\"\n if self.meas_fun is None:\n pass\n else:\n self.meas_fun(circuit, qreg, op)\n\n\ndef tomography_basis(basis, prep_fun=None, meas_fun=None):\n \"\"\"\n Generate a TomographyBasis object.\n\n See TomographyBasis for further details.abs\n\n Args:\n prep_fun (callable) optional: the function which adds preparation\n gates to a circuit.\n meas_fun (callable) optional: the function which adds measurement\n gates to a circuit.\n\n Returns:\n TomographyBasis: A tomography basis.\n \"\"\"\n ret = TomographyBasis(basis)\n ret.prep_fun = prep_fun\n ret.meas_fun = meas_fun\n return ret\n\n\n# PAULI BASIS\n# This corresponds to measurements in the X, Y, Z basis where\n# Outcomes 0,1 are the +1,-1 eigenstates respectively.\n# State preparation is also done in the +1 and -1 eigenstates.\n\n\ndef __pauli_prep_gates(circuit, qreg, op):\n \"\"\"\n Add state preparation gates to a circuit.\n \"\"\"\n bas, proj = op\n if bas not in ['X', 'Y', 'Z']:\n raise QiskitError(\"There's no X, Y or Z basis for this Pauli \"\n \"preparation\")\n\n if bas == \"X\":\n if proj == 1:\n circuit.u2(np.pi, np.pi, qreg) # H.X\n else:\n circuit.u2(0., np.pi, qreg) # H\n elif bas == \"Y\":\n if proj == 1:\n circuit.u2(-0.5 * np.pi, np.pi, qreg) # S.H.X\n else:\n circuit.u2(0.5 * np.pi, np.pi, qreg) # S.H\n elif bas == \"Z\" and proj == 1:\n circuit.u3(np.pi, 0., np.pi, qreg) # X\n\n\ndef __pauli_meas_gates(circuit, qreg, op):\n \"\"\"\n Add state measurement gates to a circuit.\n \"\"\"\n if op not in ['X', 'Y', 'Z']:\n raise QiskitError(\"There's no X, Y or Z basis for this Pauli \"\n \"measurement\")\n\n if op == \"X\":\n circuit.u2(0., np.pi, qreg) # H\n elif op == \"Y\":\n circuit.u2(0., 0.5 * np.pi, qreg) # H.S^*\n\n\n__PAULI_BASIS_OPS = {\n 'X':\n [np.array([[0.5, 0.5], [0.5, 
0.5]]),\n np.array([[0.5, -0.5], [-0.5, 0.5]])],\n 'Y': [\n np.array([[0.5, -0.5j], [0.5j, 0.5]]),\n np.array([[0.5, 0.5j], [-0.5j, 0.5]])\n ],\n 'Z': [np.array([[1, 0], [0, 0]]),\n np.array([[0, 0], [0, 1]])]\n}\n\n# Create the actual basis\nPAULI_BASIS = tomography_basis(\n __PAULI_BASIS_OPS,\n prep_fun=__pauli_prep_gates,\n meas_fun=__pauli_meas_gates)\n\n\n# SIC-POVM BASIS\ndef __sic_prep_gates(circuit, qreg, op):\n \"\"\"\n Add state preparation gates to a circuit.\n \"\"\"\n bas, proj = op\n\n if bas != 'S':\n raise QiskitError('Not in SIC basis!')\n\n theta = -2 * np.arctan(np.sqrt(2))\n if proj == 1:\n circuit.u3(theta, np.pi, 0.0, qreg)\n elif proj == 2:\n circuit.u3(theta, np.pi / 3, 0.0, qreg)\n elif proj == 3:\n circuit.u3(theta, -np.pi / 3, 0.0, qreg)\n\n\n__SIC_BASIS_OPS = {\n 'S': [\n np.array([[1, 0], [0, 0]]),\n np.array([[1, np.sqrt(2)], [np.sqrt(2), 2]]) / 3,\n np.array([[1, np.exp(np.pi * 2j / 3) * np.sqrt(2)],\n [np.exp(-np.pi * 2j / 3) * np.sqrt(2), 2]]) / 3,\n np.array([[1, np.exp(-np.pi * 2j / 3) * np.sqrt(2)],\n [np.exp(np.pi * 2j / 3) * np.sqrt(2), 2]]) / 3\n ]\n}\n\nSIC_BASIS = tomography_basis(__SIC_BASIS_OPS, prep_fun=__sic_prep_gates)\n\n###############################################################################\n# Tomography Set and labels\n###############################################################################\n\n\ndef tomography_set(meas_qubits,\n meas_basis='Pauli',\n prep_qubits=None,\n prep_basis=None):\n \"\"\"\n Generate a dictionary of tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n Quantum State Tomography:\n Be default it will return a set for performing Quantum State\n Tomography where individual qubits are measured in the Pauli basis.\n A custom measurement basis may also be used by defining a user\n `tomography_basis` and passing this in for the `meas_basis` argument.\n\n Quantum Process Tomography:\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis 'SIC'\n or 'Pauli'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is 'Pauli'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or None): The optional qubit preparation\n basis. If no basis is specified state tomography will be performed\n instead of process tomography. A built in basis may be specified by\n 'SIC' or 'Pauli' (SIC basis recommended for > 2 qubits).\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. 
This output contains\n fields \"qubits\", \"meas_basis\", \"circuits\". It may also optionally\n contain a field \"prep_basis\" for process tomography experiments.\n ```\n {\n 'qubits': qubits (list[ints]),\n 'meas_basis': meas_basis (tomography_basis),\n 'circuit_labels': (list[string]),\n 'circuits': (list[dict]) # prep and meas configurations\n # optionally for process tomography experiments:\n 'prep_basis': prep_basis (tomography_basis)\n }\n ```\n Raises:\n QiskitError: if the Qubits argument is not a list.\n \"\"\"\n if not isinstance(meas_qubits, list):\n raise QiskitError('Qubits argument must be a list')\n num_of_qubits = len(meas_qubits)\n\n if prep_qubits is None:\n prep_qubits = meas_qubits\n if not isinstance(prep_qubits, list):\n raise QiskitError('prep_qubits argument must be a list')\n if len(prep_qubits) != len(meas_qubits):\n raise QiskitError('meas_qubits and prep_qubitsare different length')\n\n if isinstance(meas_basis, str):\n if meas_basis.lower() == 'pauli':\n meas_basis = PAULI_BASIS\n\n if isinstance(prep_basis, str):\n if prep_basis.lower() == 'pauli':\n prep_basis = PAULI_BASIS\n elif prep_basis.lower() == 'sic':\n prep_basis = SIC_BASIS\n\n circuits = []\n circuit_labels = []\n\n # add meas basis configs\n if prep_basis is None:\n # State Tomography\n for meas_product in product(meas_basis.keys(), repeat=num_of_qubits):\n meas = dict(zip(meas_qubits, meas_product))\n circuits.append({'meas': meas})\n # Make label\n label = '_meas_'\n for qubit, op in meas.items():\n label += '%s(%d)' % (op[0], qubit)\n circuit_labels.append(label)\n return {'qubits': meas_qubits,\n 'circuits': circuits,\n 'circuit_labels': circuit_labels,\n 'meas_basis': meas_basis}\n\n # Process Tomography\n num_of_s = len(list(prep_basis.values())[0])\n plst_single = [(b, s)\n for b in prep_basis.keys()\n for s in range(num_of_s)]\n for plst_product in product(plst_single, repeat=num_of_qubits):\n for meas_product in product(meas_basis.keys(),\n repeat=num_of_qubits):\n prep = dict(zip(prep_qubits, plst_product))\n meas = dict(zip(meas_qubits, meas_product))\n circuits.append({'prep': prep, 'meas': meas})\n # Make label\n label = '_prep_'\n for qubit, op in prep.items():\n label += '%s%d(%d)' % (op[0], op[1], qubit)\n label += '_meas_'\n for qubit, op in meas.items():\n label += '%s(%d)' % (op[0], qubit)\n circuit_labels.append(label)\n return {'qubits': meas_qubits,\n 'circuits': circuits,\n 'circuit_labels': circuit_labels,\n 'prep_basis': prep_basis,\n 'meas_basis': meas_basis}\n\n\ndef state_tomography_set(qubits, meas_basis='Pauli'):\n \"\"\"\n Generate a dictionary of state tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n Quantum State Tomography:\n Be default it will return a set for performing Quantum State\n Tomography where individual qubits are measured in the Pauli basis.\n A custom measurement basis may also be used by defining a user\n `tomography_basis` and passing this in for the `meas_basis` argument.\n\n Quantum Process Tomography:\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. 
The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis 'SIC'\n or 'Pauli'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is 'Pauli'.\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. This output contains\n fields \"qubits\", \"meas_basis\", \"circuits\".\n ```\n {\n 'qubits': qubits (list[ints]),\n 'meas_basis': meas_basis (tomography_basis),\n 'circuit_labels': (list[string]),\n 'circuits': (list[dict]) # prep and meas configurations\n }\n ```\n \"\"\"\n return tomography_set(qubits, meas_basis=meas_basis)\n\n\ndef process_tomography_set(meas_qubits, meas_basis='Pauli',\n prep_qubits=None, prep_basis='SIC'):\n \"\"\"\n Generate a dictionary of process tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis 'SIC'\n or 'Pauli'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is 'Pauli'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or str): The qubit preparation basis.\n The default value is 'SIC'.\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. 
This output contains\n fields \"qubits\", \"meas_basis\", \"prep_basus\", circuits\".\n ```\n {\n 'qubits': qubits (list[ints]),\n 'meas_basis': meas_basis (tomography_basis),\n 'prep_basis': prep_basis (tomography_basis),\n 'circuit_labels': (list[string]),\n 'circuits': (list[dict]) # prep and meas configurations\n }\n ```\n \"\"\"\n return tomography_set(meas_qubits, meas_basis=meas_basis,\n prep_qubits=prep_qubits, prep_basis=prep_basis)\n\n\ndef tomography_circuit_names(tomo_set, name=''):\n \"\"\"\n Return a list of tomography circuit names.\n\n The returned list is the same as the one returned by\n `create_tomography_circuits` and can be used by a QuantumProgram\n to execute tomography circuits and extract measurement results.\n\n Args:\n tomo_set (tomography_set): a tomography set generated by\n `tomography_set`.\n name (str): the name of the base QuantumCircuit used by the\n tomography experiment.\n\n Returns:\n list: A list of circuit names.\n \"\"\"\n return [name + l for l in tomo_set['circuit_labels']]\n\n\n###############################################################################\n# Tomography circuit generation\n###############################################################################\n\n\ndef create_tomography_circuits(circuit, qreg, creg, tomoset):\n \"\"\"\n Add tomography measurement circuits to a QuantumProgram.\n\n The quantum program must contain a circuit 'name', which is treated as a\n state preparation circuit for state tomography, or as teh circuit being\n measured for process tomography. This function then appends the circuit\n with a set of measurements specified by the input `tomography_set`,\n optionally it also prepends the circuit with state preparation circuits if\n they are specified in the `tomography_set`.\n\n For n-qubit tomography with a tomographically complete set of preparations\n and measurements this results in $4^n 3^n$ circuits being added to the\n quantum program.\n\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of quantum tomography circuits for the input circuit.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit\n\n Example:\n For a tomography set specifying state tomography of qubit-0 prepared\n by a circuit 'circ' this would return:\n ```\n ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']\n ```\n For process tomography of the same circuit with preparation in the\n SIC-POVM basis it would return:\n ```\n [\n 'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',\n 'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',\n 'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',\n 'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',\n 'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',\n 'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'\n ]\n ```\n \"\"\"\n\n if not isinstance(circuit, QuantumCircuit):\n raise QiskitError('Input circuit must be a QuantumCircuit object')\n\n dics = tomoset['circuits']\n labels = tomography_circuit_names(tomoset, circuit.name)\n tomography_circuits = []\n for label, conf in zip(labels, dics):\n tmp = circuit\n # Add prep circuits\n if 'prep' in conf:\n prep = QuantumCircuit(qreg, creg, name='tmp_prep')\n for 
qubit, op in conf['prep'].items():\n tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op)\n prep.barrier(qreg[qubit]) # pylint: disable=no-member\n tmp = prep + tmp\n # Add measurement circuits\n meas = QuantumCircuit(qreg, creg, name='tmp_meas')\n for qubit, op in conf['meas'].items():\n meas.barrier(qreg[qubit]) # pylint: disable=no-member\n tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op)\n meas.measure(qreg[qubit], creg[qubit])\n tmp = tmp + meas\n # Add label to the circuit\n tmp.name = label\n tomography_circuits.append(tmp)\n\n logger.info('>> created tomography circuits for \"%s\"', circuit.name)\n return tomography_circuits\n\n\n###############################################################################\n# Get results data\n###############################################################################\n\n\ndef tomography_data(results, name, tomoset):\n \"\"\"\n Return a results dict for a state or process tomography experiment.\n\n Args:\n results (Result): Results from execution of a process tomography\n circuits on a backend.\n name (string): The name of the circuit being reconstructed.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of dicts for the outcome of each process tomography\n measurement circuit.\n \"\"\"\n\n labels = tomography_circuit_names(tomoset, name)\n circuits = tomoset['circuits']\n data = []\n prep = None\n for j, _ in enumerate(labels):\n counts = marginal_counts(results.get_counts(labels[j]),\n tomoset['qubits'])\n shots = sum(counts.values())\n meas = circuits[j]['meas']\n prep = circuits[j].get('prep', None)\n meas_qubits = sorted(meas.keys())\n if prep:\n prep_qubits = sorted(prep.keys())\n circuit = {}\n for c in counts.keys():\n circuit[c] = {}\n circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[-1 - k]))\n for k in range(len(meas_qubits))]\n if prep:\n circuit[c]['prep'] = [prep[prep_qubits[k]]\n for k in range(len(prep_qubits))]\n data.append({'counts': counts, 'shots': shots, 'circuit': circuit})\n\n ret = {'data': data, 'meas_basis': tomoset['meas_basis']}\n if prep:\n ret['prep_basis'] = tomoset['prep_basis']\n return ret\n\n\ndef marginal_counts(counts, meas_qubits):\n \"\"\"\n Compute the marginal counts for a subset of measured qubits.\n\n Args:\n counts (dict): the counts returned from a backend ({str: int}).\n meas_qubits (list[int]): the qubits to return the marginal\n counts distribution for.\n\n Returns:\n dict: A counts dict for the meas_qubits.abs\n Example: if `counts = {'00': 10, '01': 5}`\n `marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.\n `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.\n \"\"\"\n # pylint: disable=cell-var-from-loop\n # Extract total number of qubits from count keys\n num_of_qubits = len(list(counts.keys())[0])\n\n # keys for measured qubits only\n qs = sorted(meas_qubits, reverse=True)\n\n meas_keys = count_keys(len(qs))\n\n # get regex match strings for summing outcomes of other qubits\n rgx = [\n reduce(lambda x, y: (key[qs.index(y)] if y in qs else '\\\\d') + x,\n range(num_of_qubits), '') for key in meas_keys\n ]\n\n # build the return list\n meas_counts = []\n for m in rgx:\n c = 0\n for key, val in counts.items():\n if match(m, key):\n c += val\n meas_counts.append(c)\n\n # return as counts dict on measured qubits only\n return dict(zip(meas_keys, meas_counts))\n\n\ndef count_keys(n):\n \"\"\"Generate outcome bitstrings for n-qubits.\n\n Args:\n n (int): the number of qubits.\n\n Returns:\n list: A list of bitstrings ordered 
as follows:\n Example: n=2 returns ['00', '01', '10', '11'].\n \"\"\"\n return [bin(j)[2:].zfill(n) for j in range(2**n)]\n\n\n###############################################################################\n# Tomographic Reconstruction functions.\n###############################################################################\n\n\ndef fit_tomography_data(tomo_data, method='wizard', options=None):\n \"\"\"\n Reconstruct a density matrix or process-matrix from tomography data.\n\n If the input data is state_tomography_data the returned operator will\n be a density matrix. If the input data is process_tomography_data the\n returned operator will be a Choi-matrix in the column-vectorization\n convention.\n\n Args:\n tomo_data (dict): process tomography measurement data.\n method (str): the fitting method to use.\n Available methods:\n - 'wizard' (default)\n - 'leastsq'\n options (dict or None): additional options for fitting method.\n\n Returns:\n numpy.array: The fitted operator.\n\n Available methods:\n - 'wizard' (Default): The returned operator will be constrained to be\n positive-semidefinite.\n Options:\n - 'trace': the trace of the returned operator.\n The default value is 1.\n - 'beta': hedging parameter for computing frequencies from\n zero-count data. The default value is 0.50922.\n - 'epsilon: threshold for truncating small eigenvalues to zero.\n The default value is 0\n - 'leastsq': Fitting without positive-semidefinite constraint.\n Options:\n - 'trace': Same as for 'wizard' method.\n - 'beta': Same as for 'wizard' method.\n Raises:\n Exception: if the `method` parameter is not valid.\n \"\"\"\n\n if isinstance(method, str) and method.lower() in ['wizard', 'leastsq']:\n # get options\n trace = __get_option('trace', options)\n beta = __get_option('beta', options)\n # fit state\n rho = __leastsq_fit(tomo_data, trace=trace, beta=beta)\n if method == 'wizard':\n # Use wizard method to constrain positivity\n epsilon = __get_option('epsilon', options)\n rho = __wizard(rho, epsilon=epsilon)\n return rho\n else:\n raise Exception('Invalid reconstruction method \"%s\"' % method)\n\n\ndef __get_option(opt, options):\n \"\"\"\n Return an optional value or None if not found.\n \"\"\"\n if options is not None:\n if opt in options:\n return options[opt]\n return None\n\n\n###############################################################################\n# Fit Method: Linear Inversion\n###############################################################################\n\n\ndef __leastsq_fit(tomo_data, weights=None, trace=None, beta=None):\n \"\"\"\n Reconstruct a state from unconstrained least-squares fitting.\n\n Args:\n tomo_data (list[dict]): state or process tomography data.\n weights (list or array or None): weights to use for least squares\n fitting. The default is standard deviation from a binomial\n distribution.\n trace (float or None): trace of returned operator. The default is 1.\n beta (float or None): hedge parameter (>=0) for computing frequencies\n from zero-count data. The default value is 0.50922.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n \"\"\"\n if trace is None:\n trace = 1. 
# default to unit trace\n\n data = tomo_data['data']\n keys = data[0]['circuit'].keys()\n\n # Get counts and shots\n counts = []\n shots = []\n ops = []\n for dat in data:\n for key in keys:\n counts.append(dat['counts'][key])\n shots.append(dat['shots'])\n projectors = dat['circuit'][key]\n op = __projector(projectors['meas'], tomo_data['meas_basis'])\n if 'prep' in projectors:\n op_prep = __projector(projectors['prep'],\n tomo_data['prep_basis'])\n op = np.kron(op_prep.conj(), op)\n ops.append(op)\n\n # Convert counts to frequencies\n counts = np.array(counts)\n shots = np.array(shots)\n freqs = counts / shots\n\n # Use hedged frequencies to calculate least squares fitting weights\n if weights is None:\n if beta is None:\n beta = 0.50922\n K = len(keys)\n freqs_hedged = (counts + beta) / (shots + K * beta)\n weights = np.sqrt(shots / (freqs_hedged * (1 - freqs_hedged)))\n\n return __tomo_linear_inv(freqs, ops, weights, trace=trace)\n\n\ndef __projector(op_list, basis):\n \"\"\"Returns a projectors.\n \"\"\"\n ret = 1\n # list is from qubit 0 to 1\n for op in op_list:\n label, eigenstate = op\n ret = np.kron(basis[label][eigenstate], ret)\n return ret\n\n\ndef __tomo_linear_inv(freqs, ops, weights=None, trace=None):\n \"\"\"\n Reconstruct a matrix through linear inversion.\n\n Args:\n freqs (list[float]): list of observed frequences.\n ops (list[np.array]): list of corresponding projectors.\n weights (list[float] or array_like):\n weights to be used for weighted fitting.\n trace (float or None): trace of returned operator.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n \"\"\"\n # get weights matrix\n if weights is not None:\n W = np.array(weights)\n if W.ndim == 1:\n W = np.diag(W)\n\n # Get basis S matrix\n S = np.array([vectorize(m).conj()\n for m in ops]).reshape(len(ops), ops[0].size)\n if weights is not None:\n S = np.dot(W, S) # W.S\n\n # get frequencies vec\n v = np.array(freqs) # |f>\n if weights is not None:\n v = np.dot(W, freqs) # W.|f>\n Sdg = S.T.conj() # S^*.W^*\n inv = np.linalg.pinv(np.dot(Sdg, S)) # (S^*.W^*.W.S)^-1\n\n # linear inversion of freqs\n ret = devectorize(np.dot(inv, np.dot(Sdg, v)))\n # renormalize to input trace value\n if trace is not None:\n ret = trace * ret / np.trace(ret)\n return ret\n\n\n###############################################################################\n# Fit Method: Wizard\n###############################################################################\n\n\ndef __wizard(rho, epsilon=None):\n \"\"\"\n Returns the nearest positive semidefinite operator to an operator.\n\n This method is based on reference [1]. It constrains positivity\n by setting negative eigenvalues to zero and rescaling the positive\n eigenvalues.\n\n Args:\n rho (array_like): the input operator.\n epsilon(float or None): threshold (>=0) for truncating small\n eigenvalues values to zero.\n\n Returns:\n numpy.array: A positive semidefinite numpy array.\n \"\"\"\n if epsilon is None:\n epsilon = 0. 
# default value\n\n dim = len(rho)\n rho_wizard = np.zeros([dim, dim])\n v, w = np.linalg.eigh(rho) # v eigenvecrors v[0] < v[1] <...\n for j in range(dim):\n if v[j] < epsilon:\n tmp = v[j]\n v[j] = 0.\n # redistribute loop\n x = 0.\n for k in range(j + 1, dim):\n x += tmp / (dim - (j + 1))\n v[k] = v[k] + tmp / (dim - (j + 1))\n for j in range(dim):\n rho_wizard = rho_wizard + v[j] * outer(w[:, j])\n return rho_wizard\n\n\n###############################################################\n# Wigner function tomography\n###############################################################\n\ndef build_wigner_circuits(circuit, phis, thetas, qubits,\n qreg, creg):\n \"\"\"Create the circuits to rotate to points in phase space\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n phis (np.matrix[[complex]]): phis\n thetas (np.matrix[[complex]]): thetas\n qubits (list[int]): a list of the qubit indexes of qreg to be measured.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n\n Returns:\n list: A list of names of the added wigner function circuits.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit.\n \"\"\"\n\n if not isinstance(circuit, QuantumCircuit):\n raise QiskitError('Input circuit must be a QuantumCircuit object')\n\n tomography_circuits = []\n points = len(phis[0])\n for point in range(points):\n label = '_wigner_phase_point'\n label += str(point)\n tmp_circ = QuantumCircuit(qreg, creg, name=label)\n for qubit, _ in enumerate(qubits):\n tmp_circ.u3(thetas[qubit][point], 0, # pylint: disable=no-member\n phis[qubit][point], qreg[qubits[qubit]])\n tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]])\n # Add to original circuit\n tmp_circ = circuit + tmp_circ\n tmp_circ.name = circuit.name + label\n tomography_circuits.append(tmp_circ)\n\n logger.info('>> Created Wigner function circuits for \"%s\"', circuit.name)\n return tomography_circuits\n\n\ndef wigner_data(q_result, meas_qubits, labels, shots=None):\n \"\"\"Get the value of the Wigner function from measurement results.\n\n Args:\n q_result (Result): Results from execution of a state tomography\n circuits on a backend.\n meas_qubits (list[int]): a list of the qubit indexes measured.\n labels (list[str]): a list of names of the circuits\n shots (int): number of shots\n\n Returns:\n list: The values of the Wigner function at measured points in\n phase space\n \"\"\"\n num = len(meas_qubits)\n\n dim = 2**num\n p = [0.5 + 0.5 * np.sqrt(3), 0.5 - 0.5 * np.sqrt(3)]\n parity = 1\n\n for i in range(num):\n parity = np.kron(parity, p)\n\n w = [0] * len(labels)\n wpt = 0\n counts = [marginal_counts(q_result.get_counts(circ), meas_qubits)\n for circ in labels]\n for entry in counts:\n x = [0] * dim\n\n for i in range(dim):\n if bin(i)[2:].zfill(num) in entry:\n x[i] = float(entry[bin(i)[2:].zfill(num)])\n\n if shots is None:\n shots = np.sum(x)\n\n for i in range(dim):\n w[wpt] = w[wpt] + (x[i] / shots) * parity[i]\n wpt += 1\n\n return w\n"
] |
[
[
"numpy.array",
"numpy.dot",
"numpy.trace",
"numpy.zeros",
"numpy.sum",
"numpy.linalg.eigh",
"numpy.exp",
"numpy.sqrt",
"numpy.diag",
"numpy.kron"
]
] |
Sanduoo/OpenCV-Python
|
[
"8356782f65918a5193b0d2d2618d0bcb32e831d2"
] |
[
"11_ Histogram3.py"
] |
[
"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#直方图反向投影\n\ndef bitwise_and():\n small = cv.imread(\"C:/1/image/small.jpg\")\n big = cv.imread(\"C:/1/image/big.jpg\")\n small_hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)\n big_hsv = cv.cvtColor(big, cv.COLOR_BGR2HSV)\n\n \"\"\"\n h,s,v = cv.split(small_hsv)\n print(h)\n print(s)\n print(v)\n \"\"\"\n\n lower_hsv = np.array([1, 120, 240])\n upper_hsv = np.array([4, 160, 255])\n mask = cv.inRange(big_hsv, lower_hsv, upper_hsv)\n dest = cv.bitwise_and(big_hsv, big_hsv, mask=mask)\n cv.imshow('mask', dest)\n cv.imshow('video', big)\n\n\ndef back_projection_demo():\n sample = cv.imread(\"C:/1/image/small.jpg\")\n target = cv.imread(\"C:/1/image/big.jpg\")\n roi_hsv = cv.cvtColor(sample,cv.COLOR_BGR2HSV)\n target_hsv = cv.cvtColor(target,cv.COLOR_BGR2HSV)\n\n #show images\n cv.imshow(\"sample\",sample)\n cv.imshow(\"target\",target)\n\n roiHist = cv.calcHist([roi_hsv],[0,1],None,[32,32],[0,180,0,256]) #求出样本直方图\n cv.normalize(roiHist,roiHist,0,256,cv.NORM_MINMAX) #直方图归一化\n dest = cv.calcBackProject([target_hsv],[0,1],roiHist,[0,180,0,256],1) #直方图反向投影\n cv.imshow(\"back_projection_demo\", dest)\n\n\ndef hist2d_demo(image):\n hsv = cv.cvtColor(image,cv.COLOR_BGR2HSV)\n hist = cv.calcHist([image],[0,1],None,[32,32],[0,180,0,256])\n # cv.imshow(\"hist2d_demo\",hist)\n plt.imshow(hist,interpolation='nearest')\n plt.title(\"2D Histogram\")\n plt.show()\n\n\nsrc = cv.imread(\"C:/1/1.jpg\")\n# cv.namedWindow('input_image', cv.WINDOW_AUTOSIZE)\n# cv.imshow(\"input_image\",src)\n\nbitwise_and()\n\ncv.waitKey(0)\ncv.destroyAllWindows()\n"
] |
[
[
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.imshow"
]
] |
xxg1413/oneflow
|
[
"f2e3c85a25b8aecfb6c0c0af1737833b1a77e135",
"f2e3c85a25b8aecfb6c0c0af1737833b1a77e135"
] |
[
"oneflow/python/test/ops/test_reduce_mean.py",
"oneflow/python/test/ops/test_mod_int.py"
] |
[
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nimport test_global_storage\nfrom test_util import GenArgList\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef compare_with_tensorflow(device_type, input_shape, axis, keepdims):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def ReduceMeanJob():\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=input_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n loss = flow.math.reduce_mean(x, axis=axis, keepdims=keepdims)\n # TODO: fix facade and add_loss bug\n loss = flow.identity(loss)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(loss, test_global_storage.Setter(\"loss\"))\n flow.watch_diff(loss, test_global_storage.Setter(\"loss_diff\"))\n\n return loss\n\n # OneFlow\n check_point = flow.train.CheckPoint()\n check_point.init()\n of_out = ReduceMeanJob().get()\n # TensorFlow\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(test_global_storage.Get(\"x\"))\n tf_out = tf.math.reduce_mean(x, axis=axis, keepdims=keepdims)\n loss_diff = test_global_storage.Get(\"loss_diff\")\n tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n\n assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-5, atol=1e-5)\n assert np.allclose(\n test_global_storage.Get(\"x_diff\"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5\n )\n\n\ndef test_reduce_mean(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"input_shape\"] = [(64, 64, 64)]\n arg_dict[\"axis\"] = [None, [1], [0, 2]]\n arg_dict[\"keepdims\"] = [True, False]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\n\nfunc_config = flow.FunctionConfig()\nfunc_config.default_data_type(flow.int32)\n# func_config.default_data_type(flow.float32)\n\n\ndef test_naive(test_case):\n @flow.global_function(function_config=func_config)\n def ModJob(\n a: oft.Numpy.Placeholder((5, 2), dtype=flow.int32),\n b: oft.Numpy.Placeholder((5, 2), dtype=flow.int32),\n ):\n return a % b\n\n x = (np.random.rand(5, 2) * 1000).astype(np.int32) + 1\n y = (np.random.rand(5, 2) * 1000).astype(np.int32) + 1\n z = None\n z = ModJob(x, y).get().numpy()\n test_case.assertTrue(np.array_equal(z, x % y))\n\n\ndef test_broadcast(test_case):\n @flow.global_function(function_config=func_config)\n def ModJob(\n a: oft.Numpy.Placeholder((5, 2), dtype=flow.int32),\n b: oft.Numpy.Placeholder((1, 2), dtype=flow.int32),\n ):\n return a % b\n\n x = (np.random.rand(5, 2) * 1000).astype(np.int32) + 1\n y = (np.random.rand(1, 2) * 1000).astype(np.int32) + 1\n z = None\n z = ModJob(x, y).get().numpy()\n test_case.assertTrue(np.array_equal(z, x % y))\n\n\ndef test_xy_mod_x1(test_case):\n GenerateTest(test_case, (64, 64), (64, 1))\n\n\ndef test_xy_mod_1y(test_case):\n GenerateTest(test_case, (64, 64), (1, 64))\n\n\ndef test_xyz_mod_x1z(test_case):\n GenerateTest(test_case, (64, 64, 64), (64, 1, 64))\n\n\ndef test_xyz_mod_1y1(test_case):\n GenerateTest(test_case, (64, 64, 64), (1, 64, 1))\n\n\ndef GenerateTest(test_case, a_shape, b_shape):\n @flow.global_function(function_config=func_config)\n def ModJob(\n a: oft.Numpy.Placeholder(a_shape, dtype=flow.int32),\n b: oft.Numpy.Placeholder(b_shape, dtype=flow.int32),\n ):\n return a % b\n\n a = (np.random.rand(*a_shape) * 1000).astype(np.int32) + 1\n b = (np.random.rand(*b_shape) * 1000).astype(np.int32) + 1\n y = ModJob(a, b).get().numpy()\n test_case.assertTrue(np.array_equal(y, a % b))\n"
] |
[
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.GradientTape",
"tensorflow.math.reduce_mean"
],
[
"numpy.array_equal",
"numpy.random.rand"
]
] |
kougou/end2end_dialog
|
[
"bd70fcca8faec0ded37b74997fa9d2b507a6f07b"
] |
[
"SlotTaggingModel_multitask.py"
] |
[
"''' Natural language understanding model based on multi-task learning.\n This model is trained on two tasks: slot tagging and user intent prediction. \n\n Inputs: user utterance, e.g. BOS w1 w2 ... EOS\n Outputs: slot tags and user intents, e.g. O O B-moviename ... O\\tinform+moviename\n \n Author : Xuesong Yang\n Email : xyang45@illinois.edu\n Created Date: Dec. 31, 2016\n'''\nfrom DataSetCSVslotTagging import DataSetCSVslotTagging\nfrom keras.layers import Input, LSTM, Dense, Dropout, merge, Embedding, TimeDistributed\nfrom keras.models import Model\nfrom utils import print_params, eval_slotTagging, eval_intentPredict, writeTxt, getNLUpred, getActPred, getTagPred, checkExistence, getNLUframeAccuracy, eval_actPred \nimport os\nimport numpy as np\nnp.random.seed(1983)\n\n\ndef writeUtterTagIntentTxt(utter_txt, tag_txt, intent_txt, target_fname):\n with open(target_fname, 'wb') as f:\n for (utter, tag, intent) in zip(utter_txt, tag_txt, intent_txt):\n tag_new = [token.replace('tag-', '', 1) for token in tag.split()]\n intent_new = [token.replace('intent-', '', 1)\n for token in intent.split(';')]\n new_line = '{}\\t{}\\t{}'.format(\n utter, ' '.join(tag_new), ';'.join(intent_new))\n f.write('{}\\n'.format(new_line))\n\n\nclass SlotTaggingModel(object):\n\n def __init__(self, **argparams):\n self.train_data = argparams['train_data']\n if self.train_data is not None:\n assert isinstance(self.train_data, DataSetCSVslotTagging)\n self.test_data = argparams['test_data']\n if self.test_data is not None:\n assert isinstance(self.test_data, DataSetCSVslotTagging)\n self.dev_data = argparams['dev_data']\n if self.dev_data is not None:\n assert isinstance(self.dev_data, DataSetCSVslotTagging)\n self.model_folder = argparams['model_folder']\n if self.model_folder is None:\n pid = argparams['pid']\n self.model_folder = './model/slot_{}'.format(pid)\n os.makedirs('{}/weights'.format(self.model_folder))\n os.makedirs('{}/dev_results'.format(self.model_folder))\n self.epoch_nb = argparams['epoch_nb']\n self.batch_size = argparams['batch_size']\n self.embedding_size = argparams['embedding_size']\n self.hidden_size = argparams['hidden_size']\n self.dropout = argparams['dropout_ratio']\n self.optimizer = argparams['optimizer']\n self.patience = argparams['patience']\n self.loss = argparams['loss']\n self.test_tag_flag = argparams['test_tag_only']\n self.test_intent_flag = argparams['test_intent_only']\n self.threshold = argparams['threshold']\n self.weights_fname = argparams['weights_fname']\n self.params = argparams\n\n def _build(self):\n print('Building Graph ...')\n words_input = Input(shape=(self.maxlen_userUtter,),\n dtype='int32', name='words_input')\n # reserve 0 for masking, therefore vocab_size + 1\n embeddings = Embedding(input_dim=self.word_vocab_size + 1,\n output_dim=self.embedding_size,\n input_length=self.maxlen_userUtter,\n mask_zero=True)(words_input)\n embeddings = Dropout(self.dropout)(embeddings)\n lstm_forward = LSTM(output_dim=self.hidden_size,\n return_sequences=True,\n name='LSTM_forward')(embeddings)\n lstm_forward = Dropout(self.dropout)(lstm_forward)\n lstm_backward = LSTM(output_dim=self.hidden_size,\n return_sequences=True,\n go_backwards=True,\n name='LSTM_backward')(embeddings)\n lstm_backward = Dropout(self.dropout)(lstm_backward)\n lstm_concat = merge([lstm_forward, lstm_backward],\n mode='concat',\n concat_axis=-1,\n name='merge_bidirections')\n slot_softmax_seq = TimeDistributed(Dense(\n output_dim=self.userTag_vocab_size,\n activation='softmax'), 
name='slot_output')(lstm_concat)\n intent_summary = LSTM(output_dim=self.hidden_size,\n return_sequences=False,\n name='summarize_to_dense')(lstm_concat)\n intent_summary = Dropout(self.dropout)(intent_summary)\n # intent_softmax = Dense(output_dim=self.userIntent_vocab_size,\n # activation='softmax', name='intent_output')(intent_summary)\n intent_softmax = Dense(output_dim=self.userIntent_vocab_size,\n activation='sigmoid', name='intent_output')(intent_summary)\n self.model = Model(input=words_input, output=[\n slot_softmax_seq, intent_softmax])\n self.model.compile(optimizer=self.optimizer,\n # metrics=['accuracy'],\n sample_weight_mode={\n 'slot_output': 'temporal', 'intent_output': None},\n loss={'slot_output': self.loss, 'intent_output': 'binary_crossentropy'})\n\n def train(self):\n print('Training model ...')\n # load params\n self.maxlen_userUtter = self.train_data.maxlen_userUtter\n self.word_vocab_size = self.train_data.word_vocab_size\n self.userIntent_vocab_size = self.train_data.userIntent_vocab_size\n self.userTag_vocab_size = self.train_data.userTag_vocab_size\n self.id2word = self.train_data.id2word\n self.id2userTag = self.train_data.id2userTag\n self.id2userIntent = self.train_data.id2userIntent\n self.userTag2id = self.train_data.userTag2id\n other_npz = '{}/other_vars.npz'.format(self.model_folder)\n train_vars = {'id2userTag': self.id2userTag,\n 'id2word': self.id2word,\n 'id2userIntent': self.id2userIntent,\n 'userTag2id': self.userTag2id,\n 'userTag_vocab_size': self.userTag_vocab_size,\n 'userIntent_vocab_size': self.userIntent_vocab_size,\n 'word_vocab_size': self.word_vocab_size,\n 'maxlen_userUtter': self.maxlen_userUtter}\n np.savez_compressed(other_npz, **train_vars)\n self.params['maxlen_userUtter'] = self.maxlen_userUtter\n self.params['word_vocab_size'] = self.word_vocab_size\n self.params['userTag_vocab_size'] = self.userTag_vocab_size\n self.params['userIntent_vocab_size'] = self.userIntent_vocab_size\n print_params(self.params)\n # build model graph, save graph and plot graph\n self._build()\n self._plot_graph()\n graph_yaml = '{}/graph-arch.yaml'.format(self.model_folder)\n with open(graph_yaml, 'w') as fyaml:\n fyaml.write(self.model.to_yaml())\n # load train data\n X_train = self.train_data.userUtter_encodePad\n tag_train = self.train_data.userTag_1hotPad\n intent_train = self.train_data.userIntent_vecBin\n train_utter_txt = self.train_data.userUtter_txt\n train_intent_txt = self.train_data.userIntent_txt\n train_tag_txt = self.train_data.userTag_txt\n train_target_fname = '{}/train.target'.format(self.model_folder)\n writeUtterTagIntentTxt(train_utter_txt, train_tag_txt, train_intent_txt, train_target_fname)\n # load dev data\n X_dev = self.dev_data.userUtter_encodePad\n tag_dev = self.dev_data.userTag_1hotPad\n intent_dev = self.dev_data.userIntent_vecBin\n dev_utter_txt = self.dev_data.userUtter_txt\n dev_intent_txt = self.dev_data.userIntent_txt\n dev_tag_txt = self.dev_data.userTag_txt\n dev_target_fname = '{}/dev.target'.format(self.model_folder)\n writeUtterTagIntentTxt(dev_utter_txt, dev_tag_txt, dev_intent_txt, dev_target_fname)\n # get mask matrix for train and dev set\n mask_array_train = np.zeros_like(X_train)\n mask_array_train[X_train != 0] = 1\n mask_array_dev = np.zeros_like(X_dev)\n mask_array_dev[X_dev != 0] = 1\n # jointly training\n for ep in xrange(self.epoch_nb):\n print('<Epoch {}>'.format(ep))\n self.model.fit(x=X_train,\n y={'slot_output': tag_train,\n 'intent_output': intent_train},\n sample_weight={'slot_output': 
mask_array_train,\n 'intent_output': None},\n batch_size=self.batch_size, nb_epoch=1, verbose=2)\n tag_probs, intent_probs = self.model.predict(X_dev)\n # calculate token-level scores\n precision_tag, recall_tag, fscore_tag, accuracy_frame_tag = eval_slotTagging(tag_probs, mask_array_dev,\n tag_dev, self.userTag2id['tag-O'])\n print('SlotTagging: ep={}, precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(ep, precision_tag, recall_tag, fscore_tag, accuracy_frame_tag))\n precision_intent, recall_intent, fscore_intent, accuracy_frame_intent, threshold = eval_intentPredict(intent_probs,\n intent_dev)\n print('Intent Prediction: ep={}, precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(ep, precision_intent, recall_intent, fscore_intent, accuracy_frame_intent, threshold))\n accuracy_frame_both = getNLUframeAccuracy(tag_probs, mask_array_dev, tag_dev, intent_probs, intent_dev, threshold)\n print('NLU Frame: ep={}, accuracy={:.4f}'.format(ep, accuracy_frame_both))\n dev_tag_pred_txt, dev_intent_pred_txt = getNLUpred(tag_probs, mask_array_dev, self.id2userTag, intent_probs, threshold, self.id2userIntent)\n dev_results_fname = '{}/dev_results/dev_ep={}.pred'.format(self.model_folder, ep)\n writeUtterTagIntentTxt(dev_utter_txt, dev_tag_pred_txt, dev_intent_pred_txt, dev_results_fname)\n print('Write dev results: {}'.format(dev_results_fname))\n weights_fname = '{}/weights/ep={}_tagF1={:.4f}frameAcc={:.4f}_intentF1={:.4f}frameAcc={:.4f}th={:.4f}.h5'.format(self.model_folder, ep, fscore_tag, accuracy_frame_tag, fscore_intent, accuracy_frame_intent, threshold)\n print('Saving Model: {}'.format(weights_fname))\n self.model.save_weights(weights_fname, overwrite=True)\n\n def _plot_graph(self):\n from keras.utils import visualize_util\n graph_png = '{}/graph-plot.png'.format(self.model_folder)\n visualize_util.plot(self.model,\n to_file=graph_png,\n show_shapes=True,\n show_layer_names=True)\n\n def predict(self):\n print('Predicting ...')\n result_folder = '{}/test_result'.format(self.model_folder)\n if not os.path.exists(result_folder):\n os.makedirs(result_folder)\n # write user utters\n utter_fname = '{}/utter.txt'.format(result_folder)\n if not os.path.exists(utter_fname):\n utter_txt = self.test_data.userUtter_txt\n writeTxt(utter_txt, utter_fname, prefix='', delimiter=None)\n print('\\ttest_utter={}'.format(utter_fname))\n # load test data and calculate posterior probs.\n X_test = self.test_data.userUtter_encodePad\n tag_probs, intent_probs = self.model.predict(X_test) # a tuple, slot_tags and intents\n # make prediction\n if self.test_intent_flag:\n assert self.threshold is not None, 'Argument required: --threshold'\n intent_probs_fname = '{}/intentProb_{}.npz'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])\n np.savez_compressed(intent_probs_fname, probs=intent_probs)\n print('\\tintent_probs={}'.format(intent_probs_fname))\n # write prediction test results\n pred_intent_fname = '{}/intent_{}.pred'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])\n pred_intent_txt = getActPred(intent_probs, self.threshold, self.id2userIntent)\n writeTxt(pred_intent_txt, pred_intent_fname, prefix='intent-', delimiter=';')\n print('\\tintent_pred={}'.format(pred_intent_fname))\n # write target test\n target_intent_fname = '{}/intent_test.target'.format(result_folder)\n target_intent = self.test_data.userIntent_txt\n writeTxt(target_intent, target_intent_fname, prefix='intent-', delimiter=';')\n 
print('\\tintent_target={}'.format(target_intent_fname))\n # calculate performance scores\n preds_indicator, precision, recall, fscore, accuracy_frame = eval_actPred(intent_probs,\n self.test_data.userIntent_vecBin, \n self.threshold)\n print('IntentPred: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(precision, recall, fscore, accuracy_frame))\n\n if self.test_tag_flag:\n tag_probs_fname = '{}/tagProb_{}.npz'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])\n np.savez_compressed(tag_probs_fname, probs=tag_probs)\n print('\\ttag_probs={}'.format(tag_probs_fname))\n # write prediction results\n pred_tag_fname = '{}/tag_{}.pred'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])\n mask_test = np.zeros_like(X_test)\n mask_test[X_test != 0] = 1\n pred_tag_txt = getTagPred(tag_probs, mask_test, self.id2userTag)\n writeTxt(pred_tag_txt, pred_tag_fname, prefix='tag-', delimiter=None)\n print('\\ttag_pred={}'.format(pred_tag_fname))\n # write target\n target_tag_fname = '{}/tag_test.target'.format(result_folder)\n target_tag = self.test_data.userTag_txt\n writeTxt(target_tag, target_tag_fname, prefix='tag-', delimiter=None)\n print('\\ttag_target={}'.format(target_tag_fname))\n # calculate performance scores\n precision, recall, fscore, accuracy_frame = eval_slotTagging(tag_probs, mask_test,\n self.test_data.userTag_1hotPad, self.userTag2id['tag-O'])\n print('SlotTagging: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(precision, recall, fscore, accuracy_frame))\n\n def load_model(self):\n print('Loading model ...')\n # check existence of params\n assert os.path.exists(self.model_folder), 'model_fold is not found: {}'.format(self.model_folder)\n assert self.weights_fname is not None, 'Argument required: --weights-file'\n checkExistence(self.weights_fname)\n model_graph = '{}/graph-arch.yaml'.format(self.model_folder)\n model_train_vars = '{}/other_vars.npz'.format(self.model_folder)\n checkExistence(model_graph)\n checkExistence(model_train_vars)\n from keras.models import model_from_yaml\n with open(model_graph, 'r') as fgraph:\n self.model = model_from_yaml(fgraph.read())\n self.model.load_weights(self.weights_fname)\n npzfile = np.load(model_train_vars)\n self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])\n self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])\n self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])\n self.userIntent_vocab_size = np.int32(\n npzfile['userIntent_vocab_size'][()])\n self.id2userTag = npzfile['id2userTag'][()]\n self.id2word = npzfile['id2word'][()]\n self.id2userIntent = npzfile['id2userIntent'][()]\n self.userTag2id = npzfile['userTag2id'][()]\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data-npz', dest='data_npz',\n help='.npz file including instances of DataSetCSVslotTagging for train, dev and test')\n parser.add_argument('--loss', dest='loss',\n default='categorical_crossentropy',\n help='objective function')\n parser.add_argument('--optimizer', dest='optimizer',\n default='adam', help='optimizer')\n parser.add_argument('--epoch-nb', dest='epoch_nb', type=int,\n default=300, help='number of epoches')\n parser.add_argument('--embedding-size', dest='embedding_size', type=int,\n default=512, help='the dimention of word embeddings.')\n parser.add_argument('--patience', dest='patience', 
type=int,\n default=10, help='the patience for early stopping criteria')\n parser.add_argument('--batch-size', dest='batch_size', type=int,\n default=32, help='batch size')\n parser.add_argument('--hidden-size', dest='hidden_size', type=int,\n default=128, help='the number of hidden units in recurrent layer')\n parser.add_argument('--dropout-ratio', dest='dropout_ratio',\n type=float, default=0.5, help='dropout ratio')\n parser.add_argument('--model-folder', dest='model_folder',\n help='the folder contains graph.yaml, weights.h5, and other_vars.npz')\n parser.add_argument('--test-tag', dest='test_tag_only', action='store_true',\n help='only perform user Tagging test if this option is activated.')\n parser.add_argument('--test-intent', dest='test_intent_only', action='store_true',\n help='only perform user intent test if this option is activated.')\n parser.add_argument('--train', dest='train_only', action='store_true',\n help='only perform training if this option is activated.')\n parser.add_argument('--weights-file', dest='weights_fname', help='.h5 weights file.')\n parser.add_argument('--threshold', dest='threshold', type=float, help='float number of threshold for multi-label prediction decision.')\n args = parser.parse_args()\n argparams = vars(args)\n # early stop criteria are different for two tasks, therefore one model is\n # chosen for each.\n test_tag_only = argparams['test_tag_only']\n test_intent_only = argparams['test_intent_only']\n train_only = argparams['train_only']\n assert train_only or test_tag_only or test_intent_only, 'Arguments required: either --train, --test-tag, or --test-intent'\n pid = os.getpid()\n argparams['pid'] = pid\n npz_fname = argparams['data_npz']\n checkExistence(npz_fname)\n data_npz = np.load(npz_fname)\n if train_only: # train model\n argparams['train_data'] = data_npz['train_data'][()]\n argparams['dev_data'] = data_npz['dev_data'][()]\n argparams['test_data'] = None\n model = SlotTaggingModel(**argparams)\n model.train()\n else:\n # train_only is False, while test_only is True\n # need to load model\n argparams['train_data'] = None\n argparams['dev_data'] = None\n argparams['test_data'] = None\n if argparams['model_folder'] is None:\n raise Exception('Argument required: --model-folder')\n model = SlotTaggingModel(**argparams)\n model.load_model()\n # test\n if test_tag_only or test_intent_only:\n model.test_data = data_npz['test_data'][()]\n model.predict()\n"
] |
[
[
"numpy.zeros_like",
"numpy.random.seed",
"numpy.load",
"numpy.savez_compressed",
"numpy.int32"
]
] |
mlexample/gcspytorchimagenet
|
[
"b864886ba39869c1c1d601979a80b347f0cc179a"
] |
[
"test_train_mp_wds_local.py"
] |
[
"import torch_xla.test.test_utils as test_utils\nimport torch_xla.distributed.xla_multiprocessing as xmp\nimport torch_xla.core.xla_model as xm\nimport torch_xla.utils.utils as xu\nimport torch_xla.distributed.parallel_loader as pl\nimport torch_xla.debug.metrics as met\nimport torch_xla\nimport torchvision.transforms as transforms\nimport torchvision\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport sys\nimport os\nimport webdataset as wds\nimport datetime\nimport time\n# import warnings\n# warnings.filterwarnings(\"ignore\")\nfrom itertools import islice\nimport torch_xla.debug.profiler as xp\n\n\n# profiler_port=9012\n\nfor extra in ('/usr/share/torch-xla-1.7/pytorch/xla/test', '/pytorch/xla/test', '/usr/share/pytorch/xla/test'):\n if os.path.exists(extra):\n sys.path.insert(0, extra)\n\nimport schedulers\n# import gcsdataset\nimport args_parse # XLA arg parser\n# import argparse # py arg parser\n\n# parser = argparse.ArgumentParser(description='WebDataset args for modified XLA model')\n\n# parser.add_argument('--wds_traindir', type=str, default='/tmp/imagenet')\n# parser.add_argument('--wds_testdir', type=str, default='/tmp/imagenet')\n# parser.add_argument('--trainsize', type=int, default=1280000) \n# parser.add_argument('--testsize', type=int, default=50000)\n# wds_args, others = parser.parse_known_args()\n\nSUPPORTED_MODELS = [\n 'alexnet', 'densenet121', 'densenet161', 'densenet169', 'densenet201',\n 'inception_v3', 'resnet101', 'resnet152', 'resnet18', 'resnet34',\n 'resnet50', 'squeezenet1_0', 'squeezenet1_1', 'vgg11', 'vgg11_bn', 'vgg13',\n 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn'\n]\nMODEL_OPTS = {\n '--model': {\n 'choices': SUPPORTED_MODELS,\n 'default': 'resnet50',\n },\n '--test_set_batch_size': {\n 'type': int,\n },\n '--lr_scheduler_type': {\n 'type': str,\n },\n '--lr_scheduler_divide_every_n_epochs': {\n 'type': int,\n },\n '--lr_scheduler_divisor': {\n 'type': int,\n },\n '--dataset': {\n 'choices': ['gcsdataset', 'torchdataset'],\n 'default': 'gcsdataset',\n 'type': str,\n },\n}\n\n# '--wds_traindir': {\n# 'type': str,\n# 'default':'/tmp/imagenet'\n# },\n# '--wds_testdir': {\n# 'type': str,\n# 'default': '/tmp/imagenet'\n# },\n# '--trainsize': {\n# 'type': int,\n# 'default': 1280000\n# },\n# '--testsize': {\n# 'type': int,\n# 'default': 50000\n# },\n \nFLAGS = args_parse.parse_common_options(\n datadir='/tmp/imagenet',\n batch_size=None,\n num_epochs=None,\n momentum=None,\n lr=None,\n target_accuracy=None,\n opts=MODEL_OPTS.items(),\n profiler_port=9012,\n)\n\nDEFAULT_KWARGS = dict(\n batch_size=128,\n test_set_batch_size=64,\n num_epochs=18,\n momentum=0.9,\n lr=0.1,\n target_accuracy=0.0,\n)\nMODEL_SPECIFIC_DEFAULTS = {\n # Override some of the args in DEFAULT_KWARGS, or add them to the dict\n # if they don't exist.\n 'resnet50':\n dict(\n DEFAULT_KWARGS, **{\n 'lr': 0.5,\n 'lr_scheduler_divide_every_n_epochs': 20,\n 'lr_scheduler_divisor': 5,\n 'lr_scheduler_type': 'WarmupAndExponentialDecayScheduler',\n })\n}\n\n# Set any args that were not explicitly given by the user.\ndefault_value_dict = MODEL_SPECIFIC_DEFAULTS.get(FLAGS.model, DEFAULT_KWARGS)\nfor arg, value in default_value_dict.items():\n if getattr(FLAGS, arg) is None:\n setattr(FLAGS, arg, value)\n\n\ndef get_model_property(key):\n default_model_property = {\n 'img_dim': 224,\n 'model_fn': getattr(torchvision.models, FLAGS.model)\n }\n model_properties = {\n 'inception_v3': {\n 'img_dim': 299,\n 'model_fn': lambda: 
torchvision.models.inception_v3(aux_logits=False)\n },\n }\n model_fn = model_properties.get(FLAGS.model, default_model_property)[key]\n return model_fn\n\n\ndef _train_update(device, step, loss, tracker, epoch, writer):\n test_utils.print_training_update(\n device,\n step,\n loss.item(),\n tracker.rate(),\n tracker.global_rate(),\n epoch,\n summary_writer=writer)\n\n##### WDS ########\n# trainsize = 1281167 # all shards\ntrainsize = 1280000 #FLAGS.trainsize # 1280 shards {000...079}\ntestsize = 50000 # FLAGS.testsize \n\n# train_dir = FLAGS.wds_traindir\n# test_dir = FLAGS.wds_testdir\n\ndef identity(x):\n return x \n\ndef my_worker_splitter(urls):\n \"\"\"Split urls per worker\n Selects a subset of urls based on Torch get_worker_info.\n Used as a shard selection function in Dataset.\n replaces wds.split_by_worker\"\"\"\n # import torch\n\n urls = [url for url in urls]\n\n assert isinstance(urls, list)\n\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n wid = worker_info.id\n num_workers = worker_info.num_workers\n\n return urls[wid::num_workers]\n else:\n return urls\n\ndef my_node_splitter(urls):\n \"\"\"Split urls_ correctly per accelerator node\n :param urls:\n :return: slice of urls_\n \"\"\"\n rank=xm.get_ordinal()\n num_replicas=xm.xrt_world_size()\n\n urls_this = urls[rank::num_replicas]\n \n return urls_this\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\ndef make_train_loader(img_dim, shuffle=10000, batch_size=FLAGS.batch_size):\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001281}.tar\"\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001279}.tar\"\n # \"pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-train-{000000..001281}.tar\"\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-train-{000000..000320}.tar\"\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-train-{000000..000639}.tar\"\n num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers\n epoch_size = trainsize // num_dataset_instances\n # num_batches = (epoch_size + batch_size - 1) // batch_size\n # num_batches = epoch_size // batch_size\n\n image_transform = transforms.Compose(\n [\n transforms.RandomResizedCrop(img_dim),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]\n )\n \n dataset = (\n wds.WebDataset(\"pipe:cat /mnt/disks/dataset/webdataset/shards-640/imagenet-train-{000000..000639}.tar\", # FLAGS.wds_traindir, \n splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=True, length=epoch_size)\n .shuffle(shuffle)\n .decode(\"pil\")\n .to_tuple(\"ppm;jpg;jpeg;png\", \"cls\")\n .map_tuple(image_transform, identity)\n .batched(batch_size, partial=True)\n )\n\n loader = torch.utils.data.DataLoader(dataset, batch_size=None, shuffle=False, drop_last=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn\n return loader\n \ndef make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size):\n \n num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers\n epoch_test_size = testsize // num_dataset_instances\n # num_batches = (epoch_size + batch_size - 1) // batch_size\n # num_test_batches = epoch_test_size // batch_size\n\n val_transform = transforms.Compose(\n [\n transforms.Resize(resize_dim),\n transforms.CenterCrop(img_dim),\n transforms.ToTensor(),\n normalize,\n ]\n )\n # 
\"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-val-{000000..000012}.tar\"\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-val-{000000..000049}.tar\"\n # \"pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar\"\n # \"pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-val-{000000..000024}.tar\"\n val_dataset = (\n wds.WebDataset(\"pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar\", # FLAGS.wds_testdir, \n splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=False, length=epoch_test_size) \n .decode(\"pil\")\n .to_tuple(\"ppm;jpg;jpeg;png\", \"cls\")\n .map_tuple(val_transform, identity)\n .batched(batch_size, partial=True)\n )\n\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=None, shuffle=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn, pin_memory=False \n return val_loader\n\n \ndef train_imagenet():\n print('==> Preparing data..')\n img_dim = get_model_property('img_dim')\n resize_dim = max(img_dim, 256)\n train_loader = make_train_loader(img_dim, batch_size=FLAGS.batch_size, shuffle=10000)\n test_loader = make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size)\n\n torch.manual_seed(42)\n server = xp.start_server(FLAGS.profiler_port)\n\n device = xm.xla_device()\n model = get_model_property('model_fn')().to(device)\n writer = None\n if xm.is_master_ordinal():\n writer = test_utils.get_summary_writer(FLAGS.logdir)\n optimizer = optim.SGD(\n model.parameters(),\n lr=FLAGS.lr,\n momentum=FLAGS.momentum,\n weight_decay=1e-4)\n num_training_steps_per_epoch = trainsize // (\n FLAGS.batch_size * xm.xrt_world_size())\n lr_scheduler = schedulers.wrap_optimizer_with_scheduler(\n optimizer,\n scheduler_type=getattr(FLAGS, 'lr_scheduler_type', None),\n scheduler_divisor=getattr(FLAGS, 'lr_scheduler_divisor', None),\n scheduler_divide_every_n_epochs=getattr(\n FLAGS, 'lr_scheduler_divide_every_n_epochs', None),\n num_steps_per_epoch=num_training_steps_per_epoch,\n summary_writer=writer)\n loss_fn = nn.CrossEntropyLoss()\n# global_step = 0\n \n# server = xp.start_server(profiler_port)\n\n def train_loop_fn(loader, epoch):\n train_steps = trainsize // (FLAGS.batch_size * xm.xrt_world_size())\n tracker = xm.RateTracker()\n total_samples = 0\n rate_list = []\n model.train()\n for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, train_steps))\n# global_step += 1\n optimizer.zero_grad()\n output = model(data)\n loss = loss_fn(output, target)\n loss.backward()\n xm.optimizer_step(optimizer)\n tracker.add(FLAGS.batch_size)\n total_samples += data.size()[0]\n# rate_list.append(tracker.rate())\n# replica_rate = tracker.rate()\n# global_rate = tracker.global_rate()\n if lr_scheduler:\n lr_scheduler.step()\n if step % FLAGS.log_steps == 0:\n xm.add_step_closure(\n _train_update, args=(device, step, loss, tracker, epoch, writer))\n test_utils.write_to_summary(writer, step, dict_to_write={'Rate_step': tracker.rate()}, write_xla_metrics=False)\n if step == train_steps:\n break \n \n# replica_max_rate = np.max(tracker.rate())\n reduced_global = xm.mesh_reduce('reduced_global', tracker.global_rate(), np.mean)\n# reduced_max_rate = xm.mesh_reduce('max_rate', tracker.rate(), np.mean)\n\n return total_samples, reduced_global \n \n def test_loop_fn(loader, epoch):\n test_steps = testsize // (FLAGS.test_set_batch_size * xm.xrt_world_size())\n 
total_samples, correct = 0, 0\n model.eval()\n for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, test_steps)\n output = model(data)\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum()\n total_samples += data.size()[0]\n if step % FLAGS.log_steps == 0:\n xm.add_step_closure(\n test_utils.print_test_update, args=(device, None, epoch, step))\n if step == test_steps:\n break\n correct_val = correct.item()\n accuracy_replica = 100.0 * correct_val / total_samples\n accuracy = xm.mesh_reduce('test_accuracy', accuracy_replica, np.mean)\n return accuracy, accuracy_replica, total_samples\n\n train_device_loader = pl.MpDeviceLoader(train_loader, device)\n test_device_loader = pl.MpDeviceLoader(test_loader, device)\n accuracy, max_accuracy = 0.0, 0.0\n training_start_time = time.time()\n for epoch in range(1, FLAGS.num_epochs + 1):\n xm.master_print('Epoch {} train begin {}'.format(\n epoch, test_utils.now()))\n replica_epoch_start = time.time()\n \n replica_train_samples, reduced_global = train_loop_fn(train_device_loader, epoch)\n \n replica_epoch_time = time.time() - replica_epoch_start\n avg_epoch_time_mesh = xm.mesh_reduce('epoch_time', replica_epoch_time, np.mean)\n reduced_global = reduced_global * xm.xrt_world_size()\n \n xm.master_print('Epoch {} train end {}, Epoch Time={}, Replica Train Samples={}, Reduced GlobalRate={:.2f}'.format(\n epoch, test_utils.now(), str(datetime.timedelta(seconds=avg_epoch_time_mesh)).split('.')[0], replica_train_samples, reduced_global))\n \n accuracy, accuracy_replica, replica_test_samples = test_loop_fn(test_device_loader, epoch)\n\n xm.master_print('Epoch {} test end {}, Reduced Accuracy={:.2f}%, Replica Accuracy={:.2f}%, Replica Test Samples={}'.format(\n epoch, test_utils.now(), accuracy, accuracy_replica, replica_test_samples))\n \n max_accuracy = max(accuracy, max_accuracy)\n test_utils.write_to_summary(\n writer,\n epoch,\n dict_to_write={'Accuracy/test': accuracy,\n 'Global Rate': reduced_global},\n write_xla_metrics=False)\n if FLAGS.metrics_debug:\n xm.master_print(met.metrics_report())\n test_utils.close_summary_writer(writer)\n total_train_time = time.time() - training_start_time\n xm.master_print('Total Train Time: {}'.format(str(datetime.timedelta(seconds=total_train_time)).split('.')[0])) \n xm.master_print('Max Accuracy: {:.2f}%'.format(max_accuracy))\n xm.master_print('Avg. Global Rate: {:.2f} examples per second'.format(reduced_global))\n return max_accuracy\n\n\ndef _mp_fn(index, flags):\n global FLAGS\n FLAGS = flags\n torch.set_default_tensor_type('torch.FloatTensor')\n accuracy = train_imagenet()\n if accuracy < FLAGS.target_accuracy:\n print('Accuracy {} is below target {}'.format(accuracy,\n FLAGS.target_accuracy))\n sys.exit(21)\n\n\nif __name__ == '__main__':\n xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS.num_cores, start_method='fork') # , start_method='spawn'\n"
] |
[
[
"torch.utils.data.get_worker_info",
"torch.set_default_tensor_type",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
]
] |
ebezzam/frius
|
[
"c3acc98288c949085b7dea08ef3708581f86ce25"
] |
[
"report_results/fig1p6_pulse_shape.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__)))\nimport plot_settings\nfrom test_utilities import gausspuls_coeff, gausspulse, gauss_ft\n\n# time domain plot\nfc = 5e6\nbandwidth = 2/3\nbwr = -6\nt_vals = np.linspace(-3/fc, 3/fc, 200)\nh = gausspulse(t_vals, fc, bandwidth, bwr)\n\nplt.figure()\nplt.plot(t_vals, h)\nplt.xlim([-6e-7, 6e-7])\nplt.grid()\n\nplt.xlabel(\"Time [seconds]\")\n\nax = plt.gca()\nax.axes.yaxis.set_ticklabels([])\nplt.tight_layout()\n\nfp = os.path.join(os.path.dirname(__file__), \"figures\", \"_fig1p6a.pdf\")\nplt.savefig(fp, dpi=300)\n\n# frequency domain pulse\nf_vals = np.linspace(-3*fc-1e3, 3*fc+1e3, 1000)\na = gausspuls_coeff(fc, bandwidth, bwr)\nH = gauss_ft(f_vals, a, fc=fc)\nH = H / max(H)\n\nplt.figure()\nplt.semilogx(f_vals, 20*np.log10(np.abs(H)))\nplt.axvline(x=fc, c='r', label=\"$f_c$\")\nplt.grid()\nplt.autoscale(enable=True, axis='x', tight=True)\nplt.ylabel(\"[dB]\")\nplt.legend(loc=3)\nplt.xlabel(\"Frequency [Hz]\")\nplt.ylim([-40,0])\nplt.tight_layout()\n\nfp = os.path.join(os.path.dirname(__file__), \"figures\", \"_fig1p6b.pdf\")\nplt.savefig(fp, dpi=300)\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"matplotlib.pyplot.axvline",
"numpy.linspace"
]
] |
cbymar/clv-modeling
|
[
"0ad130b9a1e116afc4df32d307c1e542da0b19aa"
] |
[
"clvscript00.py"
] |
[
"import datetime as dt\nimport matplotlib.pyplot as plt\nimport lifetimes\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\n\ndef numcard(x):\n return x.nunique(), len(x)\ndef todateclean(x):\n return pd.to_datetime(x, errors='coerce').dt.date.astype('datetime64')\n\n\"\"\"\n- info, shape, dtypes\n- df.isnull().sum() #Check for null counts/ value_counts()\n- Check for supposed imputed values (are there suspicious values of 0, like for Age. )\n- change zeros to nans where appropriate\n- Imputation of missing values\n- handle stringified json\n- df.dtypes # in case obj to (df.colname = df.colname.astype(\"category\"))\n- df['colname'] = pd.to_datetime(df['colname']).dt.date\n- df.drop(\"colname\", axis=1) # drop columns\n- How balanced are the outcomes? \nX = df.drop(\"diagnosis\", axis=1) # just saying which axis again\nY = df[\"diagnosis\"] # this is just a series now\n\ncol = X.columns # if we do type(col), it's an Index\nX.isnull().sum() # this covers every column in the df.\n\ndef rangenorm(x):\n return (x - x.mean())/(x.max() - x.min())\nle = LabelEncoder()\nle.fit(Y_norm)\n\"\"\"\n\ndf = pd.read_csv(\"./ignoreland/onlineretail.csv\")\ndf.info()\ndf.apply(lambda x: numcard(x))\n\ndatecols = ['InvoiceDate']\ndf.loc[:, datecols] = df.loc[:,datecols].apply(lambda x: todateclean(x))\n\ndfnew = df[(df.Quantity>0) & (df.CustomerID.isnull()==False)]\ndfnew['amt'] = dfnew['Quantity'] * dfnew['UnitPrice']\ndfnew.describe()\n\nfrom lifetimes.plotting import *\nfrom lifetimes.utils import *\nobservation_period_end = '2011-12-09'\nmonetary_value_col = 'amt'\nmodeldata = summary_data_from_transaction_data(dfnew,\n 'CustomerID',\n 'InvoiceDate',\n monetary_value_col=monetary_value_col,\n observation_period_end=observation_period_end)\n\nmodeldata.head()\nmodeldata.info() # 4 floats.\n# Eyeball distribution of frequency (calculated)\nmodeldata['frequency'].plot(kind='hist', bins=50)\nprint(modeldata['frequency'].describe())\nprint(modeldata['recency'].describe())\nprint(sum(modeldata['frequency'] == 0)/float(len(modeldata)))\n\n##### Lec21\nfrom lifetimes import BetaGeoFitter\n# similar to lifelines\nbgf = BetaGeoFitter(penalizer_coef=0.0) # no regularization param.\n\nbgf.fit(modeldata['frequency'], modeldata['recency'], modeldata['T'])\nprint(bgf)\n# See https://www.youtube.com/watch?v=guj2gVEEx4s and\n# https://www.youtube.com/watch?v=gx6oHqpRgpY\n## residual lifetime value is more useful construct\n\nfrom lifetimes.plotting import plot_frequency_recency_matrix\nplot_frequency_recency_matrix(bgf)\nfrom lifetimes.plotting import plot_probability_alive_matrix\nplot_probability_alive_matrix(bgf)\n\n# lec 24:\n# set an outer time boundary and predict cumulative purchases by that time\nt = 10 # from now until now+t periods\nmodeldata['predicted_purchases'] = \\\n bgf.conditional_expected_number_of_purchases_up_to_time(t,\n modeldata['frequency'],\n modeldata['recency'],\n modeldata['T'])\nmodeldata.sort_values(by='predicted_purchases').tail(5)\nmodeldata.sort_values(by='predicted_purchases').head(5)\n# lec 25: validation of model\nfrom lifetimes.plotting import plot_period_transactions\nplot_period_transactions(bgf) # this plot shows very clearly the model performance\n# in terms of transaction volume fit\n\n# Lec 26: splitting into train and test (by time period)\nsummary_cal_holdout = calibration_and_holdout_data(df,\n 'CustomerID',\n 'InvoiceDate',\n calibration_period_end='2011-06-08',\n 
observation_period_end='2011-12-09')\n\nsummary_cal_holdout.head()\n\nbgf.fit(summary_cal_holdout['frequency_cal'],\n summary_cal_holdout['recency_cal'],\n summary_cal_holdout['T_cal'])\n\nfrom lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases\n\nplot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)\n\nfrom lifetimes.plotting import plot_history_alive\n\n\ndays_since_birth = 365\nfig = plt.figure(figsize=(12,8))\nid = 14621 # choose a customer id\nsp_trans = df.loc[df['CustomerID'] == id] # specific customer's covariates\nplot_history_alive(bgf, days_since_birth, sp_trans, 'InvoiceDate')\n\n# Lec28: Subsetting to customers who repurchase.\nreturning_customers_summary = modeldata[modeldata['frequency']>0]\nreturning_customers_summary.head()\nreturning_customers_summary.shape\n# Lec 29: gamma-gamma model for LTV\n# Note: good practice to confirm small/no apparent corr for frequency and mean trxn value\n# Rev per trxn: predict total monetary value.\n# The Beta param for the gamma model of total spend is itself assumed gamma distributed\n# that is where the name comes from.\n# teh expectation of total spend for person i is calculated in empirical-bayes fashion, as a weighted\n# mean of population average and the sample mean for person i.\n# eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic\n# https://antonsruberts.github.io/lifetimes-CLV/ also great additional code.\n# derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf\n# Output of ggf fitter:\n# p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution.\n# q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop\n# v = the 'beta' param in gamma dist. constant upon convolution.\n# -- Note that v varies among customers (ie, is gamma distributed)\nfrom lifetimes import GammaGammaFitter\nggf = GammaGammaFitter(penalizer_coef=0.0)\n\nggf.fit(returning_customers_summary['frequency'],\n returning_customers_summary['monetary_value'])\nggf.summary\nggf.conditional_expected_average_profit(modeldata['frequency'],\n modeldata['monetary_value'])\n# cond_exp_avg_profit => gives prediction of mean trxn value.\na0 = returning_customers_summary['monetary_value'].shape[0] # 2790 customers\n# Total spend:\na1 = returning_customers_summary['monetary_value'].sum()\n# Total time units (here, days) with purchase:\na2 = returning_customers_summary['frequency'].sum()\n# Mean monetary value (over all purchase days), roughly equal to estimated v\nreturning_customers_summary['monetary_value'].mean()\nggf.summary\np_here = ggf.summary.iloc[0,0]\nq_here = ggf.summary.iloc[1,0]\nv_here = ggf.summary.iloc[2,0] # model says 486; empirical average is 477.\n\nmoney_per_customer = a1/a0\n\n###############\n# review, per documentation:\nbgf.summary\n# r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates\n# a = alpha param for beta dist of churn\n# b = beta param for beta dist of churn\nx = np.random.gamma(.784, 49.28,10000) # r, alpha, n\nbgf.summary.loc[\"a\",:][0]/ (bgf.summary.loc[\"b\",:][0] + bgf.summary.loc[\"a\",:][0])\n\n###################################\n# lec31: other models\ndfnew.dtypes\ndfnew_train = dfnew[dfnew.InvoiceDate < '2011-11-09']\ndfnew_test = dfnew[dfnew.InvoiceDate >= '2011-11-09']\ndfnew_test.shape\ndfnew_train.shape\nmaxdate = dfnew_train.InvoiceDate.max()\nmindate = dfnew_train.InvoiceDate.min()\n\ndfnew_train['duration'] = (maxdate - 
dfnew_train.InvoiceDate)/np.timedelta64(1,'D')\ndfsum1 = dfnew_train.groupby(['CustomerID'])['duration'].min().reset_index()\ndfsum1.rename(columns = {'duration':'lasttime'}, inplace=True) # time from lasttime to now\n\ndfsum2 = dfnew_train.groupby(['CustomerID'])['duration'].max().reset_index()\ndfsum2.rename(columns = {'duration':'firsttime'}, inplace=True) # time from firsttime to now\n\ndfnew_train['freq'] = 1\ndfsum3 = dfnew_train.groupby(['CustomerID'])['freq'].sum().reset_index() # count of transactions by customer\n\ndfnew_train['freq3m'] = 1\ndfsum4 = dfnew_train[dfnew_train['duration'] < 91].groupby(['CustomerID'])['freq3m'].sum().reset_index()\n\n# now let's merge the 3 customer-level datasets together.\n# pd.concat uses indexes as the join keys,\nfrom functools import reduce\ndfs = [dfsum1, dfsum2, dfsum3, dfsum4]\ndfsum = reduce(lambda left, right: pd.merge(left, right, on=['CustomerID'], how='outer'), dfs)\ndfsum.shape\n[_ for _ in map(lambda x: x.shape, dfs)]\ndfsum.head()\n\n###################\nother_data = pd.read_csv(\"./ignoreland/oth.csv\")\nother_data.head()\ndfsum = pd.merge(dfsum, other_data, on=['CustomerID'], how='left')\n\ndfnew_test['target'] = 1\ndfsum_target = dfnew_test.groupby(['CustomerID'])['target'].sum().reset_index()\ndfsum = pd.merge(dfsum, dfsum_target, on=['CustomerID'], how='left')\ndfsum = dfsum.fillna(0).sort_values(['target'], ascending=False)\n\nlist(dfsum.columns)\n# Lec 35 Xgboost\n\"\"\"\nreduce(Create tree, use tree to predict residuals, add.)\nlightgbm is a faster implementation\n\"\"\"\n# lec36:\n# Use xgboost to model the count of transactions per customer\nimport xgboost\nfrom sklearn.model_selection import train_test_split\nxgb_model = xgboost.XGBRegressor(n_estimators=2000, objective='reg:squarederror', max_depth=5)\npredictors = ['lasttime', 'firsttime', 'freq', 'freq3m', 'score', 'discount']\nX = dfsum[predictors]\ny = dfsum['target']\n# Split x, x, y, y | train, test; give test frac and random state\nx_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.32, random_state=867)\nxgb_model.fit(x_train, y_train)\n\npred = xgb_model.predict(x_valid) # vector of predicted\nerr = (pred - y_valid)**2 # squared errors\nmse = err.sum()/len(err)\nrmse = np.sqrt(mse)\n\nfrom xgboost import plot_importance\nx = list(zip(predictors, xgb_model.feature_importances_))\nx.sort(key=lambda x: -x[1])\nx\nplot_importance(xgb_model)\n# https://towardsdatascience.com/interpretable-machine-learning-with-xgboost-9ec80d148d27\n### Some global measures of xgboost feature importance:\n# weight: number of times feature is used to split data (over all trees)\n# cover: weight, weighted by data points being touched by those splits\n# gain: mean training loss reduction (reduction in test-train) when the feature is used.\n# argsort here returns the indices of the (reverse-sorted) feature importance values.\n# Useful for grabbing index values and then working with arbitrarily zipped other lists (as I did above)\nsorted_idx = np.argsort(xgb_model.feature_importances_)[::-1]\nfor _ in sorted_idx:\n print([x_train.columns[_], xgb_model.feature_importances_[_]])\n\n[_ for _ in map(lambda x: xgb_model.get_booster().get_score(importance_type=x),\n ['gain','weight','cover','total_gain','total_cover'])]\n\n\ndef importances(model, lst):\n output = {}\n for x in lst:\n output[x] = model.get_booster().get_score(importance_type=x).values()\n return pd.concat([pd.Series(model.get_booster().feature_names), pd.DataFrame(output, columns=lst)],\n axis=1)\n\naa = 
importances(xgb_model,['gain','weight','cover','total_gain','total_cover'])\naa\n\npd.concat([pd.Series(xgb_model.get_booster().feature_names), aa], axis=1)\n\n\n\n##################\n# using lightgbm:\nimport lightgbm as lgb\nlgbparams = {\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': 'mse',\n 'max_depth': 6,\n 'learning_rate': 0.02,\n}\nX1, X2, y1, y2 = train_test_split(X, y, test_size=0.32, random_state=867)\n\nx_train, x_valid, y_train, y_valid = train_test_split(X1, y1, test_size=0.1, random_state=867)\n\nx_train = x_train[predictors]\nx_valid = x_valid[predictors]\n\nd_train = lgb.Dataset(x_train, label=y_train)\nd_valid = lgb.Dataset(x_valid, label=y_valid)\n\nwatchlist = [d_valid]\n\nn_estimators = 2000\n\nlightmodel = lgb.train(lgbparams, d_train, n_estimators, watchlist, verbose_eval=1)\n\nimportancelist = ['gain','split']\n\nlightmodel.feature_importance(importance_type=importancelist[0])\n\nimportancdf = pd.DataFrame(pd.Series(predictors), columns=['feature'])\n\nimportancedf = reduce(lambda left, right: pd.concat([left, right], axis=1),\n [pd.Series(lightmodel.feature_importance(_)) for _ in importancelist])\n\nimportancedf.corr()\n\n\"\"\"\nfrequency = number of periods in which a non-first purchase was made\nT = age in same units of each customer\nrecency = period[last purchase] - period[first purchase]\nmonetary_value = sum(money)/(frequency+1)\n\n# use utility functions to aggregate into useable format.\n# https://lifetimes.readthedocs.io/en/latest/More%20examples%20and%20recipes.html\n# sql examples for aggregating into RFM and doing holdout split.\n\"\"\"\n\n\n\"\"\"\nAlso, per brucehardie,\nThe integrated (function of 2 functions) nature of these problems yields to \nThe gaussian hypergeometric function trick for evaluating the double integral.\n\"\"\"\n"
] |
[
[
"pandas.to_datetime",
"pandas.merge",
"pandas.concat",
"numpy.random.gamma",
"pandas.DataFrame",
"matplotlib.pyplot.figure",
"numpy.timedelta64",
"numpy.sqrt",
"numpy.argsort",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"pandas.Series"
]
] |
kapilepatel/pandas
|
[
"3e652ac565e054b763ccbe23006c02704f6cd1b1"
] |
[
"pandas/core/generic.py"
] |
[
"# pylint: disable=W0231,E1101\nimport collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nfrom textwrap import dedent\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._libs import Timestamp, iNaT, properties\nimport pandas.compat as compat\nfrom pandas.compat import (\n cPickle as pkl, isidentifier, lrange, lzip, map, set_function_name,\n string_types, to_str, zip)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n Appender, Substitution, rewrite_axis_style_signature)\nfrom pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs\n\nfrom pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask\nfrom pandas.core.dtypes.common import (\n ensure_int64, ensure_object, is_bool, is_bool_dtype,\n is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like,\n is_extension_array_dtype, is_integer, is_list_like, is_number,\n is_numeric_dtype, is_object_dtype, is_period_arraylike, is_re_compilable,\n is_scalar, is_timedelta64_dtype, pandas_dtype)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import config, missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.index import (\n Index, InvalidIndexError, MultiIndex, RangeIndex, ensure_index)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs = dict()\n_shared_doc_kwargs = dict(\n axes='keywords for axes', klass='NDFrame',\n axes_single_arg='int or labels for object',\n args_transpose='axes to permute (int or label for object)',\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\")\n\n# sentinel value to use as kwarg in place of None when None has special meaning\n# and needs to be distinguished from a user explicitly passing None.\nsentinel = object()\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError('cannot replace {0} with method {1} on a {2}'\n .format(to_replace, method, type(self).__name__))\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index,\n dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nclass NDFrame(PandasObject, SelectionMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. 
Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : boolean, default False\n \"\"\"\n _internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy',\n '_subtyp', '_name', '_index', '_default_kind',\n '_default_fill_value', '_metadata', '__array_struct__',\n '__array_interface__']\n _internal_names_set = set(_internal_names)\n _accessors = frozenset()\n _deprecations = frozenset(['as_blocks', 'blocks',\n 'convert_objects', 'is_copy'])\n _metadata = []\n _is_copy = None\n\n # dummy attribute so that datetime.__eq__(Series/DataFrame) defers\n # by returning NotImplemented\n timetuple = None\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(self, data, axes=None, copy=False, dtype=None,\n fastpath=False):\n\n if not fastpath:\n if dtype is not None:\n data = data.astype(dtype)\n elif copy:\n data = data.copy()\n\n if axes is not None:\n for i, ax in enumerate(axes):\n data = data.reindex_axis(ax, axis=i)\n\n object.__setattr__(self, '_is_copy', None)\n object.__setattr__(self, '_data', data)\n object.__setattr__(self, '_item_cache', {})\n\n def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(axe,\n axis=self._get_block_manager_axis(a),\n copy=False)\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def is_copy(self):\n \"\"\"\n Return the copy.\n \"\"\"\n warnings.warn(\"Attribute 'is_copy' is deprecated and will be removed \"\n \"in a future version.\", FutureWarning, stacklevel=2)\n return self._is_copy\n\n @is_copy.setter\n def is_copy(self, msg):\n warnings.warn(\"Attribute 'is_copy' is deprecated and will be removed \"\n \"in a future version.\", FutureWarning, stacklevel=2)\n self._is_copy = msg\n\n def _validate_dtype(self, dtype):\n \"\"\" validate the passed dtype \"\"\"\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == 'V':\n raise NotImplementedError(\"compound dtypes are not implemented\"\n \" in the {0} constructor\"\n .format(self.__class__.__name__))\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self):\n \"\"\"Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame() and DataFrame.to_panel()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n\n @classmethod\n def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,\n slicers=None, axes_are_reversed=False, build_axes=True,\n ns=None, docs=None):\n \"\"\"Provide axes setup for the major 
PandasObjects.\n\n Parameters\n ----------\n axes : the names of the axes in order (lowest to highest)\n info_axis_num : the axis of the selector dimension (int)\n stat_axis_num : the number of axis for the default stats (int)\n aliases : other names for a single axis (dict)\n slicers : how axes slice to others (dict)\n axes_are_reversed : boolean whether to treat passed axes as\n reversed (DataFrame)\n build_axes : setup the axis properties (default True)\n \"\"\"\n\n cls._AXIS_ORDERS = axes\n cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}\n cls._AXIS_LEN = len(axes)\n cls._AXIS_ALIASES = aliases or dict()\n cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}\n cls._AXIS_NAMES = dict(enumerate(axes))\n cls._AXIS_SLICEMAP = slicers or None\n cls._AXIS_REVERSED = axes_are_reversed\n\n # typ\n setattr(cls, '_typ', cls.__name__.lower())\n\n # indexing support\n cls._ix = None\n\n if info_axis is not None:\n cls._info_axis_number = info_axis\n cls._info_axis_name = axes[info_axis]\n\n if stat_axis is not None:\n cls._stat_axis_number = stat_axis\n cls._stat_axis_name = axes[stat_axis]\n\n # setup the actual axis\n if build_axes:\n\n def set_axis(a, i):\n setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))\n cls._internal_names_set.add(a)\n\n if axes_are_reversed:\n m = cls._AXIS_LEN - 1\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, m - i)\n else:\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, i)\n\n assert not isinstance(ns, dict)\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @staticmethod\n def _construct_axes_dict_from(self, axes, **kwargs):\n \"\"\"Return an axes dictionary for the passed axes.\"\"\"\n d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}\n d.update(kwargs)\n return d\n\n def _construct_axes_dict_for_slice(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {self._AXIS_SLICEMAP[a]: self._get_axis(a)\n for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n def _construct_axes_from_arguments(\n self, args, kwargs, require_all=False, sentinel=None):\n \"\"\"Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n\n # construct the args\n args = list(args)\n for a in self._AXIS_ORDERS:\n\n # if we have an alias for this axis\n alias = self._AXIS_IALIASES.get(a)\n if alias is not None:\n if a in kwargs:\n if alias in kwargs:\n raise TypeError(\"arguments are mutually exclusive \"\n \"for [%s,%s]\" % (a, alias))\n continue\n if alias in kwargs:\n kwargs[a] = kwargs.pop(alias)\n continue\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError:\n if require_all:\n raise TypeError(\"not enough/duplicate arguments \"\n \"specified!\")\n\n axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _from_axes(cls, data, axes, **kwargs):\n # for construction from BlockManager\n if isinstance(data, BlockManager):\n return cls(data, **kwargs)\n else:\n if cls._AXIS_REVERSED:\n axes = axes[::-1]\n d = cls._construct_axes_dict_from(cls, axes, 
copy=False)\n d.update(kwargs)\n return cls(data, **d)\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError('No axis named {0} for object type {1}'\n .format(axis, type(cls)))\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if isinstance(axis, string_types):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError('No axis named {0} for object type {1}'\n .format(axis, type(cls)))\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis):\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = '{prefix}level_{i}'.format(prefix=prefix, i=i)\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self):\n d = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n return d\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self):\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self):\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self):\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. 
Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self):\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self):\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def _expand_axes(self, key):\n new_axes = []\n for k, ax in zip(key, self.axes):\n if k not in ax:\n if type(k) != ax.dtype.type:\n ax = ax.astype('O')\n new_axes.append(ax.insert(len(ax), k))\n else:\n new_axes.append(ax)\n\n return new_axes\n\n def set_axis(self, labels, axis=0, inplace=None):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for column or row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to update. The value 0 identifies the rows, and 1\n identifies the columns.\n\n inplace : bool, default None\n Whether to return a new %(klass)s instance.\n\n .. warning::\n\n ``inplace=None`` currently falls back to to True, but in a\n future version, will default to False. Use inplace=True\n explicitly rather than relying on the default.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of same type as caller if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.rename_axis : Alter the name of the index or columns.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)\n a 1\n b 2\n c 3\n dtype: int64\n\n The original object is not modified.\n\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns', inplace=False)\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n if is_scalar(labels):\n warnings.warn(\n 'set_axis now takes \"labels\" as first argument, and '\n '\"axis\" as named parameter. 
The old form, with \"axis\" as '\n 'first parameter and \\\"labels\\\" as second, is still supported '\n 'but will be deprecated in a future version of pandas.',\n FutureWarning, stacklevel=2)\n labels, axis = axis, labels\n\n if inplace is None:\n warnings.warn(\n 'set_axis currently defaults to operating inplace.\\nThis '\n 'will change in a future version of pandas, use '\n 'inplace=True to avoid this warning.',\n FutureWarning, stacklevel=2)\n inplace = True\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis, labels):\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Permute the dimensions of the %(klass)s\n\n Parameters\n ----------\n args : %(args_transpose)s\n copy : boolean, default False\n Make a copy of the underlying data. Mixed-dtype data will\n always result in a copy\n **kwargs\n Additional keyword arguments will be passed to the function.\n\n Returns\n -------\n y : same as input\n\n Examples\n --------\n >>> p.transpose(2, 0, 1)\n >>> p.transpose(2, 0, 1, copy=True)\n \"\"\"\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs,\n require_all=True)\n axes_names = tuple(self._get_axis_name(axes[a])\n for a in self._AXIS_ORDERS)\n axes_numbers = tuple(self._get_axis_number(axes[a])\n for a in self._AXIS_ORDERS)\n\n # we must have unique axes\n if len(axes) != len(set(axes)):\n raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)\n\n new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)\n for x in axes_names])\n new_values = self.values.transpose(axes_numbers)\n if kwargs.pop('copy', None) or (len(args) and args[-1]):\n new_values = new_values.copy()\n\n nv.validate_transpose_for_generic(self, kwargs)\n return self._constructor(new_values, **new_axes).__finalize__(self)\n\n def swapaxes(self, axis1, axis2, copy=True):\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k))\n for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self, level, axis=0):\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame.droplevel()\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self, item):\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n .. 
versionadded:: 0.20.0\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes wil project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = (self._AXIS_NAMES if axis is None else\n (self._get_axis_number(axis),))\n try:\n return self.iloc[\n tuple(0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes))]\n except Exception:\n return self\n\n def swaplevel(self, i=-2, j=-1, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, str (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : same type as caller (new object)\n\n .. versionchanged:: 0.18.1\n\n The indexes ``i`` and ``j`` are now optional, and default to\n the two innermost levels of the index.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n result = self.copy()\n labels = result._data.axes[axis]\n result._data.set_axis(axis, labels.swaplevel(i, j))\n return result\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(self, *args, **kwargs):\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame or Panel.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n copy = kwargs.pop('copy', True)\n inplace = kwargs.pop('inplace', False)\n level = kwargs.pop('level', None)\n axis = kwargs.pop('axis', None)\n errors = kwargs.pop('errors', 'ignore')\n if axis is not None:\n # Validate the axis\n self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError('rename() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if com.count_not_none(*axes.values()) == 0:\n raise TypeError('must pass an index to rename')\n\n self._consolidate_inplace()\n result = self if inplace else self.copy(deep=copy)\n\n # start in the axis order to eliminate too many copies\n for axis in lrange(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is None:\n continue\n f = com._get_rename_function(v)\n baxis = self._get_block_manager_axis(axis)\n if level is not None:\n level = self.axes[axis]._get_level_number(level)\n\n # GH 13473\n if not callable(v):\n indexer = self.axes[axis].get_indexer_for(v)\n if errors == 'raise' and len(indexer[indexer == -1]):\n missing_labels = [label for index, label in enumerate(v)\n if indexer[index] == -1]\n raise KeyError('{} not found in axis'\n .format(missing_labels))\n\n result._data = result._data.rename_axis(f, axis=baxis, copy=copy,\n level=level)\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature('mapper', [('copy', True),\n ('inplace', False)])\n def 
rename_axis(self, mapper=sentinel, **kwargs):\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n Prior to version 0.21.0, ``rename_axis`` could also be used to change\n the axis *labels* by passing a mapping or scalar. This behavior is\n deprecated and will be removed in a future version. Use ``rename``\n instead.\n\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=sentinel)\n copy = kwargs.pop('copy', True)\n inplace = kwargs.pop('inplace', False)\n axis = kwargs.pop('axis', 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError('rename_axis() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if (mapper is not sentinel):\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not\n is_dict_like(mapper))\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n # Deprecated (v0.21) behavior is if mapper is specified,\n # and not a list or scalar, then call rename\n msg = (\"Using 'rename_axis' to alter labels is deprecated. \"\n \"Use '.rename' instead\")\n warnings.warn(msg, FutureWarning, stacklevel=3)\n axis = self._get_axis_name(axis)\n d = {'copy': copy, 'inplace': inplace}\n d[axis] = mapper\n return self.rename(**d)\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in lrange(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is sentinel:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not\n is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com._get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis,\n inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other):\n return all(self._get_axis(a).equals(other._get_axis(a))\n for a in self._AXIS_ORDERS)\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n assert_series_equal : Return True if left and right Series are equal,\n False otherwise.\n assert_frame_equal : Return True if left and right DataFrames are\n equal, False otherwise.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)\n or is_object_dtype(values)):\n arr = operator.neg(values)\n else:\n raise TypeError(\"Unary negative expects numeric dtype, not {}\"\n .format(values.dtype))\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = com.values_from_object(self)\n if (is_bool_dtype(values) or is_period_arraylike(values)):\n arr = values\n elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)\n or is_object_dtype(values)):\n arr = operator.pos(values)\n else:\n raise TypeError(\"Unary plus expects numeric dtype, not {}\"\n .format(values.dtype))\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n try:\n arr = operator.inv(com.values_from_object(self))\n return self.__array_wrap__(arr)\n except Exception:\n\n # inv fails with 0 len\n if not np.prod(self.shape):\n return self\n\n raise\n\n def __nonzero__(self):\n raise ValueError(\"The truth value of a {0} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n .format(self.__class__.__name__))\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\"bool cannot act on a non-boolean single element \"\n \"{0}\".format(self.__class__.__name__))\n\n self.__nonzero__()\n\n def __abs__(self):\n return self.abs()\n\n def __round__(self, decimals=0):\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. 
All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_is_level_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n return (key is not None and\n is_hashable(key) and\n key in self.axes[axis].names and\n not self._is_label_reference(key, axis=axis))\n\n def _is_label_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n if self.ndim > 2:\n raise NotImplementedError(\n \"_is_label_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (key is not None and\n is_hashable(key) and\n any(key in self.axes[ax] for ax in other_axes))\n\n def _is_label_or_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n\n if self.ndim > 2:\n raise NotImplementedError(\n \"_is_label_or_level_reference is not implemented for {type}\"\n .format(type=type(self)))\n\n return (self._is_level_reference(key, axis=axis) or\n self._is_label_reference(key, axis=axis))\n\n def _check_label_or_level_ambiguity(self, key, axis=0):\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n if self.ndim > 2:\n raise NotImplementedError(\n \"_check_label_or_level_ambiguity is not implemented for {type}\"\n .format(type=type(self)))\n\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (key is not None and\n is_hashable(key) and\n key in self.axes[axis].names and\n any(key in self.axes[ax] for ax in other_axes)):\n\n # 
Build an informative and grammatical warning\n level_article, level_type = (('an', 'index')\n if axis == 0 else\n ('a', 'column'))\n\n label_article, label_type = (('a', 'column')\n if axis == 0 else\n ('an', 'index'))\n\n msg = (\"'{key}' is both {level_article} {level_type} level and \"\n \"{label_article} {label_type} label, which is ambiguous.\"\n ).format(key=key,\n level_article=level_article,\n level_type=level_type,\n label_article=label_article,\n label_type=label_type)\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key, axis=0):\n \"\"\"\n Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n \"\"\"\n if self.ndim > 2:\n raise NotImplementedError(\n \"_get_label_or_level_values is not implemented for {type}\"\n .format(type=type(self)))\n\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(\n self._get_axis(other_axes[0]), MultiIndex):\n multi_message = ('\\n'\n 'For a multi-index, the label must be a '\n 'tuple with elements corresponding to '\n 'each level.')\n else:\n multi_message = ''\n\n label_axis_name = 'column' if axis == 0 else 'index'\n raise ValueError((\"The {label_axis_name} label '{key}' \"\n \"is not unique.{multi_message}\")\n .format(key=key,\n label_axis_name=label_axis_name,\n multi_message=multi_message))\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis=0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n if self.ndim > 2:\n raise NotImplementedError(\n \"_drop_labels_or_levels is not implemented for {type}\"\n .format(type=type(self)))\n\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [k for k in keys if not\n self._is_label_or_level_reference(k, axis=axis)]\n\n if invalid_keys:\n raise ValueError((\"The 
following keys are not valid labels or \"\n \"levels for axis {axis}: {invalid_keys}\")\n .format(axis=axis,\n invalid_keys=invalid_keys))\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys\n if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys\n if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError('{0!r} objects are mutable, thus they cannot be'\n ' hashed'.format(self.__class__.__name__))\n\n def __iter__(self):\n \"\"\"Iterate over info axis\"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"Get the 'info axis' (see Indexing for more)\n\n This is index for Series, columns for DataFrame and major_axis for\n Panel.\n \"\"\"\n return self._info_axis\n\n def iteritems(self):\n \"\"\"Iterate over (label, values) on info axis\n\n This is index for Series, columns for DataFrame, major_axis for Panel,\n and so on.\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n def __len__(self):\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key):\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self):\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna\n DataFrame.dropna\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! 
We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None):\n return com.values_from_object(self)\n\n def __array_wrap__(self, result, context=None):\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n def to_dense(self):\n \"\"\"\n Return dense representation of NDFrame (as opposed to sparse).\n \"\"\"\n # compat\n return self\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self):\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,\n **meta)\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get('_typ')\n if typ is not None:\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n self._unpickle_series_compat(state)\n elif isinstance(state[0], dict):\n if len(state) == 5:\n self._unpickle_sparse_frame_compat(state)\n else:\n self._unpickle_frame_compat(state)\n elif len(state) == 4:\n self._unpickle_panel_compat(state)\n elif len(state) == 2:\n self._unpickle_series_compat(state)\n else: # pragma: no cover\n # old pickling format, for compatibility\n self._unpickle_matrix_compat(state)\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __unicode__(self):\n # unicode representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = '[%s]' % ','.join(map(pprint_thing, self))\n return '%s(%s)' % (self.__class__.__name__, prepr)\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option('display.latex.repr'):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option('display.max_rows'))\n payload = json.loads(data.to_json(orient='table'),\n object_pairs_hook=collections.OrderedDict)\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs['to_excel'] = \"\"\"\n Write %(klass)s to an Excel 
sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n .. versionadded:: 0.20.0.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... 
df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(self, excel_writer, sheet_name=\"Sheet1\", na_rep=\"\",\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep=\"inf\", verbose=True,\n freeze_panes=None):\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns,\n header=header,\n float_format=float_format, index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,\n startcol=startcol, freeze_panes=freeze_panes,\n engine=engine)\n\n def to_json(self, path_or_buf=None, orient=None, date_format=None,\n double_precision=10, force_ascii=True, date_unit='ms',\n default_handler=None, lines=False, compression='infer',\n index=True):\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : string or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : string\n Indication of expected JSON string format.\n\n * Series\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}\n\n * DataFrame\n\n - default is 'columns'\n - allowed values are:\n {'split','records','index','columns','values','table'}\n\n * The format of the JSON string\n\n - 'split' : dict like {'index' -> [index],\n 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n describing the data, and the data component is\n like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : string, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n .. 
versionadded:: 0.19.0\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n See Also\n --------\n read_json\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n\n from pandas.io import json\n if date_format is None and orient == 'table':\n date_format = 'iso'\n elif date_format is None:\n date_format = 'epoch'\n return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n force_ascii=force_ascii, date_unit=date_unit,\n default_handler=default_handler,\n lines=lines, compression=compression,\n index=index)\n\n def to_hdf(self, path_or_buf, key, **kwargs):\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. 
One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n format : {'fixed', 'table'}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n append : bool, default False\n For Table formats, append the input data to the existing.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n fletcher32 : bool, default False\n If applying compression use the fletcher32 checksum.\n dropna : bool, default False\n If true, ALL nan rows will not be written to store.\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... 
index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n return pytables.to_hdf(path_or_buf, key, self, **kwargs)\n\n def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):\n \"\"\"\n Serialize object to input file path using msgpack format.\n\n THIS IS AN EXPERIMENTAL LIBRARY and the storage format\n may not be stable until a future release.\n\n Parameters\n ----------\n path : string File path, buffer-like, or None\n if None, return generated string\n append : bool whether to append to an existing msgpack\n (default is False)\n compress : type of compressor (zlib or blosc), default to None (no\n compression)\n \"\"\"\n\n from pandas.io import packers\n return packers.to_msgpack(path_or_buf, self, encoding=encoding,\n **kwargs)\n\n def to_sql(self, name, con, schema=None, if_exists='fail', index=True,\n index_label=None, chunksize=None, dtype=None, method=None):\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : string\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects.\n schema : string, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Rows will be written in batches of this size at a time. By default,\n all rows will be written at once.\n dtype : dict, optional\n Specifying the datatype for columns. The keys should be the column\n names and the values should be the SQLAlchemy types or strings for\n the sqlite3 legacy mode.\n method : {None, 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. 
Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] http://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,\n index=index, index_label=index_label, chunksize=chunksize,\n dtype=dtype, method=method)\n\n def to_pickle(self, path, compression='infer',\n protocol=pkl.HIGHEST_PROTOCOL):\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n\n .. versionadded:: 0.20.0\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values for this parameter depend on the version of Python. For\n Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a\n valid value. For Python >= 3.4, 4 is a valid value. A negative\n value for the protocol parameter is equivalent to setting its value\n to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html\n .. 
versionadded:: 0.21.0\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n return to_pickle(self, path, compression=compression,\n protocol=protocol)\n\n def to_clipboard(self, excel=True, sep=None, **kwargs):\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n \"\"\"\n from pandas.io import clipboards\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <http://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 
'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}).set_index(['date',\n ... 'animal'])\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n\n try:\n import xarray\n except ImportError:\n # Give a nice error message\n raise ImportError(\"the xarray library is not installed\\n\"\n \"you can install via conda\\n\"\n \"conda install xarray\\n\"\n \"or via pip\\n\"\n \"pip install xarray\\n\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n elif self.ndim == 2:\n return xarray.Dataset.from_dataframe(self)\n\n # > 2 dims\n coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]\n return xarray.DataArray(self,\n coords=coords,\n )\n\n def to_latex(self, buf=None, columns=None, col_space=None, header=True,\n index=True, na_rep='NaN', formatters=None, float_format=None,\n sparsify=None, index_names=True, bold_rows=False,\n column_format=None, longtable=None, escape=None,\n encoding=None, decimal='.', multicolumn=None,\n multicolumn_format=None, multirow=None):\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice\n this into a LaTeX document. Requires \\usepackage{booktabs}.\n\n .. versionchanged:: 0.20.2\n Added to Series\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. 
By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n\n .. versionadded:: 0.18.0\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n\n .. versionadded:: 0.20.0\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n\n .. versionadded:: 0.20.0\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a\n string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE\n '\\\\begin{tabular}{lll}\\n\\\\toprule\\n name & mask & weapon\n \\\\\\\\\\n\\\\midrule\\n Raphael & red & sai \\\\\\\\\\n Donatello &\n purple & bo staff \\\\\\\\\\n\\\\bottomrule\\n\\\\end{tabular}\\n'\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\n \"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n header=header, index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape, decimal=decimal)\n formatter.to_latex(column_format=column_format, longtable=longtable,\n encoding=encoding, multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n def to_csv(self, path_or_buf=None, sep=\",\", na_rep='', float_format=None,\n columns=None, header=True, index=True, index_label=None,\n mode='w', encoding=None, compression='infer', quoting=None,\n quotechar='\"', line_terminator=None, chunksize=None,\n tupleize_cols=None, date_format=None, doublequote=True,\n escapechar=None, decimal='.'):\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n compression : str, default 'infer'\n Compression mode among the following possible values: {'infer',\n 'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`\n is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no\n compression).\n\n .. 
versionchanged:: 0.24.0\n\n 'infer' option added and set to default.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n tupleize_cols : bool, default False\n Write MultiIndex columns as a list of tuples (if True) or in\n the new, expanded format, where each MultiIndex column is a row\n in the CSV (if False).\n\n .. deprecated:: 0.21.0\n This argument will be removed and will always write each row\n of the multi-index as a separate row in the CSV file.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Load an Excel file into a DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n \"\"\"\n\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n if tupleize_cols is not None:\n warnings.warn(\"The 'tupleize_cols' parameter is deprecated and \"\n \"will be removed in a future version\",\n FutureWarning, stacklevel=2)\n else:\n tupleize_cols = False\n\n from pandas.io.formats.csvs import CSVFormatter\n formatter = CSVFormatter(df, path_or_buf,\n line_terminator=line_terminator, sep=sep,\n encoding=encoding,\n compression=compression, quoting=quoting,\n na_rep=na_rep, float_format=float_format,\n cols=columns, header=header, index=index,\n index_label=index_label, mode=mode,\n chunksize=chunksize, quotechar=quotechar,\n tupleize_cols=tupleize_cols,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar, decimal=decimal)\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n # ----------------------------------------------------------------------\n # Fancy Indexing\n\n @classmethod\n def _create_indexer(cls, name, indexer):\n \"\"\"Create an indexer like _name in the class.\"\"\"\n if getattr(cls, name, None) is None:\n _indexer = functools.partial(indexer, name)\n setattr(cls, name, property(_indexer, doc=indexer.__doc__))\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). 
Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n def __getitem__(self, item):\n return self._get_item_cache(item)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _set_as_cached(self, item, cacher):\n \"\"\"Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self):\n \"\"\"Reset the cacher.\"\"\"\n if hasattr(self, '_cacher'):\n del self._cacher\n\n def _iget_item_cache(self, item):\n \"\"\"Return the cached item, item represents a positional indexer.\"\"\"\n ax = self._info_axis\n if ax.is_unique:\n lower = self._get_item_cache(ax[item])\n else:\n lower = self._take(item, axis=self._info_axis_number)\n return lower\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _maybe_cache_changed(self, item, value):\n \"\"\"The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self):\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, '_cacher', None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, '_cacher', None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n @property\n def _is_view(self):\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def _maybe_update_cacher(self, clear=False, verify_is_copy=True):\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : boolean, default False\n clear the item cache\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n \"\"\"\n\n cacher = getattr(self, '_cacher', None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except Exception:\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t='referant')\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self, i=None):\n if i is not None:\n self._item_cache.pop(i, None)\n else:\n self._item_cache.clear()\n\n def _slice(self, slobj, axis=0, kind=None):\n \"\"\"\n Construct a slice of this container.\n\n kind parameter is maintained for compatibility with Series slicing.\n \"\"\"\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view slicable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value):\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref=None, copy=True):\n if not copy:\n self._is_copy = None\n else:\n if ref is not None:\n self._is_copy = weakref.ref(ref)\n 
else:\n self._is_copy = None\n\n def _check_is_chained_assignment_possible(self):\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t='referant',\n force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t='referant')\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : integer, default 4\n the level to show of the stack when the error is output\n t : string, the type of setting error\n force : boolean, default False\n if True, then force showing an error\n\n validate if we are doing a settitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n\n if force or self._is_copy:\n\n value = config.get_option('mode.chained_assignment')\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n try:\n gc.collect(2)\n if not gc.get_referents(self._is_copy()):\n self._is_copy = None\n return\n except Exception:\n pass\n\n # we might be a false positive\n try:\n if self._is_copy().shape == self.shape:\n self._is_copy = None\n return\n except Exception:\n pass\n\n # a custom message\n if isinstance(self._is_copy, string_types):\n t = self._is_copy\n\n elif t == 'referant':\n t = (\"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/\"\n \"indexing.html#indexing-view-versus-copy\"\n )\n\n else:\n t = (\"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/\"\n \"indexing.html#indexing-view-versus-copy\"\n )\n\n if value == 'raise':\n raise com.SettingWithCopyError(t)\n elif value == 'warn':\n warnings.warn(t, com.SettingWithCopyWarning,\n stacklevel=stacklevel)\n\n def __delitem__(self, key):\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key, )\n for col in 
self.columns:\n if isinstance(col, tuple) and col[:len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n def _take(self, indices, axis=0, is_copy=True):\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n This is the internal version of ``.take()`` and will contain a wider\n selection of parameters useful for internal use but not as suitable\n for public usage.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : int, default 0\n The axis on which to select elements. \"0\" means that we are\n selecting rows, \"1\" means that we are selecting columns, etc.\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n numpy.ndarray.take\n numpy.take\n \"\"\"\n self._consolidate_inplace()\n\n new_data = self._data.take(indices,\n axis=self._get_block_manager_axis(axis),\n verify=True)\n result = self._constructor(new_data).__finalize__(self)\n\n # Maybe set copy if we didn't actually change the index.\n if is_copy:\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n\n return result\n\n def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n convert : bool, default True\n Whether to convert negative indices into positive ones.\n For example, ``-1`` would map to the ``len(axis) - 1``.\n The conversions are similar to the behavior of indexing a\n regular Python list.\n\n .. deprecated:: 0.21.0\n In the future, negative indices will always be converted.\n\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... 
index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if convert is not None:\n msg = (\"The 'convert' parameter is deprecated \"\n \"and will be removed in a future version.\")\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n nv.validate_take(tuple(), kwargs)\n return self._take(indices, axis=axis, is_copy=is_copy)\n\n def xs(self, key, axis=0, level=None, drop_level=True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... 
level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level,\n drop_level=drop_level)\n\n # create the tuple of the indexer\n indexer = [slice(None)] * self.ndim\n indexer[axis] = loc\n indexer = tuple(indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key,\n drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n inds, = loc.nonzero()\n return self._take(inds, axis=axis)\n else:\n return self._take(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n new_values = self._data.fast_xs(loc)\n\n # may need to box a datelike-scalar\n #\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n if not is_list_like(new_values) or self.ndim == 1:\n return com.maybe_box_datetimelike(new_values)\n\n result = self._constructor_sliced(\n new_values, index=self.columns,\n name=self.index[loc], dtype=new_values.dtype)\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view slicable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs = xs\n\n def select(self, crit, axis=0):\n \"\"\"\n Return data corresponding to axis labels matching criteria.\n\n .. deprecated:: 0.21.0\n Use df.loc[df.index.map(crit)] to select via labels\n\n Parameters\n ----------\n crit : function\n To be called on each index (label). Should return True or False\n axis : int\n\n Returns\n -------\n selection : same type as caller\n \"\"\"\n warnings.warn(\"'select' is deprecated and will be removed in a \"\n \"future release. You can use \"\n \".loc[labels.map(crit)] as a replacement\",\n FutureWarning, stacklevel=2)\n\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis_values = self._get_axis(axis)\n\n if len(axis_values) > 0:\n new_axis = axis_values[\n np.asarray([bool(crit(label)) for label in axis_values])]\n else:\n new_axis = axis_values\n\n return self.reindex(**{axis_name: new_axis})\n\n def reindex_like(self, other, method=None, copy=True, limit=None,\n tolerance=None):\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
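This is equivalent to calling ``.reindex(index=other.index, columns=other.columns, ...)``, as noted below. 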
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,\n copy=copy, limit=limit,\n tolerance=tolerance)\n\n return self.reindex(**d)\n\n def drop(self, labels=None, axis=0, index=None, columns=None, level=None,\n inplace=False, errors='raise'):\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and \"\n \"'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\"Need to specify at least one of 'labels', \"\n \"'index' or 'columns'\")\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(self, labels, axis, level=None, errors='raise'):\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError('axis must be a MultiIndex')\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError('axis must be a MultiIndex')\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == 'raise' and indexer.all():\n raise KeyError('{} not found in axis'.format(labels))\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == 'raise' and labels_missing:\n raise KeyError('{} not found in axis'.format(labels))\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy=True):\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, '_data', result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self, prefix):\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For 
DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial('{prefix}{}'.format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def add_suffix(self, suffix):\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial('{}{suffix}'.format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def sort_values(self, by=None, axis=0, ascending=True, inplace=False,\n kind='quicksort', na_position='last'):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise NotImplementedError(\"sort_values has not been implemented \"\n \"on Panel or Panel4D objects.\")\n\n def sort_index(self, axis=0, level=None, ascending=True, inplace=False,\n kind='quicksort', na_position='last', sort_remaining=True):\n \"\"\"\n Sort object by labels (along an axis).\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool, default True\n Sort ascending vs. descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted index if inplace=False, None otherwise.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n labels = self._get_axis(axis)\n\n if level is not None:\n raise NotImplementedError(\"level is not implemented\")\n if inplace:\n raise NotImplementedError(\"inplace is not implemented\")\n\n sort_index = labels.argsort()\n if not ascending:\n sort_index = sort_index[::-1]\n\n new_axis = labels.take(sort_index)\n return self.reindex(**{axis_name: new_axis})\n\n def reindex(self, *args, **kwargs):\n \"\"\"\n Conform %(klass)s to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. 
Preferably an Index object to avoid duplicating data\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({\n ... 'http_status': [200,200,404,404,301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. 
Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
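For example, forward filling likewise leaves the original ``NaN`` at ``2010-01-03`` untouched and only fills the newly added labels that have a preceding observation:\n\n >>> df2.reindex(date_index2, method='ffill')\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 88.0\n\n 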
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop('method', None))\n level = kwargs.pop('level', None)\n copy = kwargs.pop('copy', True)\n limit = kwargs.pop('limit', None)\n tolerance = kwargs.pop('tolerance', None)\n fill_value = kwargs.pop('fill_value', None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError('reindex() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(self._get_axis(axis).identical(ax)\n for axis, ax in axes.items() if ax is not None):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n try:\n return self._reindex_multi(axes, copy, fill_value)\n except Exception:\n pass\n\n # perform the reindex on the axes\n return self._reindex_axes(axes, level, limit, tolerance, method,\n fill_value, copy).__finalize__(self)\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,\n copy):\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(labels, level=level, limit=limit,\n tolerance=tolerance, method=method)\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers({axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy, allow_dups=False)\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and\n method is None and level is None and not self._is_mixed_type)\n\n def _reindex_multi(self, axes, copy, fill_value):\n return NotImplemented\n\n _shared_docs['reindex_axis'] = (\"\"\"\n Conform input object to new index.\n\n .. deprecated:: 0.21.0\n Use `reindex` instead.\n\n By default, places NaN in locations having no value in the\n previous index. A new object is produced unless the new index\n is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n labels : array-like\n New labels / index to conform to. 
Preferably an Index object to\n avoid duplicating data.\n axis : %(axes_single_arg)s\n Indicate whether to use rows or columns.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional\n Method to use for filling holes in reindexed DataFrame:\n\n * default: don't fill gaps.\n * pad / ffill: propagate last valid observation forward to next\n valid.\n * backfill / bfill: use next valid observation to fill gap.\n * nearest: use nearest valid observations to fill gap.\n\n level : int or str\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, optional\n Maximum number of consecutive elements to forward or backward fill.\n fill_value : float, default NaN\n Value used to fill in locations having no value in the previous\n index.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s\n Returns a new DataFrame object with new indices, unless the new\n index is equivalent to the current one and copy=False.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> df.reindex(['num_wings', 'num_legs', 'num_heads'],\n ... axis='columns')\n num_wings num_legs num_heads\n dog 0 4 NaN\n hawk 2 2 NaN\n \"\"\")\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=None):\n msg = (\"'.reindex_axis' is deprecated and will be removed in a future \"\n \"version. Use '.reindex' instead.\")\n self._consolidate_inplace()\n\n axis_name = self._get_axis_name(axis)\n axis_values = self._get_axis(axis_name)\n method = missing.clean_reindex_fill_method(method)\n warnings.warn(msg, FutureWarning, stacklevel=3)\n new_index, indexer = axis_values.reindex(labels, method, level,\n limit=limit)\n return self._reindex_with_indexers({axis: [new_index, indexer]},\n fill_value=fill_value, copy=copy)\n\n def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False,\n allow_dups=False):\n \"\"\"allow_dups indicates an internal call here \"\"\"\n\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(index, indexer, axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy)\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(self, items=None, like=None, regex=None, axis=None):\n \"\"\"\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. 
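(For instance, ``df.filter(like='bbi', axis=0)`` in the examples below keeps the rows whose index label contains ``'bbi'``, regardless of the values stored in those rows.) 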
The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n List of axis to restrict to (must not all be present).\n like : string\n Keep axis where \"arg in col == True\".\n regex : string (regular expression)\n Keep axis with re.search(regex, col) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n import re\n\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError('Keyword arguments `items`, `like`, or `regex` '\n 'are mutually exclusive')\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(\n **{name: [r for r in items if r in labels]})\n elif like:\n def f(x):\n return like in to_str(x)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n def f(x):\n return matcher.search(to_str(x)) is not None\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError('Must pass either `items`, `like`, or `regex`')\n\n def head(self, n=5):\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n\n return self.iloc[:n]\n\n def tail(self, n=5):\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(self, n=None, frac=None, replace=False, weights=None,\n random_state=None, axis=None):\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Sample with or without replacement.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : int or string, optional\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames, 1 for Panels).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n Using a DataFrame column as weights. 
Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, pd.Series):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, string_types):\n if isinstance(self, pd.DataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError:\n raise KeyError(\"String passed to weights not a \"\n \"valid column\")\n else:\n raise ValueError(\"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\")\n else:\n raise ValueError(\"Strings cannot be passed as weights \"\n \"when sampling from a Series or Panel.\")\n\n weights = pd.Series(weights, dtype='float64')\n\n if len(weights) != axis_length:\n raise ValueError(\"Weights and axis to be sampled must be of \"\n \"same length\")\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative \"\n \"values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError('Please enter a value for `frac` OR `n`, not '\n 'both')\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\"A negative number of rows requested. Please \"\n \"provide positive value.\")\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis, is_copy=False)\n\n _shared_docs['pipe'] = (r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply\n DataFrame.applymap\n Series.map\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... 
)\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\")\n\n @Appender(_shared_docs['pipe'] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n _shared_docs['aggregate'] = dedent(\"\"\"\n Aggregate using one or more operations over the specified axis.\n\n %(versionadded)s\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n\n %(see_also)s\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n\n %(examples)s\n \"\"\")\n\n _shared_docs['transform'] = (\"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values\n and that has the same axis length as self.\n\n .. versionadded:: 0.20.0\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 
'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\")\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name):\n \"\"\"After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (name in self._internal_names_set or name in self._metadata or\n name in self._accessors):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name, value):\n \"\"\"After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2)\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\" add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {c for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, string_types) and isidentifier(c)}\n return super(NDFrame, self)._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Getting and setting elements\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self):\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace=False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : boolean, default False\n If False return new object, otherwise modify existing object\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self):\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self):\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_datelike_mixed_type(self):\n f = lambda: self._data.is_datelike_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value):\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n try:\n if np.isnan(value):\n return True\n except Exception:\n pass\n\n raise TypeError('Cannot do inplace boolean setting on '\n 'mixed-types with a non np.nan value')\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(\n 
self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Internal Interface Methods\n\n def as_matrix(self, columns=None):\n \"\"\"\n Convert the frame to its Numpy-array representation.\n\n .. deprecated:: 0.23.0\n Use :meth:`DataFrame.values` instead.\n\n Parameters\n ----------\n columns : list, optional, default:None\n If None, return all columns, otherwise, returns specified columns.\n\n Returns\n -------\n values : ndarray\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n See Also\n --------\n DataFrame.values\n\n Notes\n -----\n Return is NOT a Numpy-matrix, rather, a Numpy-array.\n\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcase to\n int32. By numpy.find_common_type convention, mixing int64 and uint64\n will result in a float64 dtype.\n\n This method is provided for backwards compatibility. Generally,\n it is recommended to use '.values'.\n \"\"\"\n warnings.warn(\"Method .as_matrix will be removed in a future version. \"\n \"Use .values instead.\", FutureWarning, stacklevel=2)\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED,\n items=columns)\n\n @property\n def values(self):\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... 
columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self):\n \"\"\"internal implementation\"\"\"\n return self.values\n\n @property\n def _get_values(self):\n # compat\n return self.values\n\n def get_values(self):\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],\n ... 'c': [1.0, 2.0]})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n\n >>> df.get_values()\n array([[1, True, 1.0], [2, False, 2.0]], dtype=object)\n\n >>> df = pd.DataFrame({\"a\": pd.SparseArray([1, None, None]),\n ... \"c\": [1.0, 2.0, 3.0]})\n >>> df\n a c\n 0 1.0 1.0\n 1 NaN 2.0\n 2 NaN 3.0\n\n >>> df.get_values()\n array([[ 1., 1.],\n [nan, 2.],\n [nan, 3.]])\n \"\"\"\n return self.values\n\n def get_dtype_counts(self):\n \"\"\"\n Return counts of unique dtypes in this object.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_dtype_counts()\n float64 1\n int64 1\n object 1\n dtype: int64\n \"\"\"\n from pandas import Series\n return Series(self._data.get_dtype_counts())\n\n def get_ftype_counts(self):\n \"\"\"\n Return counts of unique ftypes in this object.\n\n .. deprecated:: 0.23.0\n\n This is useful for SparseDataFrame or for DataFrames containing\n sparse arrays.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each type and\n sparsity (dense/sparse).\n\n See Also\n --------\n ftypes : Return ftypes (indication of sparse/dense and dtype) in\n this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_ftype_counts() # doctest: +SKIP\n float64:dense 1\n int64:dense 1\n object:dense 1\n dtype: int64\n \"\"\"\n warnings.warn(\"get_ftype_counts is deprecated and will \"\n \"be removed in a future version\",\n FutureWarning, stacklevel=2)\n\n from pandas import Series\n return Series(self._data.get_ftype_counts())\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n See Also\n --------\n DataFrame.ftypes : Dtype and sparsity information.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 
'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n return Series(self._data.get_dtypes(), index=self._info_axis,\n dtype=np.object_)\n\n @property\n def ftypes(self):\n \"\"\"\n Return the ftypes (indication of sparse/dense and dtype) in DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type and indication of sparse/dense of each column.\n\n See Also\n --------\n DataFrame.dtypes: Series with just dtype information.\n SparseDataFrame : Container for sparse tabular data.\n\n Notes\n -----\n Sparse data should have the same dtypes as its dense representation.\n\n Examples\n --------\n >>> arr = np.random.RandomState(0).randn(100, 4)\n >>> arr[arr < .8] = np.nan\n >>> pd.DataFrame(arr).ftypes\n 0 float64:dense\n 1 float64:dense\n 2 float64:dense\n 3 float64:dense\n dtype: object\n\n >>> pd.SparseDataFrame(arr).ftypes\n 0 float64:sparse\n 1 float64:sparse\n 2 float64:sparse\n 3 float64:sparse\n dtype: object\n \"\"\"\n from pandas import Series\n return Series(self._data.get_ftypes(), index=self._info_axis,\n dtype=np.object_)\n\n def as_blocks(self, copy=True):\n \"\"\"\n Convert the frame to a dict of dtype -> Constructor Types that each has\n a homogeneous dtype.\n\n .. deprecated:: 0.21.0\n\n NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in\n as_matrix)\n\n Parameters\n ----------\n copy : boolean, default True\n\n Returns\n -------\n values : a dict of dtype -> Constructor Types\n \"\"\"\n warnings.warn(\"as_blocks is deprecated and will \"\n \"be removed in a future version\",\n FutureWarning, stacklevel=2)\n return self._to_dict_of_blocks(copy=copy)\n\n @property\n def blocks(self):\n \"\"\"\n Internal property, property synonym for as_blocks().\n\n .. deprecated:: 0.21.0\n \"\"\"\n return self.as_blocks()\n\n def _to_dict_of_blocks(self, copy=True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()}\n\n def astype(self, dtype, copy=True, errors='raise', **kwargs):\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n .. 
versionadded:: 0.20.0\n\n kwargs : keyword arguments to pass on to the constructor\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1,2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError('Only the Series name can be used for '\n 'the key in Series dtype mappings.')\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors, **kwargs)\n elif self.ndim > 2:\n raise NotImplementedError(\n 'astype() only accepts a dtype arg of type dict when '\n 'invoked on Series and DataFrames. A single dtype must be '\n 'specified when invoked on a Panel.'\n )\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError('Only a column name can be used for the '\n 'key in a dtype mappings argument.')\n results = []\n for col_name, col in self.iteritems():\n if col_name in dtype:\n results.append(col.astype(dtype[col_name], copy=copy))\n else:\n results.append(results.append(col.copy() if copy else col))\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = (self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns)))\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,\n **kwargs)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). 
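For instance (a small illustration using the same Series ``s`` defined in the Examples below), ``s.copy(deep=False).values is s.values`` is ``True``, because only references to the underlying data are copied.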
Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series, DataFrame or Panel\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self, deep=True):\n return self.copy(deep=deep)\n\n def __deepcopy__(self, memo=None):\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n if memo is None:\n memo = {}\n return self.copy(deep=True)\n\n def _convert(self, datetime=False, numeric=False, timedelta=False,\n coerce=False, copy=True):\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : boolean, default False\n If True, convert to date where possible.\n numeric : boolean, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : boolean, default False\n If True, convert to timedelta where possible.\n coerce : boolean, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT)\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). 
Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n return self._constructor(\n self._data.convert(datetime=datetime, numeric=numeric,\n timedelta=timedelta, coerce=coerce,\n copy=copy)).__finalize__(self)\n\n def convert_objects(self, convert_dates=True, convert_numeric=False,\n convert_timedeltas=True, copy=True):\n \"\"\"\n Attempt to infer better dtype for object columns.\n\n .. deprecated:: 0.21.0\n\n Parameters\n ----------\n convert_dates : boolean, default True\n If True, convert to date where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n convert_numeric : boolean, default False\n If True, attempt to coerce to numbers (including strings), with\n unconvertible values becoming NaN.\n convert_timedeltas : boolean, default True\n If True, convert to timedelta where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n \"\"\"\n msg = (\"convert_objects is deprecated. To re-infer data dtypes for \"\n \"object columns, use {klass}.infer_objects()\\nFor all \"\n \"other conversions use the data-type specific converters \"\n \"pd.to_datetime, pd.to_timedelta and pd.to_numeric.\"\n ).format(klass=self.__class__.__name__)\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n return self._constructor(\n self._data.convert(convert_dates=convert_dates,\n convert_numeric=convert_numeric,\n convert_timedeltas=convert_timedeltas,\n copy=copy)).__finalize__(self)\n\n def infer_objects(self):\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(datetime=True, numeric=False,\n timedelta=True, coerce=False,\n copy=True)).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None):\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). 
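For example, ``df.fillna({'A': 0, 'B': 1})`` fills holes in column 'A' with ``0`` and holes in column 'B' with ``1`` (see the Examples below).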
Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : %(axes_single_arg)s\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n %(klass)s\n Object with missing values filled.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. 
in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n from pandas import DataFrame\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n # > 3d\n if self.ndim > 3:\n raise NotImplementedError('Cannot fillna with a method for > '\n '3dims')\n\n # 3d\n elif self.ndim == 3:\n # fill in 2d chunks\n result = {col: s.fillna(method=method, value=value)\n for col, s in self.iteritems()}\n prelim_obj = self._constructor.from_dict(result)\n new_obj = prelim_obj.__finalize__(self)\n new_data = new_obj._data\n\n else:\n # 2d or less\n new_data = self._data.interpolate(method=method, axis=axis,\n limit=limit, inplace=inplace,\n coerce=True,\n downcast=downcast)\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n from pandas import Series\n value = Series(value)\n elif not is_list_like(value):\n pass\n else:\n raise TypeError('\"value\" parameter must be a scalar, dict '\n 'or Series, but you passed a '\n '\"{0}\"'.format(type(value).__name__))\n\n new_data = self._data.fillna(value=value, limit=limit,\n inplace=inplace,\n downcast=downcast)\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError('Currently only can fill '\n 'with dict/Series column '\n 'by column')\n\n result = self if inplace else self.copy()\n for k, v in compat.iteritems(value):\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(value=value, limit=limit,\n inplace=inplace,\n downcast=downcast)\n elif isinstance(value, DataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(\"invalid fill value with a %s\" % type(value))\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(self, axis=None, inplace=False, limit=None, downcast=None):\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n \"\"\"\n return self.fillna(method='ffill', axis=axis, inplace=inplace,\n limit=limit, downcast=downcast)\n\n def bfill(self, axis=None, inplace=False, limit=None, downcast=None):\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n \"\"\"\n return self.fillna(method='bfill', axis=axis, inplace=inplace,\n limit=limit, downcast=downcast)\n\n _shared_docs['replace'] = (\"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - 
Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. 
versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n TypeError\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\")\n\n @Appender(_shared_docs['replace'] % _shared_doc_kwargs)\n def replace(self, to_replace=None, value=None, inplace=False, limit=None,\n regex=False, method='pad'):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is \"\n \"not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, pd.DataFrame):\n return self.apply(_single_replace,\n args=(to_replace, method, inplace,\n limit))\n return _single_replace(self, to_replace, method, inplace,\n limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError('If \"to_replace\" and \"value\" are both None'\n ' and \"to_replace\" is not a list, then '\n 'regex must be a mapping')\n to_replace = regex\n regex = True\n\n items = list(compat.iteritems(to_replace))\n keys, values = lzip(*items) or ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\"If a nested mapping is passed, all values\"\n \" of the top level mapping must be \"\n \"mappings\")\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n 
keys, values = lzip(*v.items()) or ([], [])\n if set(keys) & set(values):\n raise ValueError(\"Replacement not allowed with \"\n \"overlapping keys and values\")\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(to_replace, value, inplace=inplace,\n limit=limit, regex=regex)\n else:\n\n # need a non-zero len on all axes\n for a in self._AXIS_ORDERS:\n if not len(self._get_axis(a)):\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in compat.iteritems(to_replace):\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursivelly\n res[c] = res[c].replace(to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex)\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in compat.iteritems(to_replace)\n if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert)\n else:\n raise TypeError('value argument must be scalar, dict, or '\n 'Series')\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError('Replacement lists must match '\n 'in length. Expecting %d got %d ' %\n (len(to_replace), len(value)))\n\n new_data = self._data.replace_list(src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex)\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(to_replace=to_replace,\n value=value, inplace=inplace,\n regex=regex)\n elif to_replace is None:\n if not (is_re_compilable(regex) or\n is_list_like(regex) or is_dict_like(regex)):\n raise TypeError(\"'regex' must be a string or a compiled \"\n \"regular expression or a list or dict of \"\n \"strings or regular expressions, you \"\n \"passed a\"\n \" {0!r}\".format(type(regex).__name__))\n return self.replace(regex, value, inplace=inplace, limit=limit,\n regex=True)\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in compat.iteritems(value):\n if k in self:\n new_data = new_data.replace(to_replace=to_replace,\n value=v, filter=[k],\n inplace=inplace,\n regex=regex)\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(to_replace=to_replace,\n value=value, inplace=inplace,\n regex=regex)\n else:\n msg = ('Invalid \"to_replace\" type: '\n '{0!r}').format(type(to_replace).__name__)\n raise TypeError(msg) # pragma: no cover\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs['interpolate'] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. 
This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n\n .. versionadded:: 0.18.1\n\n Added support for the 'akima' method.\n Added interpolate method 'from_derivatives' which replaces\n 'piecewise_polynomial' in SciPy 0.18; backwards-compatible with\n SciPy < 0.18\n\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. 
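For instance, ``method='pchip'`` dispatches to ``scipy.interpolate.PchipInterpolator``.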
These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry befofe it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... 
columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)\n def interpolate(self, method='linear', axis=0, limit=None, inplace=False,\n limit_direction='forward', limit_area=None,\n downcast=None, **kwargs):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if self.ndim > 2:\n raise NotImplementedError(\"Interpolate has not been implemented \"\n \"on Panel and Panel 4D objects.\")\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n else:\n _maybe_transposed_self = self\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if (isinstance(_maybe_transposed_self.index, MultiIndex) and\n method != 'linear'):\n raise ValueError(\"Only `method=linear` interpolation is supported \"\n \"on MultiIndexes.\")\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\n 'object') == len(_maybe_transposed_self.T):\n raise TypeError(\"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\")\n\n # create/use the index\n if method == 'linear':\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n\n if isna(index).any():\n raise NotImplementedError(\"Interpolation with NaNs in the index \"\n \"has not been implemented. Try filling \"\n \"those NaNs before interpolating.\")\n data = _maybe_transposed_self._data\n new_data = data.interpolate(method=method, axis=ax, index=index,\n values=_maybe_transposed_self, limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace, downcast=downcast,\n **kwargs)\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n .. 
versionadded:: 0.19.0 For DataFrame\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, compat.string_types):\n from pandas import to_datetime\n where = to_datetime(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n elif self.ndim > 2:\n raise NotImplementedError(\"asof is not implemented \"\n \"for {type}\".format(type=type(self)))\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq).ordinal\n start = start.ordinal\n\n if where < start:\n if not is_series:\n from pandas import Series\n return Series(index=self.columns, name=where)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. 
However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side='right')\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs, is_copy=False)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs['isna'] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs['isna'] % _shared_doc_kwargs)\n def isna(self):\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs['isna'] % _shared_doc_kwargs)\n def isnull(self):\n return isna(self).__finalize__(self)\n\n _shared_docs['notna'] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. 
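For example, ``pd.Series([5, np.nan]).notna()`` yields ``True`` for ``5`` and ``False`` for the missing value.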
Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs['notna'] % _shared_doc_kwargs)\n def notna(self):\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs['notna'] % _shared_doc_kwargs)\n def notnull(self):\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace=False):\n if ((lower is not None and np.any(isna(lower))) or\n (upper is not None and np.any(isna(upper)))):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all='ignore'):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == 'le':\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = pd.Series(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold,\n axis)\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(self, lower=None, upper=None, axis=None, inplace=False,\n *args, **kwargs):\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. 
All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n if isinstance(self, ABCPanel):\n raise NotImplementedError(\"clip is not supported yet for panels\")\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(pd.isnull(lower)):\n lower = None\n if not is_list_like(upper) and np.any(pd.isnull(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if ((lower is None or (is_scalar(lower) and is_number(lower))) and\n (upper is None or (is_scalar(upper) and is_number(upper)))):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(lower, method=self.ge,\n axis=axis, inplace=inplace)\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(upper, method=self.le,\n axis=axis, inplace=inplace)\n\n return result\n\n def clip_upper(self, threshold, axis=None, inplace=False):\n \"\"\"\n Trim values above a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(upper=threshold) instead.\n\n Elements above the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Maximum value allowed. All values above threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. 
When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align object with `threshold` along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.clip(upper=3)\n 0 1\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n >>> elemwise_thresholds = [5, 4, 3, 2, 1]\n >>> elemwise_thresholds\n [5, 4, 3, 2, 1]\n\n >>> s.clip(upper=elemwise_thresholds)\n 0 1\n 1 2\n 2 3\n 3 2\n 4 1\n dtype: int64\n \"\"\"\n warnings.warn('clip_upper(threshold) is deprecated, '\n 'use clip(upper=threshold) instead',\n FutureWarning, stacklevel=2)\n return self._clip_with_one_bound(threshold, method=self.le,\n axis=axis, inplace=inplace)\n\n def clip_lower(self, threshold, axis=None, inplace=False):\n \"\"\"\n Trim values below a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(lower=threshold) instead.\n\n Elements below the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Minimum value allowed. All values below threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align `self` with `threshold` along the given axis.\n\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n\n Series single threshold clipping:\n\n >>> s = pd.Series([5, 6, 7, 8, 9])\n >>> s.clip(lower=8)\n 0 8\n 1 8\n 2 8\n 3 8\n 4 9\n dtype: int64\n\n Series clipping element-wise using an array of thresholds. `threshold`\n should be the same length as the Series.\n\n >>> elemwise_thresholds = [4, 8, 7, 2, 5]\n >>> s.clip(lower=elemwise_thresholds)\n 0 5\n 1 8\n 2 7\n 3 8\n 4 9\n dtype: int64\n\n DataFrames can be compared to a scalar.\n\n >>> df = pd.DataFrame({\"A\": [1, 3, 5], \"B\": [2, 4, 6]})\n >>> df\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=3)\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n Or to an array of values. By default, `threshold` should be the same\n shape as the DataFrame.\n\n >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))\n A B\n 0 3 4\n 1 3 4\n 2 6 6\n\n Control how `threshold` is broadcast with `axis`. 
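For example, ``axis='index'`` aligns one threshold value per row, while ``axis='columns'`` aligns one per column.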
In this case\n `threshold` should be the same length as the axis specified by\n `axis`.\n\n >>> df.clip(lower=[3, 3, 5], axis='index')\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=[4, 5], axis='columns')\n A B\n 0 4 5\n 1 4 5\n 2 5 6\n \"\"\"\n warnings.warn('clip_lower(threshold) is deprecated, '\n 'use clip(lower=threshold) instead',\n FutureWarning, stacklevel=2)\n return self._clip_with_one_bound(threshold, method=self.ge,\n axis=axis, inplace=inplace)\n\n def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,\n group_keys=True, squeeze=False, observed=False, **kwargs):\n \"\"\"\n Group DataFrame or Series using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n **kwargs\n Optional, only accepts keyword argument 'mutated' and is passed\n to groupby.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n ... 'Parrot', 'Parrot'],\n ... 
'Max Speed': [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby(['Animal']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n ... ['Captive', 'Wild', 'Captive', 'Wild']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n ... index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level=1).mean()\n Max Speed\n Type\n Captive 210.0\n Wild 185.0\n \"\"\"\n from pandas.core.groupby.groupby import groupby\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n return groupby(self, by=by, axis=axis, level=level, as_index=as_index,\n sort=sort, group_keys=group_keys, squeeze=squeeze,\n observed=observed, **kwargs)\n\n def asfreq(self, freq, method=None, how=None, normalize=False,\n fill_value=None):\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset object, or string\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill\n how : {'start', 'end'}, default end\n For PeriodIndex only, see PeriodIndex.asfreq\n normalize : bool, default False\n Whether to reset output index to midnight\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n .. 
versionadded:: 0.20.0\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample import asfreq\n return asfreq(self, freq, method=method, how=how, normalize=normalize,\n fill_value=fill_value)\n\n def at_time(self, time, asof=False, axis=None):\n \"\"\"\n Select values at particular time of day (e.g. 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError:\n raise TypeError('Index must be DatetimeIndex')\n\n return self._take(indexer, axis=axis)\n\n def between_time(self, start_time, end_time, include_start=True,\n include_end=True, axis=None):\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n end_time : datetime.time or str\n include_start : bool, default True\n include_end : bool, default True\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time, end_time, include_start=include_start,\n include_end=include_end)\n except AttributeError:\n raise TypeError('Index must be DatetimeIndex')\n\n return self._take(indexer, axis=axis)\n\n def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,\n label=None, convention='start', kind=None, loffset=None,\n limit=None, base=0, on=None, level=None):\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n how : str\n Method for down/re-sampling, default to 'mean' for downsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).mean()``, or\n ``.resample(...).apply(<func>)``\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n fill_method : str, default None\n Filling method for upsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).<func>()``,\n e.g. ``.resample(...).pad()``\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n limit : int, default None\n Maximum size gap when reindexing with `fill_method`.\n\n .. 
deprecated:: 0.18.0\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. 
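# A small sketch of the `base` argument described above, assuming pandas as pd;
# it shifts the bin "origin" for frequencies that evenly subdivide a day.
import pandas as pd

s = pd.Series(range(9), index=pd.date_range('2000-01-01', periods=9, freq='T'))
s.resample('3T').sum()          # bin edges fall on 00:00, 00:03, 00:06, ...
s.resample('3T', base=1).sum()  # bin edges fall on 00:01, 00:04, 00:07, ... instead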
For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... 
freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n \"\"\"\n\n from pandas.core.resample import (resample,\n _maybe_process_deprecations)\n axis = self._get_axis_number(axis)\n r = resample(self, freq=rule, label=label, closed=closed,\n axis=axis, kind=kind, loffset=loffset,\n convention=convention,\n base=base, key=on, level=level)\n return _maybe_process_deprecations(r,\n how=how,\n fill_method=fill_method,\n limit=limit)\n\n def first(self, offset):\n \"\"\"\n Convenience method for subsetting initial periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calender days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 
3 weeks\n if not offset.isAnchored() and hasattr(offset, '_inc'):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side='left')\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self, offset):\n \"\"\"\n Convenience method for subsetting final periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calender days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side='right')\n return self.iloc[start:]\n\n def rank(self, axis=0, method='average', numeric_only=None,\n na_option='keep', ascending=True, pct=False):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n index to direct ranking\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n numeric_only : boolean, default None\n Include only float, int, boolean data. 
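# A minimal sketch of first()/last() from above, assuming pandas as pd; both
# select a calendar window measured from the ends of a DatetimeIndex.
import pandas as pd

ts = pd.DataFrame({'A': [1, 2, 3, 4]},
                  index=pd.date_range('2018-04-09', periods=4, freq='2D'))
ts.first('3D')   # rows within the first 3 calendar days (2018-04-09, 2018-04-11)
ts.last('3D')    # rows within the last 3 calendar days (2018-04-13, 2018-04-15)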
Valid only for DataFrame or\n Panel objects\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : same type as caller\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if self.ndim > 2:\n msg = \"rank does not make sense when ndim > 2\"\n raise NotImplementedError(msg)\n\n if na_option not in {'keep', 'top', 'bottom'}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(data.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option,\n pct=pct)\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs['align'] = (\"\"\"\n Align two objects on their axes with the\n specified join method for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None)\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n copy : boolean, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
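# A short sketch of the tie-handling methods listed in rank() above, assuming
# pandas as pd (values are illustrative only).
import pandas as pd

s = pd.Series([7, 3, 3, 10])
s.rank()                            # average: 3.0, 1.5, 1.5, 4.0
s.rank(method='dense')              # dense:   2.0, 1.0, 1.0, 3.0
s.rank(ascending=False, pct=True)   # percentage ranks, largest value ranked first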
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects.\n \"\"\")\n\n @Appender(_shared_docs['align'] % _shared_doc_kwargs)\n def align(self, other, join='outer', axis=None, level=None, copy=True,\n fill_value=None, method=None, limit=None, fill_axis=0,\n broadcast_axis=None):\n from pandas import DataFrame, Series\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, Series):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons({c: self for c in other.columns},\n **other._construct_axes_dict())\n return df._align_frame(other, join=join, axis=axis,\n level=level, copy=copy,\n fill_value=fill_value, method=method,\n limit=limit, fill_axis=fill_axis)\n elif isinstance(other, Series):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons({c: other for c in self.columns},\n **self._construct_axes_dict())\n return self._align_frame(df, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, DataFrame):\n return self._align_frame(other, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n elif isinstance(other, Series):\n return self._align_series(other, join=join, axis=axis, level=level,\n copy=copy, fill_value=fill_value,\n method=method, limit=limit,\n fill_axis=fill_axis)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n def _align_frame(self, other, join='outer', axis=None, level=None,\n copy=True, fill_value=None, method=None, limit=None,\n fill_axis=0):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True)\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True)\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(reindexers, copy=copy,\n fill_value=fill_value,\n allow_dups=True)\n # other must be always DataFrame\n right = other._reindex_with_indexers({0: [join_index, iridx],\n 1: [join_columns, cridx]},\n copy=copy, fill_value=fill_value,\n allow_dups=True)\n\n if method is not None:\n left = left.fillna(axis=fill_axis, method=method, limit=limit)\n right = right.fillna(axis=fill_axis, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def 
_align_series(self, other, join='outer', axis=None, level=None,\n copy=True, fill_value=None, method=None, limit=None,\n fill_axis=0):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError('cannot align series to a series other than '\n 'axis 0')\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(other.index, how=join,\n level=level,\n return_indexers=True)\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level,\n return_indexers=True)\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level,\n return_indexers=True)\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError('Must specify axis=0 or 1')\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit,\n axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
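# A brief sketch of the public align() behaviour that the helpers above
# implement, assuming pandas as pd: both objects are reindexed to a shared
# index (join='outer' by default) and gaps are filled with fill_value.
import pandas as pd

a = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
b = pd.Series([10, 20], index=['y', 'w'])
left, right = a.align(b, join='outer', fill_value=0)
# left and right now share the index ['w', 'x', 'y', 'z']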
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join='right', broadcast_axis=1)\n else:\n if not hasattr(cond, 'shape'):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError('Array conditional must be same shape as '\n 'self')\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, pd.DataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, 'align'):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(other, join='left', axis=axis,\n level=level, fill_value=np.nan)\n\n # if we are NOT aligned, raise as we cannot where index\n if (axis is None and\n not all(other._get_axis(i).equals(ax)\n for i, ax in enumerate(self.axes))):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\"cannot align with a higher \"\n \"dimensional NDFrame\")\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n\n try:\n new_other = com.values_from_object(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n except Exception:\n try_quick = False\n\n # let's create a new (if we failed at the above\n # or not try_quick\n if not try_quick:\n\n dtype, fill_value = maybe_promote(other.dtype)\n new_other = np.empty(len(icond), dtype=dtype)\n new_other.fill(fill_value)\n maybe_upcast_putmask(new_other, icond, other)\n other = new_other\n\n else:\n raise ValueError('Length of replacements must equal '\n 'series length')\n\n else:\n raise ValueError('other must be the same shape as self '\n 'when an ndarray')\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, 'ndim', 0):\n align = True\n else:\n align = (self._get_axis_number(axis) == 1)\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(mask=cond, new=other, align=align,\n inplace=True, axis=block_axis,\n transpose=self._AXIS_REVERSED)\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(other=other, cond=cond, align=align,\n errors=errors,\n try_cast=try_cast, axis=block_axis,\n transpose=self._AXIS_REVERSED)\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs['where'] = (\"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : boolean %(klass)s, 
array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. The callable must\n not change input %(klass)s (though pandas doesn't check it).\n\n .. versionadded:: 0.18.1\n A callable can be used as cond.\n\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n\n .. versionadded:: 0.18.1\n A callable can be used as other.\n\n inplace : boolean, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default `raise`\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - `raise` : allow exceptions to be raised.\n - `ignore` : suppress exceptions. On error return original object.\n\n try_cast : boolean, default False\n Try to cast the result back to the input type (if possible).\n raise_on_error : boolean, default True\n Whether to raise on invalid data types (e.g. trying to where on\n strings).\n\n .. deprecated:: 0.21.0\n\n Use `errors`.\n\n Returns\n -------\n wh : same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. 
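# A hedged sketch of the callable forms of `cond` and `other` described above,
# assuming pandas as pd; each callable receives the calling object and must
# not modify it.
import pandas as pd

s = pd.Series([-2, -1, 0, 1, 2])
s.where(lambda x: x > 0, other=lambda x: -x)   # non-positive entries are negated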
Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\")\n\n @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond=\"True\",\n cond_rev=\"False\", name='where',\n name_other='mask'))\n def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False, raise_on_error=None):\n\n if raise_on_error is not None:\n warnings.warn(\n \"raise_on_error is deprecated in \"\n \"favor of errors='raise|ignore'\",\n FutureWarning, stacklevel=2)\n\n if raise_on_error:\n errors = 'raise'\n else:\n errors = 'ignore'\n\n other = com.apply_if_callable(other, self)\n return self._where(cond, other, inplace, axis, level,\n errors=errors, try_cast=try_cast)\n\n @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond=\"False\",\n cond_rev=\"True\", name='mask',\n name_other='where'))\n def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,\n errors='raise', try_cast=False, raise_on_error=None):\n\n if raise_on_error is not None:\n warnings.warn(\n \"raise_on_error is deprecated in \"\n \"favor of errors='raise|ignore'\",\n FutureWarning, stacklevel=2)\n\n if raise_on_error:\n errors = 'raise'\n else:\n errors = 'ignore'\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(~cond, other=other, inplace=inplace, axis=axis,\n level=level, try_cast=try_cast,\n errors=errors)\n\n _shared_docs['shift'] = (\"\"\"\n Shift index by desired number of periods with an optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. 
versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\")\n\n @Appender(_shared_docs['shift'] % _shared_doc_kwargs)\n def shift(self, periods=1, freq=None, axis=0, fill_value=None):\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(periods=periods, axis=block_axis,\n fill_value=fill_value)\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self, periods=1, axis=0):\n \"\"\"\n Equivalent to `shift` without copying data. The shifted data will\n not include the dropped periods and the shifted axis will be smaller\n than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(self, periods=1, freq=None, axis=0):\n \"\"\"\n Shift the time index, using the index's frequency if available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, default None\n Increment to use from the tseries module or time rule (e.g. 'EOM')\n axis : int or basestring\n Corresponds to the axis that contains the Index\n\n Returns\n -------\n shifted : NDFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. 
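# A minimal sketch of slice_shift() from above, assuming pandas as pd: the
# shifted-out periods are dropped rather than filled with NaN, so the result
# is shorter than the input.
import pandas as pd

s = pd.Series([1, 2, 3, 4], index=pd.date_range('2000-01-01', periods=4, freq='D'))
s.slice_shift(1)   # three rows indexed 2000-01-02..2000-01-04 holding 1, 2, 3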
If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, 'freq', None)\n\n if freq is None:\n freq = getattr(index, 'inferred_freq', None)\n\n if freq is None:\n msg = 'Freq was not given and was not set in the index'\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, string_types):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n else:\n msg = ('Given freq %s does not match PeriodIndex freq %s' %\n (freq.rule_code, orig_freq.rule_code))\n raise ValueError(msg)\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(self, before=None, after=None, axis=None, copy=True):\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, string, int\n Truncate all rows before this index value.\n after : date, string, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : boolean, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. 
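# A minimal sketch of tshift() from above, assuming pandas as pd: the index is
# moved by whole periods of its frequency while the values stay in place.
import pandas as pd

s = pd.Series([1, 2, 3], index=pd.date_range('2000-01-01', periods=3, freq='D'))
s.tshift(2)   # index runs 2000-01-03 .. 2000-01-05; values remain 1, 2, 3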
They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError('Truncate: %s must be after %s' %\n (after, before))\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis),\n ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(self, tz, axis=0, level=None, copy=True):\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to convert\n level : int, str, default None\n If axis ia a MultiIndex, convert a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n\n Returns\n -------\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, 'tz_convert'):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError('%s is not a valid DatetimeIndex or '\n 'PeriodIndex' % ax_name)\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(\"The level {0} is not valid\".format(level))\n ax = _tz_convert(ax, tz)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n def tz_localize(self, tz, axis=0, level=None, copy=True,\n ambiguous='raise', nonexistent='raise'):\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to localize\n level : int, str, default None\n If axis ia a MultiIndex, localize a specific level. 
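# A small sketch of tz_convert() from above, assuming pandas as pd and that the
# zone names below are available: the instants are unchanged, only their
# rendering moves to the target zone.
import pandas as pd

idx = pd.date_range('2018-09-15 01:30', periods=2, freq='H', tz='UTC')
s = pd.Series([1, 2], index=idx)
s.tz_convert('US/Eastern')   # same instants, rendered in US/Eastern wall time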
Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid valuse are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7), index=pd.DatetimeIndex([\n ... '2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3), index=pd.DatetimeIndex([\n ... '2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n >>> s = pd.Series(range(2), index=pd.DatetimeIndex([\n ... '2015-03-29 02:30:00',\n ... 
'2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = ('raise', 'NaT', 'shift_forward',\n 'shift_backward')\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta):\n raise ValueError(\"The nonexistent argument must be one of 'raise',\"\n \" 'NaT', 'shift_forward', 'shift_backward' or\"\n \" a timedelta object\")\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, 'tz_localize'):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError('%s is not a valid DatetimeIndex or '\n 'PeriodIndex' % ax_name)\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(\n tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(\n ax.levels[level], tz, ambiguous, nonexistent\n )\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(\"The level {0} is not valid\".format(level))\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self):\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... 
})\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(self, percentiles=None, include=None, exclude=None):\n \"\"\"\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the obersvations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. 
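# A short sketch of the `percentiles` argument described above, assuming pandas
# as pd; requested values must lie in [0, 1] and the median is always added.
import pandas as pd

s = pd.Series(range(1, 101))
s.describe(percentiles=[0.1, 0.9])   # index: count, mean, std, min, 10%, 50%, 90%, max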
If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... })\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim >= 3:\n msg = \"describe is not implemented on Panel objects.\"\n raise NotImplementedError(msg)\n elif self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n self._check_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n 
percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (['count', 'mean', 'std', 'min'] +\n formatted_percentiles + ['max'])\n d = ([series.count(), series.mean(), series.std(), series.min()] +\n series.quantile(percentiles).tolist() + [series.max()])\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = ['count', 'unique']\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n\n if is_datetime64_any_dtype(data):\n tz = data.dt.tz\n asint = data.dropna().values.view('i8')\n top = Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n names += ['top', 'freq', 'first', 'last']\n result += [top, freq,\n Timestamp(asint.min(), tz=tz),\n Timestamp(asint.max(), tz=tz)]\n else:\n names += ['top', 'freq']\n result += [top, freq]\n\n return pd.Series(result, index=names, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == 'all':\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.iteritems()]\n # set a convenient order for rows\n names = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)\n d.columns = data.columns.copy()\n return d\n\n def _check_percentile(self, q):\n \"\"\"\n Validate percentiles (used by describe and quantile).\n \"\"\"\n\n msg = (\"percentiles should all be in the interval [0, 1]. \"\n \"Try {0} instead.\")\n q = np.asarray(q)\n if q.ndim == 0:\n if not 0 <= q <= 1:\n raise ValueError(msg.format(q / 100.0))\n else:\n if not all(0 <= qs <= 1 for qs in q):\n raise ValueError(msg.format(q / 100.0))\n return q\n\n _shared_docs['pct_change'] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. 
This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or offset alias string, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... 
index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n **kwargs):\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self.fillna(method=fill_method, limit=limit, axis=axis)\n\n rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,\n **kwargs)) - 1)\n rs = rs.reindex_like(data)\n if freq is None:\n mask = isna(com.values_from_object(data))\n np.putmask(rs.values, mask, np.nan)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n\n axis_descr, name, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany,\n _any_see_also, _any_examples, empty_value=False)\n cls.all = _make_logical_function(\n cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall,\n _all_see_also, _all_examples, empty_value=True)\n\n @Substitution(desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name, name2=name2, axis_descr=axis_descr,\n min_count='', see_also='', examples='')\n @Appender(_num_doc)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level('mad', axis=axis, level=level,\n skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls, 'sem', name, name2, axis_descr,\n \"Return unbiased standard error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. This can be changed \"\n \"using the ddof argument\",\n nanops.nansem)\n cls.var = _make_stat_function_ddof(\n cls, 'var', name, name2, axis_descr,\n \"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n nanops.nanvar)\n cls.std = _make_stat_function_ddof(\n cls, 'std', name, name2, axis_descr,\n \"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. 
This can be changed using the \"\n \"ddof argument\",\n nanops.nanstd)\n\n @Substitution(desc=\"Return the compound percentage of the values for \"\n \"the requested axis.\", name1=name, name2=name2,\n axis_descr=axis_descr,\n min_count='', see_also='', examples='')\n @Appender(_num_doc)\n def compound(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1\n\n cls.compound = compound\n\n cls.cummin = _make_cum_function(\n cls, 'cummin', name, name2, axis_descr, \"minimum\",\n lambda y, axis: np.minimum.accumulate(y, axis), \"min\",\n np.inf, np.nan, _cummin_examples)\n cls.cumsum = _make_cum_function(\n cls, 'cumsum', name, name2, axis_descr, \"sum\",\n lambda y, axis: y.cumsum(axis), \"sum\", 0.,\n np.nan, _cumsum_examples)\n cls.cumprod = _make_cum_function(\n cls, 'cumprod', name, name2, axis_descr, \"product\",\n lambda y, axis: y.cumprod(axis), \"prod\", 1.,\n np.nan, _cumprod_examples)\n cls.cummax = _make_cum_function(\n cls, 'cummax', name, name2, axis_descr, \"maximum\",\n lambda y, axis: np.maximum.accumulate(y, axis), \"max\",\n -np.inf, np.nan, _cummax_examples)\n\n cls.sum = _make_min_count_stat_function(\n cls, 'sum', name, name2, axis_descr,\n \"\"\"Return the sum of the values for the requested axis.\\n\n This is equivalent to the method ``numpy.sum``.\"\"\",\n nanops.nansum, _stat_func_see_also, _sum_examples)\n cls.mean = _make_stat_function(\n cls, 'mean', name, name2, axis_descr,\n 'Return the mean of the values for the requested axis.',\n nanops.nanmean)\n cls.skew = _make_stat_function(\n cls, 'skew', name, name2, axis_descr,\n 'Return unbiased skew over requested axis\\nNormalized by N-1.',\n nanops.nanskew)\n cls.kurt = _make_stat_function(\n cls, 'kurt', name, name2, axis_descr,\n \"Return unbiased kurtosis over requested axis using Fisher's \"\n \"definition of\\nkurtosis (kurtosis of normal == 0.0). Normalized \"\n \"by N-1.\",\n nanops.nankurt)\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls, 'prod', name, name2, axis_descr,\n 'Return the product of the values for the requested axis.',\n nanops.nanprod, examples=_prod_examples)\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls, 'median', name, name2, axis_descr,\n 'Return the median of the values for the requested axis.',\n nanops.nanmedian)\n cls.max = _make_stat_function(\n cls, 'max', name, name2, axis_descr,\n \"\"\"Return the maximum of the values for the requested axis.\\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.\"\"\",\n nanops.nanmax, _stat_func_see_also, _max_examples)\n cls.min = _make_stat_function(\n cls, 'min', name, name2, axis_descr,\n \"\"\"Return the minimum of the values for the requested axis.\\n\n If you want the *index* of the minimum, use ``idxmin``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.\"\"\",\n nanops.nanmin, _stat_func_see_also, _min_examples)\n\n @classmethod\n def _add_series_only_operations(cls):\n \"\"\"\n Add the series only operations to the cls; evaluate the doc\n strings again.\n \"\"\"\n\n axis_descr, name, name2 = _doc_parms(cls)\n\n def nanptp(values, axis=0, skipna=True):\n nmax = nanops.nanmax(values, axis, skipna)\n nmin = nanops.nanmin(values, axis, skipna)\n warnings.warn(\"Method .ptp is deprecated and will be removed \"\n \"in a future version. 
Use numpy.ptp instead.\",\n FutureWarning, stacklevel=4)\n return nmax - nmin\n\n cls.ptp = _make_stat_function(\n cls, 'ptp', name, name2, axis_descr,\n \"\"\"Return the difference between the maximum value and the\n minimum value in the object. This is the equivalent of the\n ``numpy.ndarray`` method ``ptp``.\\n\\n.. deprecated:: 0.24.0\n Use numpy.ptp instead\"\"\",\n nanptp)\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n\n from pandas.core import window as rwindow\n\n @Appender(rwindow.rolling.__doc__)\n def rolling(self, window, min_periods=None, center=False,\n win_type=None, on=None, axis=0, closed=None):\n axis = self._get_axis_number(axis)\n return rwindow.rolling(self, window=window,\n min_periods=min_periods,\n center=center, win_type=win_type,\n on=on, axis=axis, closed=closed)\n\n cls.rolling = rolling\n\n @Appender(rwindow.expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return rwindow.expanding(self, min_periods=min_periods,\n center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(rwindow.ewm.__doc__)\n def ewm(self, com=None, span=None, halflife=None, alpha=None,\n min_periods=0, adjust=True, ignore_na=False,\n axis=0):\n axis = self._get_axis_number(axis)\n return rwindow.ewm(self, com=com, span=span, halflife=halflife,\n alpha=alpha, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na, axis=axis)\n\n cls.ewm = ewm\n\n @Appender(_shared_docs['transform'] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce \"\n \"aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs['valid_index'] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n --------\n scalar : type of index\n\n Notes\n --------\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n assert how in ['first', 'last']\n\n if len(self) == 0: # early stop\n return None\n is_valid = ~self.isna()\n\n if self.ndim == 2:\n is_valid = is_valid.any(1) # reduce axis 1\n\n if how == 'first':\n idxpos = is_valid.values[::].argmax()\n\n if how == 'last':\n idxpos = len(self) - 1 - is_valid.values[::-1].argmax()\n\n chk_notna = is_valid.iat[idxpos]\n idx = self.index[idxpos]\n\n if not chk_notna:\n return None\n return idx\n\n @Appender(_shared_docs['valid_index'] % {'position': 'first',\n 'klass': 'NDFrame'})\n def first_valid_index(self):\n return self._find_valid_index('first')\n\n @Appender(_shared_docs['valid_index'] % {'position': 'last',\n 'klass': 'NDFrame'})\n def last_valid_index(self):\n return self._find_valid_index('last')\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = \"{%s}\" % ', '.join(\"{0} ({1})\".format(a, i)\n for i, a in enumerate(cls._AXIS_ORDERS))\n name = (cls._constructor_sliced.__name__\n if cls._AXIS_LEN > 1 else 'scalar')\n name2 = 
cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\n%(see_also)s\n%(examples)s\\\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. 
zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\n\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. 
non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs['stat_func_example'] = \"\"\"\\\nExamples\n--------\n\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\"\"\"\n\n_sum_examples = _shared_docs['stat_func_example'].format(\n stat_func='sum',\n verb='Sum',\n default_output=14,\n level_output_0=6,\n level_output_1=8)\n\n_sum_examples += \"\"\"\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\n\"\"\"\n\n_max_examples = _shared_docs['stat_func_example'].format(\n stat_func='max',\n verb='Max',\n default_output=8,\n level_output_0=4,\n level_output_1=8)\n\n_min_examples = _shared_docs['stat_func_example'].format(\n stat_func='min',\n verb='Min',\n default_output=0,\n level_output_0=2,\n level_output_1=0)\n\n_stat_func_see_also = \"\"\"\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\n\"\"\"\n\n_prod_examples = \"\"\"\\\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\n\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded :: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc,\n f, see_also='', examples=''):\n @Substitution(desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, min_count=_min_count_stub,\n see_also=see_also, examples=examples)\n @Appender(_num_doc)\n def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,\n min_count=0,\n **kwargs):\n if name == 'sum':\n nv.validate_sum(tuple(), kwargs)\n elif name == 'prod':\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna, min_count=min_count)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=numeric_only, min_count=min_count)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(cls, name, name1, name2, axis_descr, desc, f,\n see_also='', examples=''):\n @Substitution(desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, min_count='', see_also=see_also,\n examples=examples)\n @Appender(_num_doc)\n def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,\n **kwargs):\n if name == 'median':\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=numeric_only)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):\n @Substitution(desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(self, axis=None, skipna=None, level=None, ddof=1,\n numeric_only=None, **kwargs):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna, ddof=ddof)\n return self._reduce(f, name, axis=axis, numeric_only=numeric_only,\n skipna=skipna, ddof=ddof)\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(cls, name, name1, name2, axis_descr, desc,\n accum_func, accum_func_name, mask_a, mask_b, examples):\n @Substitution(desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, accum_func_name=accum_func_name,\n examples=examples)\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n y = com.values_from_object(self).copy()\n\n if (skipna and\n issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):\n result = accum_func(y, axis)\n mask = isna(self)\n np.putmask(result, mask, iNaT)\n elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):\n mask = isna(self)\n np.putmask(y, mask, mask_a)\n result = accum_func(y, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(y, axis)\n\n d = self._construct_axes_dict()\n d['copy'] = False\n return 
self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(cls, name, name1, name2, axis_descr, desc, f,\n see_also, examples, empty_value):\n @Substitution(desc=desc, name1=name1, name2=name2,\n axis_descr=axis_descr, see_also=see_also, examples=examples,\n empty_value=empty_value)\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None,\n **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\"Option bool_only is not \"\n \"implemented with option level.\")\n return self._agg_by_level(name, axis=axis, level=level,\n skipna=skipna)\n return self._reduce(f, name, axis=axis, skipna=skipna,\n numeric_only=bool_only, filter_type='bool')\n\n return set_function_name(logical_func, name, cls)\n\n\n# install the indexes\nfor _name, _indexer in indexing.get_indexers_list():\n NDFrame._create_indexer(_name, _indexer)\n"
] |
[
[
"pandas.io.pickle.to_pickle",
"pandas.core.index.RangeIndex",
"pandas.core.missing.clean_reindex_fill_method",
"pandas.compat.lzip",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.core.dtypes.common.is_number",
"pandas.core.window.rolling",
"pandas.compat.iteritems",
"pandas.DataFrame",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.core.common.SettingWithCopyError",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.common._get_rename_function",
"numpy.prod",
"pandas.core.dtypes.common.is_bool",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.common.count_not_none",
"pandas.Index",
"numpy.minimum.accumulate",
"numpy.errstate",
"numpy.any",
"pandas.core.window.ewm",
"pandas.core.resample.asfreq",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.concat",
"pandas.core.dtypes.missing.isna",
"pandas.core.index.Index",
"pandas.core.nanops.nanmin",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.common.random_state",
"pandas.core.missing.clean_fill_method",
"pandas.compat.to_str",
"numpy.array",
"pandas.io.sql.to_sql",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.ops._align_method_FRAME",
"pandas.isnull",
"pandas.core.common.maybe_make_list",
"pandas.core.indexes.datetimes.DatetimeIndex",
"numpy.maximum.accumulate",
"pandas.core.dtypes.common.is_period_arraylike",
"pandas.Series",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexing.get_indexers_list",
"pandas.core.dtypes.cast.maybe_upcast_putmask",
"pandas.core.common.apply_if_callable",
"pandas.io.formats.format.format_percentiles",
"pandas.core.nanops.nanmax",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.indexes.period.Period",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.config.get_option",
"pandas.util._decorators.Substitution",
"pandas.errors.AbstractMethodError",
"pandas.core.common.index_labels_to_array",
"pandas.core.dtypes.cast.maybe_promote",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.common.values_from_object",
"pandas.core.index.ensure_index",
"pandas.core.dtypes.common.is_integer",
"pandas.core.algorithms.rank",
"pandas.io.json.to_json",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_list_like",
"numpy.asarray",
"pandas.core.missing.mask_missing",
"pandas.core.missing.get_fill_func",
"numpy.abs",
"pandas.core.resample.resample",
"pandas.core.groupby.groupby.groupby",
"pandas.io.pytables.to_hdf",
"pandas.core.window.expanding",
"pandas.compat.map",
"pandas.core.dtypes.inference.is_hashable",
"pandas.core.dtypes.missing.notna",
"numpy.putmask",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.common._pipe",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.tools.datetimes.to_datetime",
"pandas.compat.isidentifier",
"pandas.util._decorators.Appender",
"pandas.compat.lrange",
"pandas.core.resample._maybe_process_deprecations",
"pandas.compat.numpy.function.validate_transpose_for_generic",
"pandas.tseries.frequencies.to_offset",
"numpy.isnan",
"pandas.compat.zip",
"pandas.io.clipboards.to_clipboard",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.io.packers.to_msgpack",
"pandas.compat.set_function_name",
"numpy.asanyarray",
"numpy.unique"
]
] |
aditzkabitz/pandas
|
[
"a3702e2207af106939804ceb193bf4fae3e23a31"
] |
[
"pandas/core/groupby/ops.py"
] |
[
"\"\"\"\nProvide classes to perform the groupby aggregate operations.\n\nThese are not exposed to the user and provide implementations of the grouping\noperations, primarily in cython. These classes (BaseGrouper and BinGrouper)\nare contained *in* the SeriesGroupBy and DataFrameGroupBy objects.\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nimport functools\nfrom typing import (\n Callable,\n Generic,\n Hashable,\n Iterator,\n Sequence,\n final,\n overload,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n lib,\n)\nimport pandas._libs.groupby as libgroupby\nimport pandas._libs.reduction as libreduction\nfrom pandas._typing import (\n ArrayLike,\n DtypeObj,\n NDFrameT,\n Shape,\n npt,\n)\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.cast import (\n maybe_cast_pointwise_result,\n maybe_downcast_to_dtype,\n)\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n ensure_int64,\n ensure_platform_int,\n is_1d_only_ea_obj,\n is_bool_dtype,\n is_categorical_dtype,\n is_complex_dtype,\n is_datetime64_any_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_numeric_dtype,\n is_sparse,\n is_timedelta64_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.missing import (\n isna,\n maybe_fill,\n)\n\nfrom pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays.boolean import BooleanDtype\nfrom pandas.core.arrays.floating import (\n Float64Dtype,\n FloatingDtype,\n)\nfrom pandas.core.arrays.integer import (\n Int64Dtype,\n _IntegerDtype,\n)\nfrom pandas.core.arrays.masked import (\n BaseMaskedArray,\n BaseMaskedDtype,\n)\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.groupby import grouper\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Index,\n MultiIndex,\n ensure_index,\n)\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import (\n compress_group_index,\n decons_obs_group_ids,\n get_flattened_list,\n get_group_index,\n get_group_index_sorter,\n get_indexer_dict,\n)\n\n\nclass WrappedCythonOp:\n \"\"\"\n Dispatch logic for functions defined in _libs.groupby\n \"\"\"\n\n # Functions for which we do _not_ attempt to cast the cython result\n # back to the original dtype.\n cast_blocklist = frozenset([\"rank\", \"count\", \"size\", \"idxmin\", \"idxmax\"])\n\n def __init__(self, kind: str, how: str):\n self.kind = kind\n self.how = how\n\n _CYTHON_FUNCTIONS = {\n \"aggregate\": {\n \"add\": \"group_add\",\n \"prod\": \"group_prod\",\n \"min\": \"group_min\",\n \"max\": \"group_max\",\n \"mean\": \"group_mean\",\n \"median\": \"group_median\",\n \"var\": \"group_var\",\n \"first\": \"group_nth\",\n \"last\": \"group_last\",\n \"ohlc\": \"group_ohlc\",\n },\n \"transform\": {\n \"cumprod\": \"group_cumprod\",\n \"cumsum\": \"group_cumsum\",\n \"cummin\": \"group_cummin\",\n \"cummax\": \"group_cummax\",\n \"rank\": \"group_rank\",\n },\n }\n\n _MASKED_CYTHON_FUNCTIONS = {\"cummin\", \"cummax\", \"min\", \"max\"}\n\n _cython_arity = {\"ohlc\": 4} # OHLC\n\n # Note: we make this a classmethod and pass kind+how so that caching\n # works at the class level and not the instance level\n @classmethod\n @functools.lru_cache(maxsize=None)\n def _get_cython_function(\n cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool\n ):\n\n dtype_str 
= dtype.name\n ftype = cls._CYTHON_FUNCTIONS[kind][how]\n\n # see if there is a fused-type version of function\n # only valid for numeric\n f = getattr(libgroupby, ftype)\n if is_numeric:\n return f\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Literal['object']\")\n elif dtype == object: # type: ignore[comparison-overlap]\n if \"object\" not in f.__signatures__:\n # raise NotImplementedError here rather than TypeError later\n raise NotImplementedError(\n f\"function is not implemented for this dtype: \"\n f\"[how->{how},dtype->{dtype_str}]\"\n )\n return f\n\n def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):\n \"\"\"\n Find the appropriate cython function, casting if necessary.\n\n Parameters\n ----------\n values : np.ndarray\n is_numeric : bool\n\n Returns\n -------\n func : callable\n values : np.ndarray\n \"\"\"\n how = self.how\n kind = self.kind\n\n if how in [\"median\", \"cumprod\"]:\n # these two only have float64 implementations\n if is_numeric:\n values = ensure_float64(values)\n else:\n raise NotImplementedError(\n f\"function is not implemented for this dtype: \"\n f\"[how->{how},dtype->{values.dtype.name}]\"\n )\n func = getattr(libgroupby, f\"group_{how}_float64\")\n return func, values\n\n func = self._get_cython_function(kind, how, values.dtype, is_numeric)\n\n if values.dtype.kind in [\"i\", \"u\"]:\n if how in [\"add\", \"var\", \"prod\", \"mean\", \"ohlc\"]:\n # result may still include NaN, so we have to cast\n values = ensure_float64(values)\n\n return func, values\n\n def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):\n \"\"\"\n Check if we can do this operation with our cython functions.\n\n Raises\n ------\n NotImplementedError\n This is either not a valid function for this dtype, or\n valid but not implemented in cython.\n \"\"\"\n how = self.how\n\n if is_numeric:\n # never an invalid op for those dtypes, so return early as fastpath\n return\n\n if is_categorical_dtype(dtype):\n # NotImplementedError for methods that can fall back to a\n # non-cython implementation.\n if how in [\"add\", \"prod\", \"cumsum\", \"cumprod\"]:\n raise TypeError(f\"{dtype} type does not support {how} operations\")\n raise NotImplementedError(f\"{dtype} dtype not supported\")\n\n elif is_sparse(dtype):\n # categoricals are only 1d, so we\n # are not setup for dim transforming\n raise NotImplementedError(f\"{dtype} dtype not supported\")\n elif is_datetime64_any_dtype(dtype):\n # we raise NotImplemented if this is an invalid operation\n # entirely, e.g. 
adding datetimes\n if how in [\"add\", \"prod\", \"cumsum\", \"cumprod\"]:\n raise TypeError(f\"datetime64 type does not support {how} operations\")\n elif is_timedelta64_dtype(dtype):\n if how in [\"prod\", \"cumprod\"]:\n raise TypeError(f\"timedelta64 type does not support {how} operations\")\n\n def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:\n how = self.how\n kind = self.kind\n\n arity = self._cython_arity.get(how, 1)\n\n out_shape: Shape\n if how == \"ohlc\":\n out_shape = (ngroups, 4)\n elif arity > 1:\n raise NotImplementedError(\n \"arity of more than 1 is not supported for the 'how' argument\"\n )\n elif kind == \"transform\":\n out_shape = values.shape\n else:\n out_shape = (ngroups,) + values.shape[1:]\n return out_shape\n\n def get_out_dtype(self, dtype: np.dtype) -> np.dtype:\n how = self.how\n\n if how == \"rank\":\n out_dtype = \"float64\"\n else:\n if is_numeric_dtype(dtype):\n out_dtype = f\"{dtype.kind}{dtype.itemsize}\"\n else:\n out_dtype = \"object\"\n return np.dtype(out_dtype)\n\n @overload\n def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:\n ... # pragma: no cover\n\n @overload\n def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype:\n ... # pragma: no cover\n\n def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:\n \"\"\"\n Get the desired dtype of a result based on the\n input dtype and how it was computed.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n Input dtype.\n\n Returns\n -------\n np.dtype or ExtensionDtype\n The desired dtype of the result.\n \"\"\"\n how = self.how\n\n if how in [\"add\", \"cumsum\", \"sum\", \"prod\"]:\n if dtype == np.dtype(bool):\n return np.dtype(np.int64)\n elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):\n return Int64Dtype()\n elif how in [\"mean\", \"median\", \"var\"]:\n if isinstance(dtype, (BooleanDtype, _IntegerDtype)):\n return Float64Dtype()\n elif is_float_dtype(dtype) or is_complex_dtype(dtype):\n return dtype\n elif is_numeric_dtype(dtype):\n return np.dtype(np.float64)\n return dtype\n\n def uses_mask(self) -> bool:\n return self.how in self._MASKED_CYTHON_FUNCTIONS\n\n @final\n def _ea_wrap_cython_operation(\n self,\n values: ExtensionArray,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n **kwargs,\n ) -> ArrayLike:\n \"\"\"\n If we have an ExtensionArray, unwrap, call _cython_operation, and\n re-wrap if appropriate.\n \"\"\"\n # TODO: general case implementation overridable by EAs.\n if isinstance(values, BaseMaskedArray) and self.uses_mask():\n return self._masked_ea_wrap_cython_operation(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n **kwargs,\n )\n\n if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):\n # All of the functions implemented here are ordinal, so we can\n # operate on the tz-naive equivalents\n npvalues = values._ndarray.view(\"M8[ns]\")\n elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):\n # IntegerArray or BooleanArray\n npvalues = values.to_numpy(\"float64\", na_value=np.nan)\n elif isinstance(values.dtype, FloatingDtype):\n # FloatingArray\n npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)\n elif isinstance(values.dtype, StringDtype):\n # StringArray\n npvalues = values.to_numpy(object, na_value=np.nan)\n else:\n raise NotImplementedError(\n f\"function is not implemented for this dtype: {values.dtype}\"\n )\n\n res_values = self._cython_op_ndim_compat(\n npvalues,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n 
mask=None,\n **kwargs,\n )\n\n if self.how in [\"rank\"]:\n # i.e. how in WrappedCythonOp.cast_blocklist, since\n # other cast_blocklist methods dont go through cython_operation\n return res_values\n\n return self._reconstruct_ea_result(values, res_values)\n\n def _reconstruct_ea_result(self, values, res_values):\n \"\"\"\n Construct an ExtensionArray result from an ndarray result.\n \"\"\"\n # TODO: allow EAs to override this logic\n\n if isinstance(\n values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)\n ):\n dtype = self._get_result_dtype(values.dtype)\n cls = dtype.construct_array_type()\n return cls._from_sequence(res_values, dtype=dtype)\n\n elif needs_i8_conversion(values.dtype):\n i8values = res_values.view(\"i8\")\n return type(values)(i8values, dtype=values.dtype)\n\n raise NotImplementedError\n\n @final\n def _masked_ea_wrap_cython_operation(\n self,\n values: BaseMaskedArray,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n **kwargs,\n ) -> BaseMaskedArray:\n \"\"\"\n Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's\n and cython algorithms which accept a mask.\n \"\"\"\n orig_values = values\n\n # Copy to ensure input and result masks don't end up shared\n mask = values._mask.copy()\n result_mask = np.zeros(ngroups, dtype=bool)\n arr = values._data\n\n res_values = self._cython_op_ndim_compat(\n arr,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n\n dtype = self._get_result_dtype(orig_values.dtype)\n assert isinstance(dtype, BaseMaskedDtype)\n cls = dtype.construct_array_type()\n\n if self.kind != \"aggregate\":\n return cls(res_values.astype(dtype.type, copy=False), mask)\n else:\n return cls(res_values.astype(dtype.type, copy=False), result_mask)\n\n @final\n def _cython_op_ndim_compat(\n self,\n values: np.ndarray,\n *,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n mask: np.ndarray | None = None,\n result_mask: np.ndarray | None = None,\n **kwargs,\n ) -> np.ndarray:\n if values.ndim == 1:\n # expand to 2d, dispatch, then squeeze if appropriate\n values2d = values[None, :]\n if mask is not None:\n mask = mask[None, :]\n if result_mask is not None:\n result_mask = result_mask[None, :]\n res = self._call_cython_op(\n values2d,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n if res.shape[0] == 1:\n return res[0]\n\n # otherwise we have OHLC\n return res.T\n\n return self._call_cython_op(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=mask,\n result_mask=result_mask,\n **kwargs,\n )\n\n @final\n def _call_cython_op(\n self,\n values: np.ndarray, # np.ndarray[ndim=2]\n *,\n min_count: int,\n ngroups: int,\n comp_ids: np.ndarray,\n mask: np.ndarray | None,\n result_mask: np.ndarray | None,\n **kwargs,\n ) -> np.ndarray: # np.ndarray[ndim=2]\n orig_values = values\n\n dtype = values.dtype\n is_numeric = is_numeric_dtype(dtype)\n\n is_datetimelike = needs_i8_conversion(dtype)\n\n if is_datetimelike:\n values = values.view(\"int64\")\n is_numeric = True\n elif is_bool_dtype(dtype):\n values = values.astype(\"int64\")\n elif is_integer_dtype(dtype):\n # e.g. 
uint8 -> uint64, int16 -> int64\n dtype_str = dtype.kind + \"8\"\n values = values.astype(dtype_str, copy=False)\n elif is_numeric:\n if not is_complex_dtype(dtype):\n values = ensure_float64(values)\n\n values = values.T\n if mask is not None:\n mask = mask.T\n if result_mask is not None:\n result_mask = result_mask.T\n\n out_shape = self._get_output_shape(ngroups, values)\n func, values = self.get_cython_func_and_vals(values, is_numeric)\n out_dtype = self.get_out_dtype(values.dtype)\n\n result = maybe_fill(np.empty(out_shape, dtype=out_dtype))\n if self.kind == \"aggregate\":\n counts = np.zeros(ngroups, dtype=np.int64)\n if self.how in [\"min\", \"max\", \"mean\"]:\n func(\n result,\n counts,\n values,\n comp_ids,\n min_count,\n mask=mask,\n result_mask=result_mask,\n is_datetimelike=is_datetimelike,\n )\n elif self.how in [\"add\"]:\n # We support datetimelike\n func(\n result,\n counts,\n values,\n comp_ids,\n min_count,\n datetimelike=is_datetimelike,\n )\n else:\n func(result, counts, values, comp_ids, min_count)\n else:\n # TODO: min_count\n if self.uses_mask():\n func(\n result,\n values,\n comp_ids,\n ngroups,\n is_datetimelike,\n mask=mask,\n **kwargs,\n )\n else:\n func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)\n\n if self.kind == \"aggregate\":\n # i.e. counts is defined. Locations where count<min_count\n # need to have the result set to np.nan, which may require casting,\n # see GH#40767\n if is_integer_dtype(result.dtype) and not is_datetimelike:\n cutoff = max(1, min_count)\n empty_groups = counts < cutoff\n if empty_groups.any():\n # Note: this conversion could be lossy, see GH#40767\n result = result.astype(\"float64\")\n result[empty_groups] = np.nan\n\n result = result.T\n\n if self.how not in self.cast_blocklist:\n # e.g. if we are int64 and need to restore to datetime64/timedelta64\n # \"rank\" is the only member of cast_blocklist we get here\n res_dtype = self._get_result_dtype(orig_values.dtype)\n op_result = maybe_downcast_to_dtype(result, res_dtype)\n else:\n op_result = result\n\n # error: Incompatible return value type (got \"Union[ExtensionArray, ndarray]\",\n # expected \"ndarray\")\n return op_result # type: ignore[return-value]\n\n @final\n def cython_operation(\n self,\n *,\n values: ArrayLike,\n axis: int,\n min_count: int = -1,\n comp_ids: np.ndarray,\n ngroups: int,\n **kwargs,\n ) -> ArrayLike:\n \"\"\"\n Call our cython function, with appropriate pre- and post- processing.\n \"\"\"\n if values.ndim > 2:\n raise NotImplementedError(\"number of dimensions is currently limited to 2\")\n elif values.ndim == 2:\n assert axis == 1, axis\n elif not is_1d_only_ea_obj(values):\n # Note: it is *not* the case that axis is always 0 for 1-dim values,\n # as we can have 1D ExtensionArrays that we need to treat as 2D\n assert axis == 0\n\n dtype = values.dtype\n is_numeric = is_numeric_dtype(dtype)\n\n # can we do this operation with our cython functions\n # if not raise NotImplementedError\n self._disallow_invalid_ops(dtype, is_numeric)\n\n if not isinstance(values, np.ndarray):\n # i.e. 
ExtensionArray\n return self._ea_wrap_cython_operation(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n **kwargs,\n )\n\n return self._cython_op_ndim_compat(\n values,\n min_count=min_count,\n ngroups=ngroups,\n comp_ids=comp_ids,\n mask=None,\n **kwargs,\n )\n\n\nclass BaseGrouper:\n \"\"\"\n This is an internal Grouper class, which actually holds\n the generated groups\n\n Parameters\n ----------\n axis : Index\n groupings : Sequence[Grouping]\n all the grouping instances to handle in this grouper\n for example for grouper list to groupby, need to pass the list\n sort : bool, default True\n whether this grouper will give sorted result or not\n group_keys : bool, default True\n mutated : bool, default False\n indexer : np.ndarray[np.intp], optional\n the indexer created by Grouper\n some groupers (TimeGrouper) will sort its axis and its\n group_info is also sorted, so need the indexer to reorder\n\n \"\"\"\n\n axis: Index\n\n def __init__(\n self,\n axis: Index,\n groupings: Sequence[grouper.Grouping],\n sort: bool = True,\n group_keys: bool = True,\n mutated: bool = False,\n indexer: npt.NDArray[np.intp] | None = None,\n dropna: bool = True,\n ):\n assert isinstance(axis, Index), axis\n\n self.axis = axis\n self._groupings: list[grouper.Grouping] = list(groupings)\n self._sort = sort\n self.group_keys = group_keys\n self.mutated = mutated\n self.indexer = indexer\n self.dropna = dropna\n\n @property\n def groupings(self) -> list[grouper.Grouping]:\n return self._groupings\n\n @property\n def shape(self) -> Shape:\n return tuple(ping.ngroups for ping in self.groupings)\n\n def __iter__(self):\n return iter(self.indices)\n\n @property\n def nkeys(self) -> int:\n return len(self.groupings)\n\n def get_iterator(\n self, data: NDFrameT, axis: int = 0\n ) -> Iterator[tuple[Hashable, NDFrameT]]:\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n splitter = self._get_splitter(data, axis=axis)\n keys = self.group_keys_seq\n for key, group in zip(keys, splitter):\n yield key, group.__finalize__(data, method=\"groupby\")\n\n @final\n def _get_splitter(self, data: NDFrame, axis: int = 0) -> DataSplitter:\n \"\"\"\n Returns\n -------\n Generator yielding subsetted objects\n\n __finalize__ has not been called for the subsetted objects returned.\n \"\"\"\n ids, _, ngroups = self.group_info\n return get_splitter(data, ids, ngroups, axis=axis)\n\n def _get_grouper(self):\n \"\"\"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n \"\"\"\n return self.groupings[0].grouping_vector\n\n @final\n @cache_readonly\n def group_keys_seq(self):\n if len(self.groupings) == 1:\n return self.levels[0]\n else:\n ids, _, ngroups = self.group_info\n\n # provide \"flattened\" iterator for multi-group setting\n return get_flattened_list(ids, ngroups, self.levels, self.codes)\n\n @final\n def apply(\n self, f: Callable, data: DataFrame | Series, axis: int = 0\n ) -> tuple[list, bool]:\n mutated = self.mutated\n splitter = self._get_splitter(data, axis=axis)\n group_keys = self.group_keys_seq\n result_values = []\n\n # This calls DataSplitter.__iter__\n zipped = zip(group_keys, splitter)\n\n for key, group in zipped:\n object.__setattr__(group, \"name\", key)\n\n # group might be modified\n group_axes = group.axes\n res = f(group)\n if not mutated and not _is_indexed_like(res, group_axes, axis):\n mutated = True\n 
result_values.append(res)\n\n # getattr pattern for __name__ is needed for functools.partial objects\n if len(group_keys) == 0 and getattr(f, \"__name__\", None) not in [\n \"idxmin\",\n \"idxmax\",\n \"nanargmin\",\n \"nanargmax\",\n ]:\n # If group_keys is empty, then no function calls have been made,\n # so we will not have raised even if this is an invalid dtype.\n # So do one dummy call here to raise appropriate TypeError.\n f(data.iloc[:0])\n\n return result_values, mutated\n\n @cache_readonly\n def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n \"\"\"dict {group name -> group indices}\"\"\"\n if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):\n # This shows unused categories in indices GH#38642\n return self.groupings[0].indices\n codes_list = [ping.codes for ping in self.groupings]\n keys = [ping.group_index for ping in self.groupings]\n return get_indexer_dict(codes_list, keys)\n\n @final\n @property\n def codes(self) -> list[np.ndarray]:\n return [ping.codes for ping in self.groupings]\n\n @property\n def levels(self) -> list[Index]:\n return [ping.group_index for ping in self.groupings]\n\n @property\n def names(self) -> list[Hashable]:\n return [ping.name for ping in self.groupings]\n\n @final\n def size(self) -> Series:\n \"\"\"\n Compute group sizes.\n \"\"\"\n ids, _, ngroups = self.group_info\n if ngroups:\n out = np.bincount(ids[ids != -1], minlength=ngroups)\n else:\n out = []\n return Series(out, index=self.result_index, dtype=\"int64\")\n\n @cache_readonly\n def groups(self) -> dict[Hashable, np.ndarray]:\n \"\"\"dict {group name -> group labels}\"\"\"\n if len(self.groupings) == 1:\n return self.groupings[0].groups\n else:\n to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))\n index = Index(to_groupby)\n return self.axis.groupby(index)\n\n @final\n @cache_readonly\n def is_monotonic(self) -> bool:\n # return if my group orderings are monotonic\n return Index(self.group_info[0]).is_monotonic\n\n @cache_readonly\n def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:\n comp_ids, obs_group_ids = self._get_compressed_codes()\n\n ngroups = len(obs_group_ids)\n comp_ids = ensure_platform_int(comp_ids)\n\n return comp_ids, obs_group_ids, ngroups\n\n @final\n @cache_readonly\n def codes_info(self) -> npt.NDArray[np.intp]:\n # return the codes of items in original grouped axis\n ids, _, _ = self.group_info\n if self.indexer is not None:\n sorter = np.lexsort((ids, self.indexer))\n ids = ids[sorter]\n ids = ensure_platform_int(ids)\n # TODO: if numpy annotates np.lexsort, this ensure_platform_int\n # may become unnecessary\n return ids\n\n @final\n def _get_compressed_codes(self) -> tuple[np.ndarray, npt.NDArray[np.intp]]:\n # The first returned ndarray may have any signed integer dtype\n if len(self.groupings) > 1:\n group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)\n return compress_group_index(group_index, sort=self._sort)\n\n ping = self.groupings[0]\n return ping.codes, np.arange(len(ping.group_index), dtype=np.intp)\n\n @final\n @cache_readonly\n def ngroups(self) -> int:\n return len(self.result_index)\n\n @property\n def reconstructed_codes(self) -> list[np.ndarray]:\n codes = self.codes\n ids, obs_ids, _ = self.group_info\n return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)\n\n @final\n @cache_readonly\n def result_arraylike(self) -> ArrayLike:\n \"\"\"\n Analogous to result_index, but returning an ndarray/ExtensionArray\n allowing us to 
retain ExtensionDtypes not supported by Index.\n \"\"\"\n # TODO(ExtensionIndex): once Index supports arbitrary EAs, this can\n # be removed in favor of result_index\n if len(self.groupings) == 1:\n return self.groupings[0].group_arraylike\n\n # result_index is MultiIndex\n return self.result_index._values\n\n @cache_readonly\n def result_index(self) -> Index:\n if len(self.groupings) == 1:\n return self.groupings[0].result_index.rename(self.names[0])\n\n codes = self.reconstructed_codes\n levels = [ping.result_index for ping in self.groupings]\n return MultiIndex(\n levels=levels, codes=codes, verify_integrity=False, names=self.names\n )\n\n @final\n def get_group_levels(self) -> list[ArrayLike]:\n # Note: only called from _insert_inaxis_grouper_inplace, which\n # is only called for BaseGrouper, never for BinGrouper\n if len(self.groupings) == 1:\n return [self.groupings[0].group_arraylike]\n\n name_list = []\n for ping, codes in zip(self.groupings, self.reconstructed_codes):\n codes = ensure_platform_int(codes)\n levels = ping.group_arraylike.take(codes)\n\n name_list.append(levels)\n\n return name_list\n\n # ------------------------------------------------------------\n # Aggregation functions\n\n @final\n def _cython_operation(\n self,\n kind: str,\n values,\n how: str,\n axis: int,\n min_count: int = -1,\n **kwargs,\n ) -> ArrayLike:\n \"\"\"\n Returns the values of a cython operation.\n \"\"\"\n assert kind in [\"transform\", \"aggregate\"]\n\n cy_op = WrappedCythonOp(kind=kind, how=how)\n\n ids, _, _ = self.group_info\n ngroups = self.ngroups\n return cy_op.cython_operation(\n values=values,\n axis=axis,\n min_count=min_count,\n comp_ids=ids,\n ngroups=ngroups,\n **kwargs,\n )\n\n @final\n def agg_series(\n self, obj: Series, func: Callable, preserve_dtype: bool = False\n ) -> ArrayLike:\n \"\"\"\n Parameters\n ----------\n obj : Series\n func : function taking a Series and returning a scalar-like\n preserve_dtype : bool\n Whether the aggregation is known to be dtype-preserving.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n # test_groupby_empty_with_category gets here with self.ngroups == 0\n # and len(obj) > 0\n\n if len(obj) == 0:\n # SeriesGrouper would raise if we were to call _aggregate_series_fast\n result = self._aggregate_series_pure_python(obj, func)\n\n elif not isinstance(obj._values, np.ndarray):\n result = self._aggregate_series_pure_python(obj, func)\n\n # we can preserve a little bit more aggressively with EA dtype\n # because maybe_cast_pointwise_result will do a try/except\n # with _from_sequence. 
NB we are assuming here that _from_sequence\n # is sufficiently strict that it casts appropriately.\n preserve_dtype = True\n\n else:\n result = self._aggregate_series_pure_python(obj, func)\n\n npvalues = lib.maybe_convert_objects(result, try_float=False)\n if preserve_dtype:\n out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)\n else:\n out = npvalues\n return out\n\n @final\n def _aggregate_series_pure_python(\n self, obj: Series, func: Callable\n ) -> npt.NDArray[np.object_]:\n ids, _, ngroups = self.group_info\n\n counts = np.zeros(ngroups, dtype=int)\n result = np.empty(ngroups, dtype=\"O\")\n initialized = False\n\n # equiv: splitter = self._get_splitter(obj, axis=0)\n splitter = get_splitter(obj, ids, ngroups, axis=0)\n\n for i, group in enumerate(splitter):\n res = func(group)\n res = libreduction.extract_result(res)\n\n if not initialized:\n # We only do this validation on the first iteration\n libreduction.check_result_array(res, group.dtype)\n initialized = True\n\n counts[i] = group.shape[0]\n result[i] = res\n\n return result\n\n\nclass BinGrouper(BaseGrouper):\n \"\"\"\n This is an internal Grouper class\n\n Parameters\n ----------\n bins : the split index of binlabels to group the item of axis\n binlabels : the label list\n mutated : bool, default False\n indexer : np.ndarray[np.intp]\n\n Examples\n --------\n bins: [2, 4, 6, 8, 10]\n binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',\n '2005-01-05', '2005-01-07', '2005-01-09'],\n dtype='datetime64[ns]', freq='2D')\n\n the group_info, which contains the label of each item in grouped\n axis, the index of label in label list, group number, is\n\n (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)\n\n means that, the grouped axis has 10 items, can be grouped into 5\n labels, the first and second items belong to the first label, the\n third and forth items belong to the second label, and so on\n\n \"\"\"\n\n bins: npt.NDArray[np.int64]\n binlabels: Index\n mutated: bool\n\n def __init__(\n self,\n bins,\n binlabels,\n mutated: bool = False,\n indexer=None,\n ):\n self.bins = ensure_int64(bins)\n self.binlabels = ensure_index(binlabels)\n self.mutated = mutated\n self.indexer = indexer\n\n # These lengths must match, otherwise we could call agg_series\n # with empty self.bins, which would raise in libreduction.\n assert len(self.binlabels) == len(self.bins)\n\n @cache_readonly\n def groups(self):\n \"\"\"dict {group name -> group labels}\"\"\"\n # this is mainly for compat\n # GH 3881\n result = {\n key: value\n for key, value in zip(self.binlabels, self.bins)\n if key is not NaT\n }\n return result\n\n @property\n def nkeys(self) -> int:\n # still matches len(self.groupings), but we can hard-code\n return 1\n\n def _get_grouper(self):\n \"\"\"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n \"\"\"\n return self\n\n def get_iterator(self, data: NDFrame, axis: int = 0):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n if axis == 0:\n slicer = lambda start, edge: data.iloc[start:edge]\n else:\n slicer = lambda start, edge: data.iloc[:, start:edge]\n\n length = len(data.axes[axis])\n\n start = 0\n for edge, label in zip(self.bins, self.binlabels):\n if label is not NaT:\n yield label, slicer(start, edge)\n start = edge\n\n if start < length:\n yield self.binlabels[-1], slicer(start, None)\n\n @cache_readonly\n 
def indices(self):\n indices = collections.defaultdict(list)\n\n i = 0\n for label, bin in zip(self.binlabels, self.bins):\n if i < bin:\n if label is not NaT:\n indices[label] = list(range(i, bin))\n i = bin\n return indices\n\n @cache_readonly\n def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:\n ngroups = self.ngroups\n obs_group_ids = np.arange(ngroups, dtype=np.intp)\n rep = np.diff(np.r_[0, self.bins])\n\n rep = ensure_platform_int(rep)\n if ngroups == len(self.bins):\n comp_ids = np.repeat(np.arange(ngroups), rep)\n else:\n comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)\n\n return (\n ensure_platform_int(comp_ids),\n obs_group_ids,\n ngroups,\n )\n\n @cache_readonly\n def reconstructed_codes(self) -> list[np.ndarray]:\n # get unique result indices, and prepend 0 as groupby starts from the first\n return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]\n\n @cache_readonly\n def result_index(self):\n if len(self.binlabels) != 0 and isna(self.binlabels[0]):\n return self.binlabels[1:]\n\n return self.binlabels\n\n @property\n def levels(self) -> list[Index]:\n return [self.binlabels]\n\n @property\n def names(self) -> list[Hashable]:\n return [self.binlabels.name]\n\n @property\n def groupings(self) -> list[grouper.Grouping]:\n lev = self.binlabels\n ping = grouper.Grouping(lev, lev, in_axis=False, level=None)\n return [ping]\n\n def _aggregate_series_fast(self, obj: Series, func: Callable) -> np.ndarray:\n # -> np.ndarray[object]\n raise NotImplementedError(\n \"This should not be reached; use _aggregate_series_pure_python\"\n )\n\n\ndef _is_indexed_like(obj, axes, axis: int) -> bool:\n if isinstance(obj, Series):\n if len(axes) > 1:\n return False\n return obj.axes[axis].equals(axes[axis])\n elif isinstance(obj, DataFrame):\n return obj.axes[axis].equals(axes[axis])\n\n return False\n\n\n# ----------------------------------------------------------------------\n# Splitting / application\n\n\nclass DataSplitter(Generic[NDFrameT]):\n def __init__(\n self,\n data: NDFrameT,\n labels: npt.NDArray[np.intp],\n ngroups: int,\n axis: int = 0,\n ):\n self.data = data\n self.labels = ensure_platform_int(labels) # _should_ already be np.intp\n self.ngroups = ngroups\n\n self.axis = axis\n assert isinstance(axis, int), axis\n\n @cache_readonly\n def slabels(self) -> npt.NDArray[np.intp]:\n # Sorted labels\n return self.labels.take(self._sort_idx)\n\n @cache_readonly\n def _sort_idx(self) -> npt.NDArray[np.intp]:\n # Counting sort indexer\n return get_group_index_sorter(self.labels, self.ngroups)\n\n def __iter__(self):\n sdata = self.sorted_data\n\n if self.ngroups == 0:\n # we are inside a generator, rather than raise StopIteration\n # we merely return signal the end\n return\n\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\n\n for start, end in zip(starts, ends):\n yield self._chop(sdata, slice(start, end))\n\n @cache_readonly\n def sorted_data(self) -> NDFrameT:\n return self.data.take(self._sort_idx, axis=self.axis)\n\n def _chop(self, sdata, slice_obj: slice) -> NDFrame:\n raise AbstractMethodError(self)\n\n\nclass SeriesSplitter(DataSplitter):\n def _chop(self, sdata: Series, slice_obj: slice) -> Series:\n # fastpath equivalent to `sdata.iloc[slice_obj]`\n mgr = sdata._mgr.get_slice(slice_obj)\n # __finalize__ not called here, must be applied by caller if applicable\n\n # fastpath equivalent to:\n # `return sdata._constructor(mgr, name=sdata.name, fastpath=True)`\n obj = type(sdata)._from_mgr(mgr)\n 
object.__setattr__(obj, \"_flags\", sdata._flags)\n object.__setattr__(obj, \"_name\", sdata._name)\n return obj\n\n\nclass FrameSplitter(DataSplitter):\n def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:\n # Fastpath equivalent to:\n # if self.axis == 0:\n # return sdata.iloc[slice_obj]\n # else:\n # return sdata.iloc[:, slice_obj]\n mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)\n # __finalize__ not called here, must be applied by caller if applicable\n\n # fastpath equivalent to `return sdata._constructor(mgr)`\n obj = type(sdata)._from_mgr(mgr)\n object.__setattr__(obj, \"_flags\", sdata._flags)\n return obj\n\n\ndef get_splitter(\n data: NDFrame, labels: np.ndarray, ngroups: int, axis: int = 0\n) -> DataSplitter:\n if isinstance(data, Series):\n klass: type[DataSplitter] = SeriesSplitter\n else:\n # i.e. DataFrame\n klass = FrameSplitter\n\n return klass(data, labels, ngroups, axis)\n"
] |
[
[
"pandas.core.sorting.get_group_index",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexes.api.ensure_index",
"pandas.core.arrays.integer.Int64Dtype",
"pandas._libs.reduction.extract_result",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.sorting.get_group_index_sorter",
"numpy.dtype",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.bincount",
"pandas.core.dtypes.missing.isna",
"numpy.empty",
"pandas.core.indexes.api.Index",
"pandas.core.arrays.floating.Float64Dtype",
"numpy.arange",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_1d_only_ea_obj",
"numpy.flatnonzero",
"pandas._libs.lib.maybe_convert_objects",
"pandas.errors.AbstractMethodError",
"numpy.zeros",
"numpy.lexsort",
"pandas.core.dtypes.cast.maybe_cast_pointwise_result",
"pandas.core.groupby.grouper.Grouping",
"numpy.diff",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_complex_dtype",
"pandas.core.sorting.decons_obs_group_ids",
"pandas.core.sorting.compress_group_index",
"pandas._libs.lib.generate_slices",
"pandas.core.indexes.api.MultiIndex",
"pandas.core.series.Series",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.dtypes.common.ensure_float64",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.sorting.get_flattened_list",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas._libs.reduction.check_result_array",
"pandas.core.sorting.get_indexer_dict"
]
] |
socsol/infsocsol
|
[
"67038e95e891c1f921974b88b072e1fbe41c8c0f"
] |
[
"tests/test_fisheries_det_basic.py"
] |
[
"# Copyright 2019 Alastair Pharo\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy\nimport os\nimport scipy\nfrom numpy.testing import assert_allclose\nfrom infsocsol.helpers import matrix\n\n@pytest.fixture(scope=\"module\", params=[\n # engine states time_step start steps steady steady_accuracy optim_accuracy\n ( 'matlab', 10, 1, (100.0, 0.5), 100, True, 0.01, 0.009 ),\n ( 'matlab', 20, 0.5, (600.0, 0.6), 200, True, 0.01, 0.015 ),\n ( 'matlab', 40, 0.25, (60.0, 0.1), 300, True, 0.01, 0.018 ),\n ( 'matlab', 10, 1, (600.0, 1.0), 200, False, 0.001, None ),\n ( 'octave', 10, 1, (100.0, 0.5), 100, True, 0.001, 0.009 ),\n ( 'octave', 20, 0.5, (600.0, 0.6), 200, True, 0.001, 0.015 )\n])\ndef fisheries_scenario(request):\n return request.param\n\ndef test_fisheries_det_basic(engines, fisheries_scenario):\n _engine, states, time_step, _start, steps, steady, steady_accuracy, optim_accuracy = fisheries_scenario\n engine = engines[_engine]\n start = matrix(engine, _start)\n engine.cd(os.path.join(os.path.dirname(__file__), \"fisheries_det_basic\"))\n\n engine.solve(float(states), float(time_step), nargout=0)\n final = numpy.array(engine.sim_final(start, steps))\n # This is determined by setting s\\dot = 0, which solves to 1 = x/L + q/r e\n steady_one = numpy.dot(final, [1/600, 5/4])\n\n if steady:\n assert_allclose(steady_one, 1, atol=steady_accuracy)\n\n # This is the most profitable steady state -- x = L/2 + c/2pq\n profit_max_steady = numpy.array([[302.5, 0.39667]])\n assert_allclose(final, profit_max_steady, rtol=optim_accuracy)\n else:\n assert steady_one > 1 + steady_accuracy\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot"
]
] |
qphong/BayesOpt-LV
|
[
"b852a94e9bae8716566e014ca1dd02186bcdf7ca"
] |
[
"functions.py"
] |
[
"import numpy as np\nimport scipy.stats as scst\nimport scipy.special as scsp\nimport scipy.optimize as scopt\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport pickle\nimport os\nimport sys\n\ntry:\n import gpflow\nexcept:\n raise Exception(\"Requires gpflow!\")\n\n\nimport utils\n\n\ndef fit_gp(\n X,\n Y,\n noise_var=None,\n train_noise_var=True,\n min_var=1e-4,\n max_var=4.0,\n kernel_type=\"matern52\",\n):\n # use gpflow to get the hyperparameters for the function\n with tf.Graph().as_default() as graph:\n with tf.Session(graph=graph).as_default():\n xdim = X.shape[1]\n\n if kernel_type == \"se\":\n kernel = gpflow.kernels.RBF(xdim, ARD=True)\n elif kernel_type == \"matern52\":\n kernel = gpflow.kernels.Matern52(xdim, ARD=True)\n else:\n raise Exception(\"Unknown kernel:\", kernel_type)\n\n meanf = gpflow.mean_functions.Constant()\n\n with gpflow.defer_build():\n m = gpflow.models.GPR(X, Y, kern=kernel, mean_function=meanf)\n\n if train_noise_var:\n # concentration, rate = 1.1, 1./0.5 (in BoRisk)\n # => shape, scale = 1.1, 0.5\n gamma_shape = 1.1\n gamma_scale = 0.5\n m.likelihood.variance.prior = gpflow.priors.Gamma(\n gamma_shape, gamma_scale\n ) # shape, scale\n m.likelihood.variance.transform = gpflow.transforms.Logistic(\n min_var, max_var\n )\n \n # \"Initialize likelihood variance at the mode of the prior (from BoRisk)\"\n prior_mode = (gamma_shape - 1) * gamma_scale\n m.likelihood.variance.assign(prior_mode) # 1e-4\n elif noise_var is not None:\n m.likelihood.variance = noise_var\n m.likelihood.variance.trainable = False\n\n else:\n raise Exception(\"Require noise variance!\")\n\n m.compile()\n opt = gpflow.train.ScipyOptimizer()\n has_error = False\n\n try:\n opt.minimize(m)\n except:\n has_error = True\n\n if has_error:\n return has_error, None\n else:\n gpf_lscale = m.kern.lengthscales.value\n gpf_signal_var = m.kern.variance.value\n lscale = 1.0 / (gpf_lscale * gpf_lscale)\n meanf_const = m.mean_function.c.value\n noise_var = m.likelihood.variance.value\n\n return has_error, {\n \"meanf\": meanf_const,\n \"signal_var\": gpf_signal_var,\n \"lengthscale\": lscale,\n \"noise_var\": noise_var,\n }\n\n\ndef get_meshgrid(xmin, xmax, nx, xdim):\n x1d = np.linspace(xmin, xmax, nx)\n vals = [x1d] * xdim\n xds = np.meshgrid(*vals)\n\n xs = np.concatenate([xd.reshape(-1, 1) for xd in xds], axis=1)\n return xs\n\n\ndef func_gp_prior(xdim, l, sigma, seed, name=\"\"):\n np.random.seed(seed)\n\n filename = \"func_gp_prior_param_seed{}_{}.pkl\".format(seed, name)\n n_feats = 1000\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as infile:\n data = pickle.load(infile)\n W = data[\"W\"]\n b = data[\"b\"]\n theta = data[\"theta\"]\n\n else:\n l = np.ones([1, xdim]) * l\n W = np.random.randn(n_feats, xdim) * np.tile(np.sqrt(l), (n_feats, 1))\n b = 2.0 * np.pi * np.random.rand(n_feats, 1)\n theta = np.random.randn(n_feats, 1)\n\n with open(filename, \"wb\") as outfile:\n pickle.dump(\n {\"W\": W, \"b\": b, \"theta\": theta},\n outfile,\n protocol=pickle.HIGHEST_PROTOCOL,\n )\n\n def f(x):\n x = np.array(x).reshape(-1, xdim)\n return (\n theta.T.dot(np.sqrt(2.0 * sigma / n_feats)).dot(\n np.cos(W.dot(x.T) + np.tile(b, (1, x.shape[0])))\n )\n ).squeeze()\n\n return f\n\n\ndef func_gp_prior_tf(xdim, l, sigma, seed, name=\"\", dtype=tf.float64):\n filename = \"func_gp_prior_param_seed{}_{}.pkl\".format(seed, name)\n n_feats = 1000\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as infile:\n data = pickle.load(infile)\n W = tf.constant(data[\"W\"], 
dtype=dtype)\n b = tf.constant(data[\"b\"], dtype=dtype)\n theta = tf.constant(data[\"theta\"], dtype=dtype)\n else:\n raise Exception(\"Require to run func_gp_prior to generate the parameters!\")\n\n def f(x):\n x = tf.reshape(x, shape=(-1, xdim))\n\n return tf.squeeze(\n tf.cast(tf.sqrt(2.0 * sigma / n_feats), dtype=dtype)\n * tf.linalg.matrix_transpose(theta)\n @ (\n tf.cos(\n W @ tf.linalg.matrix_transpose(x)\n + tf.tile(b, multiples=(1, tf.shape(x)[0]))\n )\n )\n )\n\n return f\n\n\ndef negative_branin_uniform(dtype=tf.float64):\n xdim = 1\n zdim = 1\n input_dim = xdim + zdim\n\n xmin = 0.0\n xmax = 1.0\n\n # zmin, zmax only used for continuous z\n zmin = 0.0\n zmax = 1.0\n\n xs = get_meshgrid(xmin, xmax, 50, xdim)\n # xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin\n\n def f(x):\n x = x.reshape(-1, input_dim)\n x = 15.0 * x - np.array([5.0, 0.0])\n\n val = (\n -1.0\n / 51.95\n * (\n (\n x[:, 1]\n - 5.1 * x[:, 0] ** 2 / (4 * np.pi ** 2)\n + 5.0 * x[:, 0] / np.pi\n - 6.0\n )\n ** 2\n + (10.0 - 10.0 / (8.0 * np.pi)) * np.cos(x[:, 0])\n - 44.81\n )\n )\n return val\n\n def f_tf(x):\n x = tf.reshape(x, shape=(-1, input_dim))\n x = tf.cast(15.0, dtype) * x - tf.cast([5.0, 0.0], dtype)\n\n val = (\n tf.cast(-1.0, dtype)\n / tf.cast(51.95, dtype)\n * (\n tf.math.pow(\n x[:, 1]\n - tf.cast(5.1, dtype)\n * x[:, 0]\n * x[:, 0]\n / tf.cast(4 * np.pi ** 2, dtype)\n + tf.cast(5.0, dtype) * x[:, 0] / tf.cast(np.pi, dtype)\n - tf.cast(6.0, dtype),\n 2,\n )\n + (\n tf.cast(10.0, dtype)\n - tf.cast(10.0, dtype) / tf.cast(8.0 * np.pi, dtype)\n )\n * tf.cos(x[:, 0])\n - tf.cast(44.81, dtype)\n )\n )\n return val\n\n mean_tnorm = (zmin + zmax) / 2.0\n std_tnorm = (zmax - zmin) / 8.0\n low_tnorm = mean_tnorm - 2.0 * std_tnorm\n high_tnorm = mean_tnorm + 2.0 * std_tnorm\n\n truncated_normal = tfp.distributions.TruncatedNormal(\n loc=tf.cast(mean_tnorm, dtype=tf.float64),\n scale=tf.cast(std_tnorm, dtype=tf.float64),\n low=tf.cast(low_tnorm, dtype=tf.float64),\n high=tf.cast(high_tnorm, dtype=tf.float64),\n name=\"branin_truncated_normal\",\n )\n\n def z_tnorm_generator(n):\n return truncated_normal.sample(sample_shape=(n, zdim))\n\n def z_lpdf(z): # (None,zdim)\n return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)\n\n zmid = (zmin + zmax) / 2.0\n z_values = np.linspace(zmin, zmax, 30).reshape(-1, 1)\n z_probs = np.ones(30) / 30.0\n z_lprobs = np.log(z_probs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"negative_branin_uniform\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"z_generator\": z_tnorm_generator,\n \"z_lpdf\": z_lpdf,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n \"zprobs\": z_probs,\n \"lengthscale\": np.array([12.14689435, 0.3134626]),\n \"signal_variance\": 1.5294688560240726,\n \"likelihood_variance\": 1e-2,\n \"rand_opt_init_x\": xs,\n \"max_var_discrete\": 0.7324786070977395,\n \"max_var_continuous\": 0.64118695,\n \"max_cvar_discrete\": -0.2899622792949111,\n }\n\n\ndef negative_goldstein_uniform(dtype=tf.float64):\n xdim = 1\n zdim = 1\n input_dim = xdim + zdim\n\n xmin = 0.0\n xmax = 1.0\n\n # zmin, zmax only used for continuous z\n zmin = 0.0\n zmax = 1.0\n\n xs = get_meshgrid(xmin, xmax, 50, xdim)\n # xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin\n\n def f(x):\n x = x.reshape(-1, input_dim)\n xb = x * 4.0 - 2.0\n\n val = -(\n np.log(\n (\n 1\n + (xb[:, 0] + xb[:, 1] + 1.0) ** 2\n * (\n 19\n - 14 * xb[:, 0]\n + 3 * xb[:, 0] ** 2\n - 14 * xb[:, 1]\n + 6 * xb[:, 0] * 
xb[:, 1]\n + 3 * xb[:, 1] ** 2\n )\n )\n * (\n 30\n + (2 * xb[:, 0] - 3 * xb[:, 1]) ** 2\n * (\n 18\n - 32 * xb[:, 0]\n + 12 * xb[:, 0] ** 2\n + 48 * xb[:, 1]\n - 36 * xb[:, 0] * xb[:, 1]\n + 27 * xb[:, 1] ** 2\n )\n )\n )\n - 8.693\n ) # / 2.427\n return val\n\n def f_tf(x):\n x = tf.reshape(x, shape=(-1, input_dim))\n xb = x * tf.cast(4.0, dtype) - tf.cast(2.0, dtype)\n\n val = -(\n tf.log(\n (\n tf.cast(1.0, dtype)\n + tf.math.pow(xb[:, 0] + xb[:, 1] + tf.cast(1.0, dtype), 2)\n * (\n tf.cast(19.0, dtype)\n - tf.cast(14.0, dtype) * xb[:, 0]\n + tf.cast(3.0, dtype) * xb[:, 0] * xb[:, 0]\n - tf.cast(14.0, dtype) * xb[:, 1]\n + tf.cast(6.0, dtype) * xb[:, 0] * xb[:, 1]\n + tf.cast(3.0, dtype) * xb[:, 1] * xb[:, 1]\n )\n )\n * (\n tf.cast(30.0, dtype)\n + tf.math.pow(\n tf.cast(2.0, dtype) * xb[:, 0] - tf.cast(3.0, dtype) * xb[:, 1],\n 2,\n )\n * (\n tf.cast(18.0, dtype)\n - tf.cast(32.0, dtype) * xb[:, 0]\n + tf.cast(12.0, dtype) * xb[:, 0] * xb[:, 0]\n + tf.cast(48.0, dtype) * xb[:, 1]\n - tf.cast(36.0, dtype) * xb[:, 0] * xb[:, 1]\n + tf.cast(27.0, dtype) * xb[:, 1] * xb[:, 1]\n )\n )\n )\n - tf.cast(8.693, dtype)\n ) # / tf.cast(2.427, dtype)\n return val\n\n mean_tnorm = (zmin + zmax) / 2.0\n std_tnorm = (zmax - zmin) / 8.0\n low_tnorm = mean_tnorm - 2.0 * std_tnorm\n high_tnorm = mean_tnorm + 2.0 * std_tnorm\n\n truncated_normal = tfp.distributions.TruncatedNormal(\n loc=tf.cast(mean_tnorm, dtype=tf.float64),\n scale=tf.cast(std_tnorm, dtype=tf.float64),\n low=tf.cast(low_tnorm, dtype=tf.float64),\n high=tf.cast(high_tnorm, dtype=tf.float64),\n name=\"branin_truncated_normal\",\n )\n\n def z_tnorm_generator(n):\n return truncated_normal.sample(sample_shape=(n, zdim))\n\n def z_lpdf(z): # (None,zdim)\n return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)\n\n zmid = (zmin + zmax) / 2.0\n z_values = np.linspace(zmin, zmax, 50).reshape(-1, 1)\n z_probs = np.ones(50) / 50.0\n z_lprobs = np.log(z_probs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"negative_goldstein_uniform\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"z_generator\": z_tnorm_generator,\n \"z_lpdf\": z_lpdf,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n \"zprobs\": z_probs,\n \"lengthscale\": np.array([81.1012626, 83.22416009]),\n \"signal_variance\": 0.02584212360067521,\n \"likelihood_variance\": 1e-2,\n \"rand_opt_init_x\": xs,\n \"max_var_discrete\": 1.7992384381492217,\n \"max_var_continuous\": 1.50360403,\n \"max_cvar_discrete\": -2.394406754560626,\n }\n\n\ndef portfolio_computeKmm_np(X, l, sigma):\n n = X.shape[0]\n xdim = X.shape[1]\n\n l = l.reshape(1, xdim)\n\n X = X / l\n\n Q = np.tile(np.sum(X * X, axis=1, keepdims=True), reps=(1, n))\n dist = Q + Q.T - 2 * X.dot(X.T)\n\n kmm = sigma * np.exp(-0.5 * dist)\n return kmm\n\n\ndef portfolio_computeKnm_np(X, Xbar, l, sigma):\n \"\"\"\n X: n x d\n l: d\n \"\"\"\n n = np.shape(X)[0]\n m = np.shape(Xbar)[0]\n xdim = np.shape(X)[1]\n\n l = l.reshape(1, xdim)\n\n X = X / l\n Xbar = Xbar / l\n\n Q = np.tile(np.sum(X * X, axis=1, keepdims=True), reps=(1, m))\n Qbar = np.tile(np.sum(Xbar * Xbar, axis=1, keepdims=True).T, reps=(n, 1))\n\n dist = Qbar + Q - 2 * X.dot(Xbar.T)\n knm = sigma * np.exp(-0.5 * dist)\n return knm\n\n\ndef portfolio_computeKnm(X, Xbar, l, sigma, dtype=tf.float32):\n \"\"\"\n X: n x d\n l: d\n \"\"\"\n n = tf.shape(X)[0]\n m = tf.shape(Xbar)[0]\n\n X = X / l\n Xbar = Xbar / l\n\n Q = tf.tile(tf.reduce_sum(tf.square(X), axis=1, keepdims=True), 
multiples=(1, m))\n Qbar = tf.tile(\n tf.transpose(tf.reduce_sum(tf.square(Xbar), axis=1, keepdims=True)),\n multiples=(n, 1),\n )\n\n dist = Qbar + Q - 2 * X @ tf.transpose(Xbar)\n knm = sigma * tf.exp(-0.5 * dist)\n return knm\n\n\ndef negative_portfolio_optimization_gaussian(dtype=tf.float64):\n # noise is 1e-2\n # z follows Gaussian\n xdim = 3\n zdim = 2\n input_dim = xdim + zdim\n\n xmin = 0.0\n xmax = 1.0\n\n # zmin, zmax only used for continuous z\n zmin = 0.0\n zmax = 1.0\n\n xs = get_meshgrid(xmin, xmax, 5, xdim)\n\n with open(\"portfolio_data/data.pkl\", \"rb\") as readfile:\n data = pickle.load(readfile)\n X = data[\"X\"].astype(np.float64) # (3000,5)\n Y = data[\"Y\"].astype(np.float64) # (3000,1)\n\n with open(\"portfolio_data/GP_params.pkl\", \"rb\") as readfile:\n params = pickle.load(readfile)\n lengthscales = params[\"lengthscales\"]\n kern_var = params[\"kern_variance\"]\n noise_var = params[\"noise_variance\"]\n mean_constant = params[\"mean_constant\"]\n invKmm = params[\"invKmm\"]\n\n print(Y)\n print(\"**PARAMS:\", params)\n\n invKmm_tf = tf.constant(invKmm, dtype=dtype)\n mean_constant_tf = tf.constant(mean_constant, dtype=dtype)\n Y_tf = tf.constant(Y, dtype=dtype)\n\n def f(x):\n x = x.reshape(-1, input_dim)\n Knm = portfolio_computeKnm_np(x, X, lengthscales, kern_var)\n val = mean_constant + Knm @ invKmm @ (Y - mean_constant) # posterior mean\n\n return -val.reshape(x.shape[0])\n\n def f_tf(x):\n x = tf.reshape(x, shape=(-1, input_dim))\n Knm = portfolio_computeKnm(x, X, lengthscales, kern_var)\n val = mean_constant_tf + Knm @ invKmm_tf @ (Y_tf - mean_constant_tf)\n\n return -tf.reshape(val, shape=(tf.shape(x)[0],))\n\n def z_tnorm_generator(n):\n return tf.random.uniform(shape=(n, zdim), minval=0.0, maxval=1.0, dtype=dtype)\n\n def z_lpdf(z):\n # dummy, not really pdf\n # but returning a constant\n return tf.reduce_sum(tf.ones_like(z, dtype=dtype), axis=1)\n\n zmid = (zmin + zmax) / 2.0\n z_values = get_meshgrid(zmid - 0.25, zmid + 0.25, 5, zdim)\n z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.15 ** 2\n z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))\n z_probs = np.exp(z_lprobs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"negative_portfolio_optimization_gaussian\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"z_generator\": z_tnorm_generator,\n \"z_lpdf\": z_lpdf,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n \"zprobs\": z_probs,\n \"lengthscale\": lengthscales,\n \"signal_variance\": kern_var,\n \"likelihood_variance\": 1e-4,\n \"rand_opt_init_x\": xs,\n \"max_var_discrete\": 17.835917287050652,\n \"max_var_continuous\": 17.835917287050652, # it takes too long to optimize, so we use the discrete case as an approximation\n \"max_cvar_X\": [0.0, 1.0, 0.08484073],\n \"max_cvar_discrete\": 21.21,\n } # at [0., 1., 0.081978]\n\n\ndef negative_rescaled_hartmann6d_51(dtype=tf.float64):\n # xdim = 3\n # range: (0,1) for all dimensions\n # global maximum: -3.86278 at (0.114614, 0.555649, 0.852547)\n xdim = 5\n zdim = 1\n input_dim = xdim + zdim\n\n xmin = 0.0\n xmax = 1.0\n\n zmin = 0.0\n zmax = 1.0\n\n # maximum = 3.13449414\n # minimum = -1.30954062\n\n xs = get_meshgrid(xmin, xmax, 3, xdim)\n # xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin\n\n A = np.array(\n [\n [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],\n [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],\n [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],\n [17.0, 8.0, 0.05, 10.0, 0.1, 14.0],\n ]\n )\n A_tf = 
tf.constant(A, dtype=dtype)\n\n alpha = np.array([1.0, 1.2, 3.0, 3.2])\n alpha_tf = tf.constant(alpha, dtype=dtype)\n\n P = 1e-4 * np.array(\n [\n [1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],\n [2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],\n [2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],\n [4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0],\n ]\n )\n P_tf = tf.constant(P, dtype=dtype)\n\n def f(x):\n x = np.tile(x.reshape(-1, 1, input_dim), reps=(1, 4, 1))\n val = (\n 2.58 + np.sum(alpha * np.exp(-np.sum(A * (x - P) ** 2, axis=2)), axis=1)\n ) / 1.94\n # val = (val - minimum) / (maximum - minimum) * 2.0 - 1.0\n return val * 10.0\n\n def f_tf(x):\n x = tf.tile(tf.reshape(x, shape=(-1, 1, input_dim)), multiples=(1, 4, 1))\n val = (\n tf.constant(2.58, dtype)\n + tf.reduce_sum(\n alpha_tf\n * tf.exp(-tf.reduce_sum(A_tf * (x - P_tf) * (x - P_tf), axis=2)),\n axis=1,\n )\n ) / tf.constant(1.94, dtype)\n return val * tf.cast(10.0, dtype)\n\n mean_tnorm = (zmin + zmax) / 2.0\n std_tnorm = (zmax - zmin) / 8.0\n low_tnorm = mean_tnorm - 2.0 * std_tnorm\n high_tnorm = mean_tnorm + 2.0 * std_tnorm\n\n truncated_normal = tfp.distributions.TruncatedNormal(\n loc=tf.cast(mean_tnorm, dtype=tf.float64),\n scale=tf.cast(std_tnorm, dtype=tf.float64),\n low=tf.cast(low_tnorm, dtype=tf.float64),\n high=tf.cast(high_tnorm, dtype=tf.float64),\n name=\"branin_truncated_normal\",\n )\n\n def z_tnorm_generator(n):\n return truncated_normal.sample(sample_shape=(n, zdim))\n\n def z_lpdf(z): # (None,zdim)\n return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)\n\n zmid = (zmin + zmax) / 2.0\n z_values = get_meshgrid(zmid - 0.2, zmid + 0.2, 15, zdim)\n z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.2 ** 2\n z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))\n z_probs = np.exp(z_lprobs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"negative_rescaled_hartmann6d_51\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"z_generator\": z_tnorm_generator,\n \"z_lpdf\": z_lpdf,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n \"zprobs\": z_probs,\n \"lengthscale\": np.array([6.9512, 1.9341, 0.506, 4.2067, 5.0986, 3.5949]),\n \"signal_variance\": 1.423,\n \"likelihood_variance\": 1e-2,\n \"rand_opt_init_x\": xs,\n \"max_cvar_discrete\": 20.5428,\n } \n\n\ndef negative_rescaled_hartmann6d_15(dtype=tf.float64):\n # xdim = 3\n # range: (0,1) for all dimensions\n # global maximum: -3.86278 at (0.114614, 0.555649, 0.852547)\n xdim = 1\n zdim = 5\n input_dim = xdim + zdim\n\n xmin = 0.0\n xmax = 1.0\n\n zmin = 0.0\n zmax = 1.0\n\n xs = get_meshgrid(xmin, xmax, 50, xdim)\n # xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin\n\n A = np.array(\n [\n [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],\n [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],\n [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],\n [17.0, 8.0, 0.05, 10.0, 0.1, 14.0],\n ]\n )\n A_tf = tf.constant(A, dtype=dtype)\n\n alpha = np.array([1.0, 1.2, 3.0, 3.2])\n alpha_tf = tf.constant(alpha, dtype=dtype)\n\n P = 1e-4 * np.array(\n [\n [1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],\n [2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],\n [2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],\n [4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0],\n ]\n )\n P_tf = tf.constant(P, dtype=dtype)\n\n def f(x):\n x = np.tile(x.reshape(-1, 1, input_dim), reps=(1, 4, 1))\n val = (\n 2.58 + np.sum(alpha * np.exp(-np.sum(A * (x - P) ** 2, axis=2)), axis=1)\n ) / 1.94\n # val = (val - minimum) / (maximum 
- minimum) * 2.0 - 1.0\n return val * 10.0\n\n def f_tf(x):\n x = tf.tile(tf.reshape(x, shape=(-1, 1, input_dim)), multiples=(1, 4, 1))\n val = (\n tf.constant(2.58, dtype)\n + tf.reduce_sum(\n alpha_tf\n * tf.exp(-tf.reduce_sum(A_tf * (x - P_tf) * (x - P_tf), axis=2)),\n axis=1,\n )\n ) / tf.constant(1.94, dtype)\n return val * tf.cast(10.0, dtype)\n\n mean_tnorm = (zmin + zmax) / 2.0\n std_tnorm = (zmax - zmin) / 8.0\n low_tnorm = mean_tnorm - 2.0 * std_tnorm\n high_tnorm = mean_tnorm + 2.0 * std_tnorm\n\n truncated_normal = tfp.distributions.TruncatedNormal(\n loc=tf.cast(mean_tnorm, dtype=tf.float64),\n scale=tf.cast(std_tnorm, dtype=tf.float64),\n low=tf.cast(low_tnorm, dtype=tf.float64),\n high=tf.cast(high_tnorm, dtype=tf.float64),\n name=\"branin_truncated_normal\",\n )\n\n def z_tnorm_generator(n):\n return truncated_normal.sample(sample_shape=(n, zdim))\n\n def z_lpdf(z): # (None,zdim)\n return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)\n\n zmid = (zmin + zmax) / 2.0\n z_values = get_meshgrid(zmid - 0.2, zmid + 0.2, 3, zdim)\n z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.2 ** 2\n z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))\n z_probs = np.exp(z_lprobs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"negative_rescaled_hartmann6d_15\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"z_generator\": z_tnorm_generator,\n \"z_lpdf\": z_lpdf,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n \"zprobs\": z_probs,\n \"lengthscale\": np.array([6.9512, 1.9341, 0.506, 4.2067, 5.0986, 3.5949]),\n \"signal_variance\": 1.423,\n \"likelihood_variance\": 1e-4,\n \"rand_opt_init_x\": xs,\n \"max_cvar_discrete\": 14.1203, # near [0.2544893]\n } # haven't optimize yet\n\n\ndef yacht_hydrodynamics(dtype=tf.float64):\n filename = \"yacht_data/gp_hyperparameters.pkl\"\n\n with open(filename, \"rb\") as readfile:\n yacht = pickle.load(readfile)\n X = yacht[\"X\"]\n Y = yacht[\"Y\"]\n gp_hyper = yacht[\"gp_hyper\"]\n\n NK = utils.computeNKmm(\n X,\n gp_hyper[\"lengthscale\"],\n gp_hyper[\"signal_var\"],\n gp_hyper[\"noise_var\"],\n dtype=dtype,\n kernel_type=\"se\",\n )\n NKInv = utils.chol2inv(NK, dtype=dtype)\n NKInvs = tf.expand_dims(NKInv, axis=0)\n\n input_dim = X.shape[1]\n zdim = 1\n xdim = input_dim - zdim\n\n xmin = 0.0\n xmax = 1.0\n zmin = 0.0\n zmax = 1.0\n\n xs = get_meshgrid(xmin, xmax, 4, xdim)\n\n def f(x):\n x = x.reshape(-1, input_dim)\n mean_f = (\n utils.compute_mean_f_np(\n x,\n X,\n Y - gp_hyper[\"meanf\"],\n gp_hyper[\"lengthscale\"],\n gp_hyper[\"signal_var\"],\n gp_hyper[\"noise_var\"],\n kernel_type=\"se\",\n )\n + gp_hyper[\"meanf\"]\n )\n return -mean_f.reshape(-1, 1)\n\n def f_tf(x):\n x = tf.reshape(x, (-1, input_dim))\n mean_f = (\n utils.compute_mean_f(\n x,\n input_dim,\n 1,\n X,\n Y - gp_hyper[\"meanf\"],\n gp_hyper[\"lengthscale\"].reshape(1, input_dim),\n gp_hyper[\"signal_var\"].reshape(1, 1),\n gp_hyper[\"noise_var\"].reshape(1, 1),\n NKInvs,\n dtype=dtype,\n kernel_type=\"se\",\n )\n + gp_hyper[\"meanf\"]\n )\n return -mean_f\n\n zmid = 0.0\n z_values = np.linspace(zmin, zmax, 15).reshape(-1, 1)\n z_probs = np.ones(z_values.shape[0]) / z_values.shape[0]\n z_lprobs = np.log(z_probs)\n\n return {\n \"function\": f,\n \"function_tf\": f_tf,\n \"name\": \"yacht_hydrodynamics\",\n \"xdim\": xdim,\n \"zdim\": zdim,\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"zmin\": zmin,\n \"zmax\": zmax,\n \"zvalues\": z_values,\n \"zlprobs\": z_lprobs,\n 
\"zprobs\": z_probs,\n \"lengthscale\": gp_hyper[\"lengthscale\"],\n \"signal_variance\": gp_hyper[\"signal_var\"],\n \"likelihood_variance\": 0.0001, # gp_hyper[\"noise_var\"],\n \"rand_opt_init_x\": xs,\n \"max_cvar_discrete\": -1.009, # at [0.35523405 1. 0. 0. 0.85907464], alpha=0.3\n }"
] |
[
[
"tensorflow.exp",
"numpy.random.rand",
"numpy.meshgrid",
"tensorflow.ones_like",
"numpy.tile",
"numpy.exp",
"tensorflow.reshape",
"tensorflow.sqrt",
"numpy.cos",
"tensorflow.cast",
"tensorflow.shape",
"scipy.special.logsumexp",
"numpy.log",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.linalg.matrix_transpose",
"numpy.sqrt",
"numpy.array",
"tensorflow.expand_dims",
"tensorflow.cos",
"tensorflow.Session",
"numpy.random.randn",
"tensorflow.random.uniform",
"numpy.shape",
"tensorflow.reduce_sum",
"numpy.random.seed",
"numpy.sum",
"tensorflow.Graph",
"numpy.ones",
"numpy.linspace",
"tensorflow.square"
]
] |
iwillcodeu/KerasGANs
|
[
"636ec0533df1d1ba2bfe4ec9ae7aa66bd7ee2177"
] |
[
"wgan_gp/wgan_gp.py"
] |
[
"\n# Large amount of credit goes to:\n# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py\n# which I've used as a reference for this implementation\n\nfrom __future__ import print_function, division\n\nfrom keras.datasets import mnist\nfrom keras.layers.merge import _Merge\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import RMSprop\nfrom functools import partial\n\nimport keras.backend as K\n\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport numpy as np\n\nclass RandomWeightedAverage(_Merge):\n \"\"\"Provides a (random) weighted average between real and generated image samples\"\"\"\n def _merge_function(self, inputs):\n alpha = K.random_uniform((32, 1, 1, 1))\n return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])\n\nclass WGANGP():\n def __init__(self):\n self.img_rows = 28\n self.img_cols = 28\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.latent_dim = 100\n\n # Following parameter and optimizer set as recommended in paper\n self.n_critic = 5\n optimizer = RMSprop(lr=0.00005)\n\n # Build the generator and critic\n self.generator = self.build_generator()\n self.critic = self.build_critic()\n\n #-------------------------------\n # Construct Computational Graph\n # for the Critic\n #-------------------------------\n\n # Freeze generator's layers while training critic\n self.generator.trainable = False\n\n # Image input (real sample)\n real_img = Input(shape=self.img_shape)\n\n # Noise input\n z_disc = Input(shape=(self.latent_dim,))\n # Generate image based of noise (fake sample)\n fake_img = self.generator(z_disc)\n\n # Discriminator determines validity of the real and fake images\n fake = self.critic(fake_img)\n valid = self.critic(real_img)\n\n # Construct weighted average between real and fake images\n interpolated_img = RandomWeightedAverage()([real_img, fake_img])\n # Determine validity of weighted sample\n validity_interpolated = self.critic(interpolated_img)\n\n # Use Python partial to provide loss function with additional\n # 'averaged_samples' argument\n partial_gp_loss = partial(self.gradient_penalty_loss,\n averaged_samples=interpolated_img)\n partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names\n\n self.critic_model = Model(inputs=[real_img, z_disc],\n outputs=[valid, fake, validity_interpolated])\n self.critic_model.compile(loss=[self.wasserstein_loss,\n self.wasserstein_loss,\n partial_gp_loss],\n optimizer=optimizer,\n loss_weights=[1, 1, 10])\n #-------------------------------\n # Construct Computational Graph\n # for Generator\n #-------------------------------\n\n # For the generator we freeze the critic's layers\n self.critic.trainable = False\n self.generator.trainable = True\n\n # Sampled noise for input to generator\n z_gen = Input(shape=(self.latent_dim,))\n # Generate images based of noise\n img = self.generator(z_gen)\n # Discriminator determines validity\n valid = self.critic(img)\n # Defines generator model\n self.generator_model = Model(z_gen, valid)\n self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)\n\n\n def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):\n \"\"\"\n Computes gradient penalty based on prediction and weighted real / fake 
samples\n \"\"\"\n gradients = K.gradients(y_pred, averaged_samples)[0]\n # compute the euclidean norm by squaring ...\n gradients_sqr = K.square(gradients)\n # ... summing over the rows ...\n gradients_sqr_sum = K.sum(gradients_sqr,\n axis=np.arange(1, len(gradients_sqr.shape)))\n # ... and sqrt\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # compute lambda * (1 - ||grad||)^2 still for each single sample\n gradient_penalty = K.square(1 - gradient_l2_norm)\n # return the mean as loss over all the batch samples\n return K.mean(gradient_penalty)\n\n\n def wasserstein_loss(self, y_true, y_pred):\n return K.mean(y_true * y_pred)\n\n def build_generator(self):\n\n model = Sequential()\n\n model.add(Dense(128 * 7 * 7, activation=\"relu\", input_dim=self.latent_dim))\n model.add(Reshape((7, 7, 128)))\n model.add(UpSampling2D())\n model.add(Conv2D(128, kernel_size=4, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(UpSampling2D())\n model.add(Conv2D(64, kernel_size=4, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(Conv2D(self.channels, kernel_size=4, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=(self.latent_dim,))\n img = model(noise)\n\n return Model(noise, img)\n\n def build_critic(self):\n\n model = Sequential()\n\n model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding=\"same\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(32, kernel_size=3, strides=2, padding=\"same\"))\n model.add(ZeroPadding2D(padding=((0,1),(0,1))))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(1))\n\n model.summary()\n\n img = Input(shape=self.img_shape)\n validity = model(img)\n\n return Model(img, validity)\n\n def train(self, epochs, batch_size, sample_interval=50):\n\n # Load the dataset\n (X_train, _), (_, _) = mnist.load_data()\n\n # Rescale -1 to 1\n X_train = (X_train.astype(np.float32) - 127.5) / 127.5\n X_train = np.expand_dims(X_train, axis=3)\n\n # Adversarial ground truths\n valid = -np.ones((batch_size, 1))\n fake = np.ones((batch_size, 1))\n dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty\n for epoch in range(epochs):\n\n for _ in range(self.n_critic):\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Select a random batch of images\n idx = np.random.randint(0, X_train.shape[0], batch_size)\n imgs = X_train[idx]\n # Sample generator input\n noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n # Train the critic\n d_loss = self.critic_model.train_on_batch([imgs, noise],\n [valid, fake, dummy])\n\n # ---------------------\n # Train Generator\n # ---------------------\n\n g_loss = self.generator_model.train_on_batch(noise, valid)\n\n # Plot the progress\n print (\"%d [D loss: %f] [G loss: %f]\" % (epoch, d_loss[0], g_loss))\n\n # If at save interval => save generated image samples\n if epoch % sample_interval == 0:\n self.sample_images(epoch)\n\n def 
sample_images(self, epoch):\n r, c = 5, 5\n noise = np.random.normal(0, 1, (r * c, self.latent_dim))\n gen_imgs = self.generator.predict(noise)\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(\"images/mnist_%d.png\" % epoch)\n plt.close()\n\n\nif __name__ == '__main__':\n wgan = WGANGP()\n wgan.train(epochs=30000, batch_size=32, sample_interval=100)\n"
] |
[
[
"numpy.random.normal",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.random.randint",
"numpy.expand_dims"
]
] |
claudius-kienle/self-supervised-depth-denoising
|
[
"4dffb30e8ef5022ef665825d26f45f67bf712cfd"
] |
[
"src/evaluate/plot_evaluation.py"
] |
[
"from argparse import ArgumentParser\nimport enum\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nimport yaml\n\nimport pandas as pd\n\n\nMETRICS = ['total_L1', '0_to10mm_L1', '10_to20mm_L1', 'above20mm_L1']\nMETRICS_TITLE = ['L1Loss', 'L1Loss in [0,10) mm', 'L1Loss in [10,20) mm', 'L1Loss above 20 mm']\n\n\ndef get_xticks(dir, trainer_ids):\n\n mapping = {}\n for trainer_id in trainer_ids:\n if trainer_id in mapping.keys():\n continue\n\n config_path = dir / trainer_id / 'config.yml'\n with open(config_path) as f:\n config = yaml.safe_load(f)\n ds_config = config['dataset_config']\n nw_config = config['network_config']\n dd = ds_config['depth_difference_threshold'] if 'depth_difference_threshold' in ds_config else 0\n s = ds_config['scale_images']\n ic = nw_config['initial_channels']\n lr = nw_config['learning_rate']\n lt = nw_config['loss_type']\n o = nw_config['output_activation'] if 'output_activation' in nw_config else 'none'\n sk = int(nw_config['skip_connections']) if 'skip_connections' in nw_config else 0\n\n if lt == 'huber_loss':\n lt = 'h'\n elif lt == 'mean_l1_loss':\n lt = 'l1'\n elif lt == 'mean_l2_loss':\n lt = 'l2'\n\n if o == 'none':\n o = 'n'\n elif o == 'relu':\n o = 'r'\n\n base_title = f\"dd{dd}_s{s}_ic{ic}_lr{lr}_l{lt}_o{o}_sk{sk}\"\n\n num_titles = [v.startswith(base_title) for v in mapping.values()].count(True)\n title = base_title + f\"_{num_titles}\" \n mapping[trainer_id] = title\n\n return mapping\n\n\ndef generate_bar_plot(df):\n df_grouped = df.groupby(['title', 'it_ot'])\n df_grouped_mean = df_grouped.mean()\n df_grouped_std = df_grouped.std()\n\n _, ax = plt.subplots(1, len(METRICS), figsize=(10, 5))\n for idx, metric in enumerate(METRICS):\n df_mean = df_grouped_mean.get(metric).unstack()\n df_std = df_grouped_std.get(metric).unstack()\n df_mean.plot.bar(ax=ax[idx], yerr=df_std, use_index=False, ylim=(0, None))\n\n ax[idx].set_title(METRICS_TITLE[idx], fontdict=dict(fontsize=9))\n ax[idx].set_xticklabels(df_mean.index)\n\n leg = ax[idx].legend(frameon=True, fontsize=8)\n leg.set_title(None)\n # leg.get_frame().set_alpha(None)\n # leg.get_frame().set_facecolor((1, 1, 1, 0.5))\n # leg.get_frame().set_edgecolor('black')\n # leg.get_frame().set_linewidth(0.5)\n\n\ndef get_box_plot(df: pd.DataFrame):\n def set_box_color(bp, color):\n plt.setp(bp['boxes'], color=color)\n plt.setp(bp['whiskers'], color=color)\n plt.setp(bp['caps'], color=color)\n plt.setp(bp['medians'], color=color)\n\n # sort first N models by loss\n N = 5 # 10\n df_grouped = df.groupby(['title', 'it_ot'])\n df_grouped_mean = df_grouped.median().unstack()\n df_grouped_mean_metric = df_grouped_mean[METRICS[0]]\n df_grouped_mean['metricDiff'] = df_grouped_mean_metric['output/target'] - df_grouped_mean_metric['input/target']\n df_grouped_mean.sort_values(by=['metricDiff'], ascending=[True], inplace=True)\n sorted_titles = df_grouped_mean.reset_index()['title'].iloc[:N].to_list()\n df = df_grouped.filter(lambda x: x['title'].isin(sorted_titles).all())\n\n # group by (title, it_ot) and create it/ot colors\n df_grouped = df.groupby(['title', 'it_ot'])\n\n it_colors = {\n title: np.asarray(plt.get_cmap('tab20')((2 * idx + 1) / 20))\n for idx, title in enumerate(sorted_titles) # without i/t pairs\n }\n ot_colors = {\n title: np.asarray(plt.get_cmap('tab20')((2 * idx) / 20))\n for idx, title in enumerate(sorted_titles) # without i/t pairs\n }\n\n fig, ax = plt.subplots(1, len(METRICS), figsize=(10, 4))\n for plot_idx, metric in 
enumerate(METRICS):\n width = 0.6\n inner_space = width * 2/3\n outer_space = 2\n df_grouped_metric = df_grouped[metric].apply(list)\n df_ot_grouped = df_grouped_metric.loc[:, 'output/target']\n df_it_grouped = df_grouped_metric.loc[:, 'input/target']\n\n for idx, title in enumerate(sorted_titles):\n it_value = df_it_grouped.loc[title]\n bp_it = ax[plot_idx].boxplot(it_value, positions=[idx * outer_space - inner_space],\n sym='', widths=width)\n set_box_color(bp_it, it_colors[title])\n\n ot_value = df_ot_grouped.loc[title]\n bp_ot = ax[plot_idx].boxplot(ot_value, positions=[idx * outer_space + inner_space],\n sym='', widths=width)\n set_box_color(bp_ot, ot_colors[title])\n\n ax[plot_idx].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n\n ax[plot_idx].set_title(METRICS_TITLE[plot_idx], fontdict=dict(fontsize=9))\n ax[plot_idx].set_ylabel(\"mm\", labelpad=2.0)\n\n custom_legend_lines = [\n Line2D([0], [0], color=color, lw=4)\n for color in ot_colors.values()\n ]\n\n fig.legend(custom_legend_lines, ot_colors.keys(), loc='upper right', ncol=(len(custom_legend_lines) // 4) + 1)\n\n plt.tight_layout()\n plt.subplots_adjust(wspace=0.3, top=0.75)\n\n\ndef main(args):\n plot_bar = False\n df = pd.read_json(args.eval_path, dtype={'model': str, 'epoch': str})\\\n\n # unsqueeze metrics list to rows\n df = df.explode('metrics').reset_index()\n\n # metrics dict to columns\n df = df.drop('metrics', axis=1).join(pd.DataFrame(df.metrics.values.tolist())).drop('index', axis=1)\n df.rename(columns={'it': 'input/target', 'ot': 'output/target'}, inplace=True)\n \n # filter out trainer_ids\n blacklisted_trainer_ids = [\"1646936119.3354385\", \"1646987487.7802982\", \"1647161196.55366\"]\n df = df.loc[df['model'].apply(lambda x: x not in blacklisted_trainer_ids)]\n\n df = df.set_index(['model', 'epoch'])\n \n df = df.stack().to_frame(name='metrics')\n df.index.set_names('it_ot', level=2, inplace=True)\n\n df = df['metrics'].apply(pd.Series)\n df = df.reset_index()\n\n xticks = get_xticks(args.eval_path.parent, df['model'].to_list())\n df.insert(0, 'title', df['model'].apply(lambda v: xticks[v]))\n # df['title'] = df['title'] + \"_\" + df['epoch']\n df = df.drop(['model', 'epoch'], axis=1)\n\n if plot_bar:\n generate_bar_plot(df)\n else:\n get_box_plot(df)\n\n # plt.show()\n plt.savefig(f\"{args.eval_path.parent}/plt.png\") # , bbox_inches='tight', pad_inches=0)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"eval_path\", type=Path)\n main(parser.parse_args())\n"
] |
[
[
"matplotlib.lines.Line2D",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.get_cmap",
"pandas.read_json",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots_adjust"
]
] |
junyongk/pytorch-seq2seq
|
[
"9c60de7825cd0f65050fa958126340607e75057c"
] |
[
"seq2seq/models/DecoderRNN.py"
] |
[
"import random\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nfrom .attention import Attention\nfrom .baseRNN import BaseRNN\n\nif torch.cuda.is_available():\n import torch.cuda as device\nelse:\n import torch as device\n\n\nclass DecoderRNN(BaseRNN):\n r\"\"\"\n Provides functionality for decoding in a seq2seq framework, with an option for attention.\n\n Args:\n vocab_size (int): size of the vocabulary\n max_len (int): a maximum allowed length for the sequence to be processed\n hidden_size (int): the number of features in the hidden state `h`\n sos_id (int): index of the start of sentence symbol\n eos_id (int): index of the end of sentence symbol\n n_layers (int, optional): number of recurrent layers (default: 1)\n rnn_cell (str, optional): type of RNN cell (default: gru)\n bidirectional (bool, optional): if the encoder is bidirectional (default False)\n input_dropout_p (float, optional): dropout probability for the input sequence (default: 0)\n dropout_p (float, optional): dropout probability for the output sequence (default: 0)\n use_attention(bool, optional): flag indication whether to use attention mechanism or not (default: false)\n\n Attributes:\n KEY_ATTN_SCORE (str): key used to indicate attention weights in `ret_dict`\n KEY_LENGTH (str): key used to indicate a list representing lengths of output sequences in `ret_dict`\n KEY_SEQUENCE (str): key used to indicate a list of sequences in `ret_dict`\n\n Inputs: inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio\n - **inputs** (batch, seq_len, input_size): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. It is used for teacher forcing when provided. (default `None`)\n - **encoder_hidden** (num_layers * num_directions, batch_size, hidden_size): tensor containing the features in the\n hidden state `h` of encoder. Used as the initial hidden state of the decoder. (default `None`)\n - **encoder_outputs** (batch, seq_len, hidden_size): tensor with containing the outputs of the encoder.\n Used for attention mechanism (default is `None`).\n - **function** (torch.nn.Module): A function used to generate symbols from RNN hidden state\n (default is `torch.nn.functional.log_softmax`).\n - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. 
A random number is\n drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,\n teacher forcing would be used (default is 0).\n\n Outputs: decoder_outputs, decoder_hidden, ret_dict\n - **decoder_outputs** (seq_len, batch, vocab_size): list of tensors with size (batch_size, vocab_size) containing\n the outputs of the decoding function.\n - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden\n state of the decoder.\n - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers\n representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of\n predicted token IDs }.\n \"\"\"\n\n KEY_ATTN_SCORE = 'attention_score'\n KEY_LENGTH = 'length'\n KEY_SEQUENCE = 'sequence'\n\n def __init__(self, vocab_size, max_len, hidden_size,\n sos_id, eos_id,\n n_layers=1, rnn_cell='gru', bidirectional=False,\n input_dropout_p=0, dropout_p=0, use_attention=False):\n super(DecoderRNN, self).__init__(vocab_size, max_len, hidden_size,\n input_dropout_p, dropout_p,\n n_layers, rnn_cell)\n\n self.bidirectional_encoder = bidirectional\n self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)\n\n self.output_size = vocab_size\n self.max_length = max_len\n self.use_attention = use_attention\n self.eos_id = eos_id\n self.sos_id = sos_id\n\n self.init_input = None\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n if use_attention:\n self.attention = Attention(self.hidden_size)\n\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward_step(self, input_var, hidden, encoder_outputs, function):\n batch_size = input_var.size(0)\n output_size = input_var.size(1)\n embedded = self.embedding(input_var)\n embedded = self.input_dropout(embedded)\n\n output, hidden = self.rnn(embedded, hidden)\n\n attn = None\n if self.use_attention:\n output, attn = self.attention(output, encoder_outputs)\n\n predicted_softmax = function(self.out(output.view(-1, self.hidden_size))).view(batch_size, output_size, -1)\n return predicted_softmax, hidden, attn\n\n def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None,\n function=F.log_softmax, teacher_forcing_ratio=0):\n ret_dict = dict()\n if self.use_attention:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()\n\n inputs, batch_size, max_length = self._validate_args(inputs, encoder_hidden, encoder_outputs,\n function, teacher_forcing_ratio)\n decoder_hidden = self._init_state(encoder_hidden)\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n decoder_outputs = []\n sequence_symbols = []\n lengths = np.array([max_length] * batch_size)\n\n def decode(step, step_output, step_attn):\n decoder_outputs.append(step_output)\n if self.use_attention:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)\n symbols = decoder_outputs[-1].topk(1)[1]\n sequence_symbols.append(symbols)\n\n eos_batches = symbols.data.eq(self.eos_id)\n if eos_batches.dim() > 0:\n eos_batches = eos_batches.cpu().view(-1).numpy()\n update_idx = ((lengths > step) & eos_batches) != 0\n lengths[update_idx] = len(sequence_symbols)\n return symbols\n\n # Manual unrolling is used to support random teacher forcing.\n # If teacher_forcing_ratio is True or False instead of a probability, the unrolling can be done in graph\n if use_teacher_forcing:\n decoder_input = inputs[:, :-1]\n decoder_output, decoder_hidden, attn = 
self.forward_step(decoder_input, decoder_hidden, encoder_outputs,\n function=function)\n\n for di in range(decoder_output.size(1)):\n step_output = decoder_output[:, di, :]\n if attn is not None:\n step_attn = attn[:, di, :]\n else:\n step_attn = None\n decode(di, step_output, step_attn)\n else:\n decoder_input = inputs[:, 0].unsqueeze(1)\n for di in range(max_length):\n decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs,\n function=function)\n step_output = decoder_output.squeeze(1)\n symbols = decode(di, step_output, step_attn)\n decoder_input = symbols\n\n ret_dict[DecoderRNN.KEY_SEQUENCE] = sequence_symbols\n ret_dict[DecoderRNN.KEY_LENGTH] = lengths.tolist()\n\n return decoder_outputs, decoder_hidden, ret_dict\n\n def _init_state(self, encoder_hidden):\n \"\"\" Initialize the encoder hidden state. \"\"\"\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden\n\n def _cat_directions(self, h):\n \"\"\" If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n \"\"\"\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h\n\n def _validate_args(self, inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio):\n if self.use_attention:\n if encoder_outputs is None:\n raise ValueError(\"Argument encoder_outputs cannot be None when attention is used.\")\n\n # inference batch size\n if inputs is None and encoder_hidden is None:\n batch_size = 1\n else:\n if inputs is not None:\n batch_size = inputs.size(0)\n else:\n if self.rnn_cell is nn.LSTM:\n batch_size = encoder_hidden[0].size(1)\n elif self.rnn_cell is nn.GRU:\n batch_size = encoder_hidden.size(1)\n\n # set default input and max decoding length\n if inputs is None:\n if teacher_forcing_ratio > 0:\n raise ValueError(\"Teacher forcing has to be disabled (set 0) when no inputs is provided.\")\n inputs = Variable(torch.LongTensor([self.sos_id] * batch_size),\n volatile=True).view(batch_size, 1)\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n max_length = self.max_length\n else:\n max_length = inputs.size(1) - 1 # minus the start of sequence symbol\n\n return inputs, batch_size, max_length\n"
] | [["torch.nn.Linear", "numpy.array", "torch.cuda.is_available", "torch.LongTensor", "torch.nn.Embedding"]] |
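For the DecoderRNN row above, the core of forward_step is an embed → RNN → linear-projection → log-softmax chain over one decoding step. A stripped-down sketch of that chain (small made-up sizes, a plain GRU, no attention or teacher forcing) is:

    # Stripped-down single decoding step (illustrative sizes; not the repository's class).
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    device = "cuda" if torch.cuda.is_available() else "cpu"
    vocab_size, hidden_size = 100, 32

    embedding = nn.Embedding(vocab_size, hidden_size).to(device)
    rnn = nn.GRU(hidden_size, hidden_size, batch_first=True).to(device)
    out = nn.Linear(hidden_size, vocab_size).to(device)

    tokens = torch.LongTensor([[1], [5], [7], [2]]).to(device)   # one input token per sequence
    embedded = embedding(tokens)                                  # (batch, 1, hidden)
    output, hidden = rnn(embedded)                                # (batch, 1, hidden)
    log_probs = F.log_softmax(out(output.squeeze(1)), dim=-1)     # (batch, vocab)
    next_tokens = log_probs.topk(1)[1]                            # greedy symbol, as in decode()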
neu-szy/BasicSR | ["dce8d520d79225ba6e2dfa326a76e31b86df4436"] | ["basicsr/archs/seanet_arch.py"] |
[
"from . import sean_common as common\nimport torch.nn as nn\nimport torch\nfrom basicsr.utils.registry import ARCH_REGISTRY\n\nclass LFF(nn.Module):\n def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):\n super(LFF, self).__init__()\n\n kernel_size = 3\n n_layes = 5\n act = nn.ReLU(True)\n\n m_head = [conv(3, n_feats, kernel_size)]\n\n m_body = [\n conv(\n n_feats, n_feats, kernel_size\n ) for _ in range(n_layes)\n ]\n\n m_tail = [\n common.Upsampler(conv, scale, n_feats, act=False),\n nn.Conv2d(\n n_feats, n_colors, kernel_size,\n padding=(kernel_size // 2)\n )\n ]\n\n self.LLF_head = nn.Sequential(*m_head)\n self.LLF_body = nn.Sequential(*m_body)\n self.LLF_tail = nn.Sequential(*m_tail)\n\n def forward(self, x):\n x = self.LLF_head(x)\n x = self.LLF_body(x)\n x = self.LLF_tail(x)\n return x\n\n\nclass MSRB(nn.Module):\n def __init__(self, conv=common.default_conv):\n super(MSRB, self).__init__()\n\n n_feats = 64\n kernel_size_1 = 3\n kernel_size_2 = 5\n\n self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)\n self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)\n self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)\n self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)\n self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n input_1 = x\n output_3_1 = self.relu(self.conv_3_1(input_1))\n output_5_1 = self.relu(self.conv_5_1(input_1))\n input_2 = torch.cat([output_3_1, output_5_1], 1)\n output_3_2 = self.relu(self.conv_3_2(input_2))\n output_5_2 = self.relu(self.conv_5_2(input_2))\n input_3 = torch.cat([output_3_2, output_5_2], 1)\n output = self.confusion(input_3)\n output += x\n return output\n\n\nclass Edge_Net(nn.Module):\n def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):\n super(Edge_Net, self).__init__()\n\n kernel_size = 3\n act = nn.ReLU(True)\n n_blocks = 5\n self.n_blocks = n_blocks\n\n modules_head = [conv(3, n_feats, kernel_size)]\n\n modules_body = nn.ModuleList()\n for i in range(n_blocks):\n modules_body.append(\n MSRB())\n\n modules_tail = [\n nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1),\n conv(n_feats, n_feats, kernel_size),\n common.Upsampler(conv, scale, n_feats, act=False),\n conv(n_feats, n_colors, kernel_size)]\n\n self.Edge_Net_head = nn.Sequential(*modules_head)\n self.Edge_Net_body = nn.Sequential(*modules_body)\n self.Edge_Net_tail = nn.Sequential(*modules_tail)\n\n def forward(self, x):\n x = self.Edge_Net_head(x)\n res = x\n\n MSRB_out = []\n for i in range(self.n_blocks):\n x = self.Edge_Net_body[i](x)\n MSRB_out.append(x)\n MSRB_out.append(res)\n\n res = torch.cat(MSRB_out, 1)\n x = self.Edge_Net_tail(res)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self, scale, res_scale, conv=common.default_conv, n_feats=64):\n super(Net, self).__init__()\n\n n_resblock = 40\n kernel_size = 3\n act = nn.ReLU(True)\n\n m_head = [conv(n_feats, n_feats, kernel_size)]\n\n m_body = [\n common.ResBlock(\n conv, n_feats, kernel_size, act=act, res_scale=res_scale\n ) for _ in range(n_resblock)\n ]\n\n m_tail = [conv(n_feats, 3, kernel_size)]\n\n self.Net_head = nn.Sequential(*m_head)\n self.Net_body = nn.Sequential(*m_body)\n self.Net_tail = nn.Sequential(*m_tail)\n\n def forward(self, x):\n x = self.Net_head(x)\n res = self.Net_body(x)\n res += x\n x = self.Net_tail(res)\n return x\n\n@ARCH_REGISTRY.register()\nclass SEAN(nn.Module):\n def __init__(self,\n n_feats,\n scale,\n rgb_range,\n 
res_scale,\n n_colors,\n conv=common.default_conv):\n super(SEAN, self).__init__()\n\n kernel_size = 3\n act = nn.ReLU(True)\n\n rgb_mean = (0.4488, 0.4371, 0.4040)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std)\n\n # define head module\n m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]\n\n # define body module\n m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]\n\n m_Fushion = [conv(6, n_feats, kernel_size=1)]\n\n # define tail module\n m_Net = [Net(scale, res_scale, n_feats=n_feats)]\n\n self.add_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std, 1)\n\n self.lff = nn.Sequential(*m_LFF)\n self.edge = nn.Sequential(*m_Edge)\n self.fushion = nn.Sequential(*m_Fushion)\n self.net = nn.Sequential(*m_Net)\n\n def forward(self, x):\n x = self.sub_mean(x)\n low = self.lff(x)\n high = self.edge(x)\n out = torch.cat([low, high], 1)\n out = self.fushion(out)\n out = self.net(out)\n x = self.add_mean(out)\n return high, x\n\n# import torch.nn as nn\n# import torch\n# from basicsr.utils.registry import ARCH_REGISTRY\n#\n#\n# import math\n#\n# import torch\n# import torch.nn as nn\n#\n#\n# def default_conv(in_channels, out_channels, kernel_size, bias=True):\n# return nn.Conv2d(\n# in_channels, out_channels, kernel_size,\n# padding=(kernel_size//2), bias=bias)\n#\n# class MeanShift(nn.Conv2d):\n# def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n# super(MeanShift, self).__init__(3, 3, kernel_size=1)\n# std = torch.Tensor(rgb_std)\n# self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n# self.weight.data.div_(std.view(3, 1, 1, 1))\n# self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n# self.bias.data.div_(std)\n# self.requires_grad = False\n#\n# class BasicBlock(nn.Sequential):\n# def __init__(\n# self, in_channels, out_channels, kernel_size, stride=1, bias=False,\n# bn=True, act=nn.ReLU(True)):\n#\n# m = [nn.Conv2d(\n# in_channels, out_channels, kernel_size,\n# padding=(kernel_size//2), stride=stride, bias=bias)\n# ]\n# if bn: m.append(nn.BatchNorm2d(out_channels))\n# if act is not None: m.append(act)\n# super(BasicBlock, self).__init__(*m)\n#\n# class ResBlock(nn.Module):\n# def __init__(\n# self, conv, n_feat, kernel_size,\n# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):\n#\n# super(ResBlock, self).__init__()\n# m = []\n# for i in range(2):\n# m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n# if bn: m.append(nn.BatchNorm2d(n_feat))\n# if i == 0: m.append(act)\n#\n# self.body = nn.Sequential(*m)\n# self.res_scale = res_scale\n#\n# def forward(self, x):\n# res = self.body(x).mul(self.res_scale)\n# res += x\n#\n# return res\n#\n# class Upsampler(nn.Sequential):\n# def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n#\n# m = []\n# if (scale & (scale - 1)) == 0: # Is scale = 2^n?\n# for _ in range(int(math.log(scale, 2))):\n# m.append(conv(n_feat, 4 * n_feat, 3, bias))\n# m.append(nn.PixelShuffle(2))\n# if bn: m.append(nn.BatchNorm2d(n_feat))\n# if act: m.append(act())\n# elif scale == 3:\n# m.append(conv(n_feat, 9 * n_feat, 3, bias))\n# m.append(nn.PixelShuffle(3))\n# if bn: m.append(nn.BatchNorm2d(n_feat))\n# if act: m.append(act())\n# else:\n# raise NotImplementedError\n#\n# super(Upsampler, self).__init__(*m)\n#\n# ## add SELayer\n# class SELayer(nn.Module):\n# def __init__(self, channel, reduction=16):\n# super(SELayer, self).__init__()\n# self.avg_pool = nn.AdaptiveAvgPool2d(1)\n# self.conv_du = nn.Sequential(\n# nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n# 
nn.ReLU(inplace=True),\n# nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n# nn.Sigmoid()\n# )\n#\n# def forward(self, x):\n# y = self.avg_pool(x)\n# y = self.conv_du(y)\n# return x * y\n#\n# ## add SEResBlock\n# class SEResBlock(nn.Module):\n# def __init__(\n# self, conv, n_feat, kernel_size, reduction,\n# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):\n#\n# super(SEResBlock, self).__init__()\n# modules_body = []\n# for i in range(2):\n# modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n# if bn: modules_body.append(nn.BatchNorm2d(n_feat))\n# if i == 0: modules_body.append(act)\n# modules_body.append(SELayer(n_feat, reduction))\n# self.body = nn.Sequential(*modules_body)\n# self.res_scale = res_scale\n#\n# def forward(self, x):\n# res = self.body(x)\n# #res = self.body(x).mul(self.res_scale)\n# res += x\n#\n# return res\n#\n#\n# class LFF(nn.Module):\n# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):\n# super(LFF, self).__init__()\n#\n# kernel_size = 3\n# n_layes = 5\n# act = nn.ReLU(True)\n#\n# m_head = [conv(3, n_feats, kernel_size)]\n#\n# m_body = [\n# conv(\n# n_feats, n_feats, kernel_size\n# ) for _ in range(n_layes)\n# ]\n#\n# m_tail = [\n# Upsampler(conv, scale, n_feats, act=False),\n# nn.Conv2d(\n# n_feats, n_colors, kernel_size,\n# padding=(kernel_size // 2)\n# )\n# ]\n#\n# self.LLF_head = nn.Sequential(*m_head)\n# self.LLF_body = nn.Sequential(*m_body)\n# self.LLF_tail = nn.Sequential(*m_tail)\n#\n# def forward(self, x):\n# x = self.LLF_head(x)\n# x = self.LLF_body(x)\n# x = self.LLF_tail(x)\n# return x\n#\n#\n# class MSRB(nn.Module):\n# def __init__(self, conv=default_conv):\n# super(MSRB, self).__init__()\n#\n# n_feats = 64\n# kernel_size_1 = 3\n# kernel_size_2 = 5\n#\n# self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)\n# self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)\n# self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)\n# self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)\n# self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)\n# self.relu = nn.ReLU(inplace=True)\n#\n# def forward(self, x):\n# input_1 = x\n# output_3_1 = self.relu(self.conv_3_1(input_1))\n# output_5_1 = self.relu(self.conv_5_1(input_1))\n# input_2 = torch.cat([output_3_1, output_5_1], 1)\n# output_3_2 = self.relu(self.conv_3_2(input_2))\n# output_5_2 = self.relu(self.conv_5_2(input_2))\n# input_3 = torch.cat([output_3_2, output_5_2], 1)\n# output = self.confusion(input_3)\n# output += x\n# return output\n#\n#\n# class Edge_Net(nn.Module):\n# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):\n# super(Edge_Net, self).__init__()\n#\n# kernel_size = 3\n# act = nn.ReLU(True)\n# n_blocks = 5\n# self.n_blocks = n_blocks\n#\n# modules_head = [conv(3, n_feats, kernel_size)]\n#\n# modules_body = nn.ModuleList()\n# for i in range(n_blocks):\n# modules_body.append(\n# MSRB())\n#\n# modules_tail = [\n# nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1),\n# conv(n_feats, n_feats, kernel_size),\n# Upsampler(conv, scale, n_feats, act=False),\n# conv(n_feats, n_colors, kernel_size)]\n#\n# self.Edge_Net_head = nn.Sequential(*modules_head)\n# self.Edge_Net_body = nn.Sequential(*modules_body)\n# self.Edge_Net_tail = nn.Sequential(*modules_tail)\n#\n# def forward(self, x):\n# x = self.Edge_Net_head(x)\n# res = x\n#\n# MSRB_out = []\n# for i in range(self.n_blocks):\n# x = self.Edge_Net_body[i](x)\n# MSRB_out.append(x)\n# MSRB_out.append(res)\n#\n# res = torch.cat(MSRB_out, 
1)\n# x = self.Edge_Net_tail(res)\n# return x\n#\n#\n# class Net(nn.Module):\n# def __init__(self, res_scale, conv=default_conv, n_feats=64):\n# super(Net, self).__init__()\n#\n# n_resblock = 40\n# kernel_size = 3\n# act = nn.ReLU(True)\n#\n# m_head = [conv(n_feats, n_feats, kernel_size)]\n#\n# m_body = [\n# ResBlock(\n# conv, n_feats, kernel_size, act=act, res_scale=res_scale\n# ) for _ in range(n_resblock)\n# ]\n#\n# m_tail = [conv(n_feats, 3, kernel_size)]\n#\n# self.Net_head = nn.Sequential(*m_head)\n# self.Net_body = nn.Sequential(*m_body)\n# self.Net_tail = nn.Sequential(*m_tail)\n#\n# def forward(self, x):\n# x = self.Net_head(x)\n# res = self.Net_body(x)\n# res += x\n# x = self.Net_tail(res)\n# return x\n#\n# @ARCH_REGISTRY.register()\n# class SEAN(nn.Module):\n# def __init__(self,\n# n_feats,\n# scale,\n# n_colors,\n# rgb_range,\n# res_scale,\n# conv=default_conv):\n# super(SEAN, self).__init__()\n#\n# rgb_mean = (0.4488, 0.4371, 0.4040)\n# rgb_std = (1.0, 1.0, 1.0)\n# self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std)\n#\n# # define head module\n# m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]\n#\n# # define body module\n# m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]\n#\n# m_Fushion = [conv(6, n_feats, kernel_size=1)]\n#\n# # define tail module\n# m_Net = [Net(res_scale, n_feats=n_feats)]\n#\n# self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1)\n#\n# self.lff = nn.Sequential(*m_LFF)\n# self.edge = nn.Sequential(*m_Edge)\n# self.fushion = nn.Sequential(*m_Fushion)\n# self.net = nn.Sequential(*m_Net)\n#\n# def forward(self, x):\n# x = self.sub_mean(x)\n# low = self.lff(x)\n# high = self.edge(x)\n# out = torch.cat([low, high], 1)\n# out = self.fushion(out)\n# out = self.net(out)\n# x = self.add_mean(out)\n# return high, x\n"
] | [["torch.cat", "torch.nn.ModuleList", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d"]] |
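The SEAN architecture row above repeatedly uses the same building pattern: parallel convolutions at two kernel sizes, channel-wise concatenation with torch.cat, and a 1x1 fusion convolution plus a residual add (see MSRB). A toy version of that block, with hypothetical channel counts rather than the repository's settings, might be:

    # Toy multi-scale residual block in the spirit of MSRB above (hypothetical sizes).
    import torch
    import torch.nn as nn

    class TinyMSRB(nn.Module):
        def __init__(self, n_feats=16):
            super().__init__()
            self.branch3 = nn.Sequential(nn.Conv2d(n_feats, n_feats, 3, padding=1), nn.ReLU(inplace=True))
            self.branch5 = nn.Sequential(nn.Conv2d(n_feats, n_feats, 5, padding=2), nn.ReLU(inplace=True))
            self.fuse = nn.Conv2d(n_feats * 2, n_feats, 1)

        def forward(self, x):
            y = torch.cat([self.branch3(x), self.branch5(x)], dim=1)  # concat along channels
            return self.fuse(y) + x                                   # residual connection

    blocks = nn.ModuleList([TinyMSRB() for _ in range(3)])
    x = torch.randn(1, 16, 8, 8)
    for block in blocks:
        x = block(x)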
Li-kewei/models | ["d02ba6a87c37ad9d0bc413413b9e9ddc8c60f43c"] | ["official/cv/brdnet/preprocess.py"] |
[
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport argparse\nimport os\nimport glob\nimport numpy as np\nimport PIL.Image as Image\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--out_dir', type=str, required=True,\n help='directory to store the image with noise')\nparser.add_argument('--image_path', type=str, required=True,\n help='directory of image to add noise')\nparser.add_argument('--channel', type=int, default=3\n , help='image channel, 3 for color, 1 for gray')\nparser.add_argument('--sigma', type=int, default=15, help='level of noise')\nargs = parser.parse_args()\n\ndef add_noise(out_dir, image_path, channel, sigma):\n file_list = glob.glob(image_path+'*') # image_path must end by '/'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n for file in file_list:\n print(\"Adding noise to: \", file)\n # read image\n if channel == 3:\n img_clean = np.array(Image.open(file), dtype='float32') / 255.0\n else:\n img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), dtype='float32') / 255.0, axis=2)\n\n np.random.seed(0) #obtain the same random data when it is in the test phase\n img_test = img_clean + np.random.normal(0, sigma/255.0, img_clean.shape).astype(np.float32)#HWC\n img_test = np.expand_dims(img_test.transpose((2, 0, 1)), 0)#NCHW\n #img_test = np.clip(img_test, 0, 1)\n\n filename = file.split('/')[-1].split('.')[0] # get the name of image file\n img_test.tofile(os.path.join(out_dir, filename+'_noise.bin'))\n\nif __name__ == \"__main__\":\n add_noise(args.out_dir, args.image_path, args.channel, args.sigma)\n"
] | [["numpy.random.seed", "numpy.random.normal"]] |
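The preprocess.py row above reduces to one numerical idea: seed NumPy, add Gaussian noise with standard deviation sigma/255 to a normalized image, and reorder HWC to NCHW. A self-contained sketch with a synthetic array standing in for the decoded image file:

    # Seeded additive Gaussian noise on a synthetic HWC array (stand-in for a decoded image).
    import numpy as np

    sigma = 15
    img_clean = np.full((4, 4, 3), 0.5, dtype=np.float32)        # pretend this came from Image.open(...)/255

    np.random.seed(0)                                             # reproducible noise at test time
    noise = np.random.normal(0, sigma / 255.0, img_clean.shape).astype(np.float32)
    img_test = img_clean + noise                                  # still HWC
    img_test = np.expand_dims(img_test.transpose((2, 0, 1)), 0)   # -> NCHW for the network
    print(img_test.shape)                                         # (1, 3, 4, 4)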
KlrShaK/bayesrace | ["8fe76f22b2ed57987dd8a57279cd29c100d0b9a6"] | ["bayes_race/models/dynamic.py"] |
[
"\"\"\"\tDynamic bicycle model.\n\t\n\tUse Dynamic class to:\n\t1. simulate continuous model\n\t2. linearize continuous model\n\t3. discretize continuous model\n\t4. simulate continuously linearized discrete model\n\t5. compare continuous and discrete models\n\n\"\"\"\n\n__author__ = 'Achin Jain'\n__email__ = 'achinj@seas.upenn.edu'\n\n\nimport numpy as np\nimport casadi as cs\nfrom bayes_race.models.model import Model\nfrom bayes_race.params import F110\n\n\nclass Dynamic(Model):\n\n\tdef __init__(self, lf, lr, mass, Iz, Cf, Cr, \n\t\t\t\t Bf=None, Br=None, Df=None, Dr=None,\n\t\t\t\t Cm1=None, Cm2=None, Cr0=None, Cr2=None, \n\t\t\t\t input_acc=False, **kwargs):\n\t\t\"\"\"\tspecify model params here\n\t\t\"\"\"\n\t\tself.lf = lf\n\t\tself.lr = lr\n\t\tself.dr = lr/(lf+lr)\n\t\tself.mass = mass\n\t\tself.Iz = Iz\n\n\t\tself.Cf = Cf\n\t\tself.Cr = Cr\n\n\t\tself.Bf = Bf\n\t\tself.Br = Br\n\t\tself.Df = Df\n\t\tself.Dr = Dr\n\n\t\tself.Cm1 = Cm1\n\t\tself.Cm2 = Cm2\n\t\tself.Cr0 = Cr0\n\t\tself.Cr2 = Cr2\n\n\t\tself.approx = False\n\t\tif Bf is None or Br is None or Df is None or Dr is None:\n\t\t\tself.approx = True\n\t\tself.input_acc = input_acc\n\t\tself.n_states = 6\n\t\tself.n_inputs = 2\n\t\tModel.__init__(self)\n\n\tdef sim_continuous(self, x0, u, t):\n\t\t\"\"\"\tsimulates the nonlinear continuous model with given input vector\n\t\t\tby numerical integration using 6th order Runge Kutta method\n\t\t\tx0 is the initial state of size 6x1\n\t\t\tu is the input vector of size 2xn\n\t\t\tt is the time vector of size 1x(n+1)\n\t\t\"\"\"\n\t\tn_steps = u.shape[1]\n\t\tx = np.zeros([6, n_steps+1])\n\t\tdxdt = np.zeros([6, n_steps+1])\n\t\tdxdt[:,0] = self._diffequation(None, x0, [0, 0])\n\t\tx[:,0] = x0\n\t\tfor ids in range(1, n_steps+1):\n\t\t\tx[:,ids] = self._integrate(x[:,ids-1], u[:,ids-1], t[ids-1], t[ids])\n\t\t\tdxdt[:,ids] = self._diffequation(None, x[:,ids], u[:,ids-1])\n\t\treturn x, dxdt\n\n\tdef _diffequation(self, t, x, u):\n\t\t\"\"\"\twrite dynamics as first order ODE: dxdt = f(x(t))\n\t\t\tx is a 6x1 vector: [x, y, psi, vx, vy, omega]^T\n\t\t\tu is a 2x1 vector: [acc/pwm, steer]^T\n\t\t\"\"\"\n\t\tsteer = u[1]\n\t\tpsi = x[2]\n\t\tvx = x[3]\n\t\tvy = x[4]\n\t\tomega = x[5]\n\n\t\tFfy, Frx, Fry = self.calc_forces(x, u)\n\n\t\tdxdt = np.zeros(6)\n\t\tdxdt[0] = vx*np.cos(psi) - vy*np.sin(psi)\n\t\tdxdt[1] = vx*np.sin(psi) + vy*np.cos(psi)\n\t\tdxdt[2] = omega\n\t\tdxdt[3] = 1/self.mass * (Frx - Ffy*np.sin(steer)) + vy*omega\n\t\tdxdt[4] = 1/self.mass * (Fry + Ffy*np.cos(steer)) - vx*omega\n\t\tdxdt[5] = 1/self.Iz * (Ffy*self.lf*np.cos(steer) - Fry*self.lr)\n\t\treturn dxdt\n\n\tdef calc_forces(self, x, u, return_slip=False):\n\t\tsteer = u[1]\n\t\tpsi = x[2]\n\t\tvx = x[3]\n\t\tvy = x[4]\n\t\tomega = x[5]\n\n\t\tif self.approx:\n\n\t\t\t# rolling friction and drag are ignored\n\t\t\tacc = u[0]\n\t\t\tFrx = self.mass*acc\n\n\t\t\t# See Vehicle Dynamics and Control (Rajamani)\n\t\t\talphaf = steer - (self.lf*omega + vy)/vx\n\t\t\talphar = (self.lr*omega - vy)/vx\n\t\t\tFfy = 2 * self.Cf * alphaf\n\t\t\tFry = 2 * self.Cr * alphar\n\n\t\telse:\n\t\t\t\n\t\t\tif self.input_acc:\n\t\t\t\t# rolling friction and drag are ignored\n\t\t\t\tacc = u[0]\n\t\t\t\tFrx = self.mass*acc\n\t\t\telse:\n\t\t\t\t# rolling friction and drag are modeled\n\t\t\t\tpwm = u[0]\n\t\t\t\tFrx = (self.Cm1-self.Cm2*vx)*pwm - self.Cr0 - self.Cr2*(vx**2)\n\n\t\t\talphaf = steer - np.arctan2((self.lf*omega + vy), abs(vx))\n\t\t\talphar = np.arctan2((self.lr*omega - vy), abs(vx))\n\t\t\tFfy = self.Df * 
np.sin(self.Cf * np.arctan(self.Bf * alphaf))\n\t\t\tFry = self.Dr * np.sin(self.Cr * np.arctan(self.Br * alphar))\n\t\tif return_slip:\n\t\t\treturn Ffy, Frx, Fry, alphaf, alphar\n\t\telse:\n\t\t\treturn Ffy, Frx, Fry\n\n\tdef casadi(self, x, u, dxdt):\n\t\t\"\"\"\twrite dynamics as first order ODE: dxdt = f(x(t))\n\t\t\tx is a 6x1 vector: [x, y, psi, vx, vy, omega]^T\n\t\t\tu is a 2x1 vector: [acc/pwm, steer]^T\n\t\t\tdxdt is a casadi.SX variable\n\t\t\"\"\"\n\t\tpwm = u[0]\n\t\tsteer = u[1]\n\t\tpsi = x[2]\n\t\tvx = x[3]\n\t\tvy = x[4]\n\t\tomega = x[5]\n\n\t\tvmin = 0.05\n\t\tvy = cs.if_else(vx<vmin, 0, vy)\n\t\tomega = cs.if_else(vx<vmin, 0, omega)\n\t\tsteer = cs.if_else(vx<vmin, 0, steer)\n\t\tvx = cs.if_else(vx<vmin, vmin, vx)\n\n\t\tFrx = (self.Cm1-self.Cm2*vx)*pwm - self.Cr0 - self.Cr2*(vx**2)\n\t\talphaf = steer - cs.atan2((self.lf*omega + vy), vx)\n\t\talphar = cs.atan2((self.lr*omega - vy), vx)\n\t\tFfy = self.Df * cs.sin(self.Cf * cs.arctan(self.Bf * alphaf))\n\t\tFry = self.Dr * cs.sin(self.Cr * cs.arctan(self.Br * alphar))\n\n\t\tdxdt[0] = vx*cs.cos(psi) - vy*cs.sin(psi)\n\t\tdxdt[1] = vx*cs.sin(psi) + vy*cs.cos(psi)\n\t\tdxdt[2] = omega\n\t\tdxdt[3] = 1/self.mass * (Frx - Ffy*cs.sin(steer)) + vy*omega\n\t\tdxdt[4] = 1/self.mass * (Fry + Ffy*cs.cos(steer)) - vx*omega\n\t\tdxdt[5] = 1/self.Iz * (Ffy*self.lf*cs.cos(steer) - Fry*self.lr)\n\t\treturn dxdt\n\n\tdef sim_discrete(self, x0, u, Ts):\n\t\t\"\"\"\tsimulates a continuously linearized discrete model\n\t\t\tu is the input vector of size 2xn\n\t\t\tTs is the sampling time\n\t\t\"\"\"\n\t\tn_steps = u.shape[1]\n\t\tx = np.zeros([6, n_steps+1])\n\t\tdxdt = np.zeros([6, n_steps+1])\n\t\tdxdt[:,0] = self._diffequation(None, x0, [0, 0])\n\t\tx[:,0] = x0\n\t\tfor ids in range(1, n_steps+1):\n\t\t\tg = self._diffequation(None, x[:,ids-1], u[:,ids-1]).reshape(-1,)\n\t\t\tx[:,ids] = x[:,ids-1] + g*Ts\n\t\t\tdxdt[:,ids] = self._diffequation(None, x[:,ids], u[:,ids-1])\n\t\treturn x, dxdt\n\n\tdef linearize(self, x0, u0):\n\t\t\"\"\"\tlinearize at a given x0, u0\n\t\t\tfor a given continuous system dxdt = f(x(t))\n\t\t\tcalculate A = ∂f/∂x, B = ∂f/∂u, g = f evaluated at x0, u0\n\t\t\tA is 6x6, B is 6x2, g is 6x1\n\t\t\"\"\"\n\t\tsteer = u0[1]\n\t\tpsi = x0[2]\n\t\tvx = x0[3]\n\t\tvy = x0[4]\n\t\tomega = x0[5]\n\n\t\t# numerical correction for low speeds\n\t\tvmin = 0.05\n\t\tif vx < vmin:\n\t\t\tvy = 0\n\t\t\tomega = 0\n\t\t\tsteer = 0\n\t\t\tvx = vmin\n\n\t\tsindelta = np.sin(steer)\n\t\tcosdelta = np.cos(steer)\n\t\tsinpsi = np.sin(psi)\n\t\tcospsi = np.cos(psi)\n\n\t\tFfy, Frx, Fry, alphaf, alphar = self.calc_forces(x0, u0, return_slip=True)\n\n\t\tif self.approx:\n\n\t\t\tdFfy_dvx = 2 * self.Cf * (self.lf*omega + vy)/((self.lf*omega + vy)**2 + vx**2)\n\t\t\tdFfy_dvy = -2 * self.Cf * vx/((self.lf*omega + vy)**2 + vx**2)\n\t\t\tdFfy_domega = -2 * self.Cf * self.lf * vx/((self.lf*omega + vy)**2 + vx**2)\n\n\t\t\tdFrx_dvx = 0\n\t\t\tdFrx_dvu1 = 1\n\n\t\t\tdFry_dvx = -2 * self.Cr * (self.lr*omega - vy)/((self.lr*omega - vy)**2 + vx**2)\n\t\t\tdFry_dvy = -2 * self.Cr * vx/((self.lr*omega - vy)**2 + vx**2)\n\t\t\tdFry_domega = 2 * self.Cr * self.lr * vx/((self.lr*omega - vy)**2 + vx**2)\n\n\t\t\tdFfy_delta = 2*self.Cf\n\n\t\telse:\n\n\t\t\tdFfy_dalphaf = self.Bf * self.Cf * self.Df * np.cos(self.Cf * np.arctan(self.Bf * alphaf))\n\t\t\tdFfy_dalphaf *= 1/(1+(self.Bf*alphaf)**2)\n\n\t\t\tdFry_dalphar = self.Br * self.Cr * self.Dr * np.cos(self.Cr * np.arctan(self.Br * alphar))\n\t\t\tdFry_dalphar *= 
1/(1+(self.Br*alphar)**2)\n\n\t\t\tdFfy_dvx = dFfy_dalphaf * (self.lf*omega + vy)/((self.lf*omega + vy)**2 + vx**2)\n\t\t\tdFfy_dvy = -dFfy_dalphaf * vx/((self.lf*omega + vy)**2 + vx**2)\n\t\t\tdFfy_domega = -dFfy_dalphaf * self.lf * vx/((self.lf*omega + vy)**2 + vx**2)\n\n\t\t\tif self.input_acc:\n\t\t\t\traise NotImplementedError\n\t\t\t\t\n\t\t\tpwm = u0[0]\n\t\t\tdFrx_dvx = -self.Cm2*pwm - 2*self.Cr2*vx\n\t\t\tdFrx_dvu1 = self.Cm1-self.Cm2*vx\n\n\t\t\tdFry_dvx = -dFry_dalphar * (self.lr*omega - vy)/((self.lr*omega - vy)**2 + vx**2)\n\t\t\tdFry_dvy = -dFry_dalphar * vx/((self.lr*omega - vy)**2 + vx**2)\n\t\t\tdFry_domega = dFry_dalphar * self.lr * vx/((self.lr*omega - vy)**2 + vx**2)\n\n\t\t\tdFfy_delta = dFfy_dalphaf\n\n\t\tf1_psi = -vx*sinpsi-vy*cospsi\n\t\tf1_vx = cospsi\n\t\tf1_vy = -sinpsi\n\n\t\tf2_psi = vx*cospsi-vy*sinpsi\n\t\tf2_vx = sinpsi\n\t\tf2_vy = cospsi\n\n\t\tf4_vx = 1/self.mass * (dFrx_dvx -dFfy_dvx*sindelta)\n\t\tf4_vy = 1/self.mass * (-dFfy_dvy*sindelta + self.mass*omega)\n\t\tf4_omega = 1/self.mass * (-dFfy_domega*sindelta + self.mass*vy)\n\n\t\tf5_vx = 1/self.mass * (dFry_dvx + dFfy_dvx*cosdelta - self.mass*omega)\n\t\tf5_vy = 1/self.mass * (dFry_dvy + dFfy_dvy*cosdelta)\n\t\tf5_omega = 1/self.mass * (dFry_domega + dFfy_domega*cosdelta - self.mass*vx)\n\n\t\tf6_vx = 1/self.Iz * (dFfy_dvx*self.lf*cosdelta - dFry_dvx*self.lr)\n\t\tf6_vy = 1/self.Iz * (dFfy_dvy*self.lf*cosdelta - dFry_dvy*self.lr)\n\t\tf6_omega = 1/self.Iz * (dFfy_domega*self.lf*cosdelta - dFry_domega*self.lr)\n\n\t\tf4_u1 = dFrx_dvu1\n\t\tf4_delta = 1/self.mass * (-dFfy_delta*sindelta - Ffy*cosdelta)\n\t\tf5_delta = 1/self.mass * (dFfy_delta*cosdelta - Ffy*sindelta)\n\t\tf6_delta = 1/self.Iz * (dFfy_delta*self.lf*cosdelta - Ffy*self.lf*sindelta)\n\n\t\tA = np.array([\n\t\t\t[0, 0, f1_psi, f1_vx, f1_vy, 0],\n\t\t\t[0, 0, f2_psi, f2_vx, f2_vy, 0],\n\t\t\t[0, 0, 0, 0, 0, 1],\n\t\t\t[0, 0, 0, f4_vx, f4_vy, f4_omega],\n\t\t\t[0, 0, 0, f5_vx, f5_vy, f5_omega],\n\t\t\t[0, 0, 0, f6_vx, f6_vy, f6_omega],\n\t\t\t])\n\t\tB = np.array([\n\t\t\t[0, 0],\n\t\t\t[0, 0],\n\t\t\t[0, 0],\n\t\t\t[f4_u1, f4_delta],\n\t\t\t[0, f5_delta],\n\t\t\t[0, f6_delta],\n\t\t\t])\n\t\tg = self._diffequation(None, x0, u0).reshape(-1,)\n\t\treturn A, B, g\n\n\nif __name__ == '__main__':\n\t\"\"\"\ttest cases 1-3 use 4 states continuous model\n\t\ttest cases 4-6 use 4 states discrete model\n\t\ttest pairs (1,4), (2,5) and (3,6) should give same results\n\t\"\"\"\n\n\t# vehicle parameters for F1/10\n\tparams = F110()\n\tmodel = Dynamic(**params)\n\n\ttest_case = 3\n\n\t#####################################################################\n\t# CONTINUOUS MODEL 6 STATES\n\n\t# start at origin with init velocity [3, 3] m/s\n\t# apply constant acceleration 1 m/s^2 for 1s and then move at constant speed\n\tif test_case == 1:\n\t\tn_steps = 100\n\t\tinputs = np.zeros([2, n_steps])\n\t\ttime = np.linspace(0, 2, n_steps+1)\n\t\tinputs[0,:50] = 1\n\t\tx_init = np.array([0, 0, np.pi/4, 3*np.sqrt(2), 0, 0])\n\t\tx_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)\n\t\tmodel.plot_results(time, x_cont, dxdt_cont, inputs)\n\n\t# start at origin with init velocity [3, 0] m/s\n\t# steer at constant angle 0.2 rad\n\tif test_case == 2:\n\t\tn_steps = 200\n\t\tinputs = np.zeros([2, n_steps])\n\t\ttime = np.linspace(0, 4, n_steps+1)\n\t\tinputs[1,:] = 0.2\n\t\tx_init = np.array([0, 0, 0, 3, 0, 0])\n\t\tx_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)\n\t\tmodel.plot_results(time, x_cont, dxdt_cont, inputs)\n\n\t# start at origin 
with init velocity [3, 0] m/s\n\t# steer at constant angle 0.2 rad after 2 sec\n\tif test_case == 3:\n\t\tn_steps = 400\n\t\tinputs = np.zeros([2, n_steps])\n\t\tinputs[1,100:] = 0.2\n\t\ttime = np.linspace(0, 8, n_steps+1)\n\t\tx_init = np.array([0, 0, 0, 3, 0, 0])\n\t\tx_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)\n\t\tmodel.plot_results(time, x_cont, dxdt_cont, inputs)\n\n\t#####################################################################\n\t# DISCRETE MODEL 6 STATES\n\t\n\t# start at origin with init velocity [3, 3] m/s\n\t# apply constant acceleration 1 m/s^2 for 1s and then move at constant speed\n\tif test_case == 4:\n\t\tTs = 0.02\n\t\tn_steps = int(2/Ts)\n\t\tinputs = np.zeros([2, n_steps])\n\t\ttime = np.linspace(0, n_steps+1, n_steps+1)*Ts\n\t\tinputs[0,:int(n_steps/2)] = 1\n\t\tx_init = np.array([0, 0, np.pi/4, 3*np.sqrt(2), 0, 0])\n\t\tx_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)\n\t\tmodel.plot_results(time, x_disc, dxdt_disc, inputs)\n\n\t# start at origin with init velocity [3, 0] m/s\n\t# steer at constant angle 0.2 rad\n\tif test_case == 5:\n\t\tTs = 0.02\n\t\tn_steps = int(4/Ts)\n\t\tinputs = np.zeros([2, n_steps])\n\t\ttime = np.linspace(0, n_steps+1, n_steps+1)*Ts\n\t\tinputs[1,:] = 0.2\n\t\tx_init = np.array([0, 0, 0, 3, 0, 0])\n\t\tx_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)\n\t\tmodel.plot_results(time, x_disc, dxdt_disc, inputs)\n\n\t# start at origin with init velocity [3, 0] m/s\n\t# steer at constant angle 0.2 rad after 2 sec\n\tif test_case == 6:\n\t\tTs = 0.02\n\t\tn_steps = int(8/Ts)\n\t\tinputs = np.zeros([2, n_steps])\n\t\tinputs[1,int(n_steps/4):] = 0.2\n\t\ttime = np.linspace(0, n_steps+1, n_steps+1)*Ts\n\t\tx_init = np.array([0, 0, 0, 3, 0, 0])\n\t\tx_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)\n\t\tmodel.plot_results(time, x_disc, dxdt_disc, inputs)"
] | [["numpy.array", "numpy.sin", "numpy.zeros", "numpy.arctan", "numpy.cos", "numpy.sqrt", "numpy.linspace"]] |
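The dynamic bicycle model above combines a Pacejka-style lateral tyre force, Ffy = Df*sin(Cf*arctan(Bf*alphaf)), with simple forward-Euler stepping in sim_discrete. The following sketch isolates those two pieces with made-up coefficients (not the F110 parameter set from bayes_race.params):

    # Tyre-force formula and one Euler step, with made-up coefficients.
    import numpy as np

    Bf, Cf, Df = 5.0, 1.3, 1.0            # hypothetical Pacejka front-tyre coefficients
    lf = 0.15                             # hypothetical distance CoG -> front axle

    def front_slip(vx, vy, omega, steer):
        # front slip angle, as in Dynamic.calc_forces (non-approximate branch)
        return steer - np.arctan2(lf * omega + vy, abs(vx))

    def front_lateral_force(alpha):
        return Df * np.sin(Cf * np.arctan(Bf * alpha))

    # One forward-Euler step of the kinematic part, mirroring sim_discrete: x_{k+1} = x_k + f(x_k) * Ts
    Ts = 0.02
    x = np.array([0.0, 0.0, 0.0, 3.0, 0.0, 0.0])    # [x, y, psi, vx, vy, omega]
    dxdt = np.zeros(6)
    dxdt[0] = x[3] * np.cos(x[2]) - x[4] * np.sin(x[2])
    dxdt[1] = x[3] * np.sin(x[2]) + x[4] * np.cos(x[2])
    dxdt[2] = x[5]
    x_next = x + dxdt * Ts
    print(front_lateral_force(front_slip(x[3], x[4], x[5], steer=0.2)), x_next[:3])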
nguyenvanhoang7398/nndl2-project | ["0da19e3ff0da1de2cb9ad6c9286d9f9204358b91"] | ["zero/cli.py"] |
[
"from datetime import datetime\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom argparse import Namespace\n\nlogging.getLogger(\"transformers\").setLevel(logging.WARNING)\n\nimport click\nimport torch\n\nfrom luke.utils.model_utils import ModelArchive\n\nfrom zero.utils.experiment_logger import commet_logger_args, CometLogger, NullLogger\n\nLOG_FORMAT = \"[%(asctime)s] [%(levelname)s] %(message)s (%(funcName)s@%(filename)s:%(lineno)s)\"\n\ntry:\n import absl.logging\n\n # https://github.com/tensorflow/tensorflow/issues/27045#issuecomment-519642980\n logging.getLogger().removeHandler(absl.logging._absl_handler)\n absl.logging._warn_preinit_stderr = False\nexcept ImportError:\n pass\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group()\n@click.option(\n \"--output-dir\", default=\"models\", type=click.Path()\n)\n@click.option(\"--num-gpus\", default=1)\n@click.option(\"--experiment-logger\", \"--logger\", type=click.Choice([\"comet\"]))\n@click.option(\"--master-port\", default=29500)\n@click.option(\"--local-rank\", \"--local_rank\", default=-1)\n@click.option(\"--model-file\", type=click.Path(exists=True))\n@click.option(\"--device-id\", type=int)\n@commet_logger_args\n@click.pass_context\ndef cli(ctx, **kwargs):\n args = Namespace(**kwargs)\n\n if args.local_rank == -1 and args.num_gpus > 1:\n current_env = os.environ.copy()\n current_env[\"MASTER_ADDR\"] = \"127.0.0.1\"\n current_env[\"MASTER_PORT\"] = str(args.master_port)\n current_env[\"WORLD_SIZE\"] = str(args.num_gpus)\n\n processes = []\n\n for args.local_rank in range(0, args.num_gpus):\n current_env[\"RANK\"] = str(args.local_rank)\n current_env[\"LOCAL_RANK\"] = str(args.local_rank)\n\n cmd = [sys.executable, \"-u\", \"-m\", \"examples.cli\", \"--local-rank={}\".format(args.local_rank)]\n cmd.extend(sys.argv[1:])\n\n process = subprocess.Popen(cmd, env=current_env)\n processes.append(process)\n\n for process in processes:\n process.wait()\n if process.returncode != 0:\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n\n sys.exit(0)\n else:\n if args.local_rank not in (-1, 0):\n logging.basicConfig(format=LOG_FORMAT, level=logging.WARNING)\n else:\n logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n logger.info(\"Output dir: %s\", args.output_dir)\n\n # NOTE: ctx.obj is documented here: http://click.palletsprojects.com/en/7.x/api/#click.Context.obj\n ctx.obj = dict(local_rank=args.local_rank, output_dir=args.output_dir)\n\n if args.num_gpus == 0:\n ctx.obj[\"device\"] = torch.device(\"cpu\")\n elif args.local_rank == -1:\n ctx.obj[\"device\"] = torch.device(\"cuda:{}\".format(args.device_id))\n else:\n torch.cuda.set_device(args.local_rank)\n ctx.obj[\"device\"] = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n\n experiment_logger = NullLogger()\n\n if args.local_rank in (-1, 0) and args.experiment_logger == \"comet\":\n experiment_logger = CometLogger(args)\n\n experiment_logger.log_parameters({p.name: getattr(args, p.name) for p in cli.params})\n ctx.obj[\"experiment\"] = experiment_logger\n\n if args.model_file:\n model_archive = ModelArchive.load(args.model_file)\n ctx.obj[\"tokenizer\"] = model_archive.tokenizer\n ctx.obj[\"entity_vocab\"] = model_archive.entity_vocab\n ctx.obj[\"bert_model_name\"] = model_archive.bert_model_name\n ctx.obj[\"model_config\"] = model_archive.config\n 
ctx.obj[\"max_mention_length\"] = model_archive.max_mention_length\n ctx.obj[\"model_weights\"] = model_archive.state_dict\n\n experiment_logger.log_parameter(\"model_file_name\", os.path.basename(args.model_file))\n\n\nfrom zero.ner.main import cli as ner_cli\ncli.add_command(ner_cli)\n\n\nif __name__ == \"__main__\":\n cli()"
] | [["torch.distributed.init_process_group", "torch.device", "torch.cuda.set_device"]] |
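The zero/cli.py row above selects a compute device in three branches: CPU when no GPUs are requested, a single explicit GPU for non-distributed runs, and per-process GPUs plus NCCL process-group initialization for distributed runs. A condensed sketch of that branching follows, with hypothetical num_gpus/local_rank/device_id values; the distributed branch only makes sense under a multi-process launcher:

    # Device-selection branching in the spirit of cli() above (values are placeholders).
    import torch

    num_gpus, local_rank, device_id = 1, -1, 0        # hypothetical CLI arguments

    if num_gpus == 0:
        device = torch.device("cpu")
    elif local_rank == -1:
        # single-process run: one explicit GPU, falling back to CPU if none is available
        device = torch.device("cuda:{}".format(device_id)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        # multi-process run: bind this process to its GPU and join the NCCL process group
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        torch.distributed.init_process_group(backend="nccl")

    print(device)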
denisesato/scikit-multiflow |
[
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95",
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95",
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95",
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95",
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95",
"3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95"
] |
[
"src/skmultiflow/drift_detection/adwin.py",
"src/skmultiflow/meta/online_under_over_bagging.py",
"tests/meta/test_regressor_chains.py",
"src/skmultiflow/bayes/naive_bayes.py",
"tests/data/test_regression_generator.py",
"src/skmultiflow/data/sea_generator.py"
] |
[
"import numpy as np\n\nfrom skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector\n\n\nclass ADWIN(BaseDriftDetector):\n \"\"\" Adaptive Windowing method for concept drift detection.\n\n Parameters\n ----------\n delta : float (default=0.002)\n The delta parameter for the ADWIN algorithm.\n\n Notes\n -----\n ADWIN [1]_ (ADaptive WINdowing) is an adaptive sliding window algorithm\n for detecting change, and keeping updated statistics about a data stream.\n ADWIN allows algorithms not adapted for drifting data, to be resistant\n to this phenomenon.\n\n The general idea is to keep statistics from a window of variable size while\n detecting concept drift.\n\n The algorithm will decide the size of the window by cutting the statistics'\n window at different points and analysing the average of some statistic over\n these two windows. If the absolute value of the difference between the two\n averages surpasses a pre-defined threshold, change is detected at that point\n and all data before that time is discarded.\n\n References\n ----------\n .. [1] Bifet, Albert, and Ricard Gavalda. \"Learning from time-changing data with adaptive\n windowing.\"\n In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.\n Society for Industrial and Applied Mathematics, 2007.\n\n Examples\n --------\n >>> # Imports\n >>> import numpy as np\n >>> from skmultiflow.drift_detection.adwin import ADWIN\n >>> adwin = ADWIN()\n >>> # Simulating a data stream as a normal distribution of 1's and 0's\n >>> data_stream = np.random.randint(2, size=2000)\n >>> # Changing the data concept from index 999 to 2000\n >>> for i in range(999, 2000):\n ... data_stream[i] = np.random.randint(4, high=8)\n >>> # Adding stream elements to ADWIN and verifying if drift occurred\n >>> for i in range(2000):\n ... adwin.add_element(data_stream[i])\n ... if adwin.detected_change():\n ... 
print('Change detected in data: ' + str(data_stream[i]) + ' - at index: ' + str(i))\n\n \"\"\"\n MAX_BUCKETS = 5\n\n def __init__(self, delta=.002):\n super().__init__()\n # default values affected by init_bucket()\n self.delta = delta\n self.last_bucket_row = 0\n self.list_row_bucket = None\n self._total = 0\n self._variance = 0\n self._width = 0\n self.bucket_number = 0\n\n self.__init_buckets()\n\n # other default values\n self.mint_min_window_longitude = 10\n\n self.mdbl_delta = .002\n self.mint_time = 0\n self.mdbl_width = 0\n\n self.detect = 0\n self._n_detections = 0\n self.detect_twice = 0\n self.mint_clock = 32\n\n self.bln_bucket_deleted = False\n self.bucket_num_max = 0\n self.mint_min_window_length = 5\n super().reset()\n\n def reset(self):\n \"\"\" Reset detectors\n\n Resets statistics and adwin's window.\n\n Returns\n -------\n ADWIN\n self\n\n \"\"\"\n self.__init__(delta=self.delta)\n\n def get_change(self):\n \"\"\" Get drift\n\n Returns\n -------\n bool\n Whether or not a drift occurred\n\n \"\"\"\n return self.bln_bucket_deleted\n\n def reset_change(self):\n self.bln_bucket_deleted = False\n\n def set_clock(self, clock):\n self.mint_clock = clock\n\n def detected_warning_zone(self):\n return False\n\n @property\n def _bucket_used_bucket(self):\n return self.bucket_num_max\n\n @property\n def width(self):\n return self._width\n\n @property\n def n_detections(self):\n return self._n_detections\n\n @property\n def total(self):\n return self._total\n\n @property\n def variance(self):\n return self._variance / self._width\n\n @property\n def estimation(self):\n if self._width == 0:\n return 0\n return self._total / self._width\n\n @estimation.setter\n def estimation(self, value):\n pass\n\n @property\n def width_t(self):\n return self.mdbl_width\n\n def __init_buckets(self):\n \"\"\" Initialize the bucket's List and statistics\n\n Set all statistics to 0 and create a new bucket List.\n\n \"\"\"\n self.list_row_bucket = List()\n self.last_bucket_row = 0\n self._total = 0\n self._variance = 0\n self._width = 0\n self.bucket_number = 0\n\n def add_element(self, value):\n \"\"\" Add a new element to the sample window.\n\n Apart from adding the element value to the window, by inserting it in\n the correct bucket, it will also update the relevant statistics, in\n this case the total sum of all values, the window width and the total\n variance.\n\n Parameters\n ----------\n value: int or float (a numeric value)\n\n Notes\n -----\n The value parameter can be any numeric value relevant to the analysis\n of concept change. 
For the learners in this framework we are using\n either 0's or 1's, that are interpreted as follows:\n 0: Means the learners prediction was wrong\n 1: Means the learners prediction was correct\n\n This function should be used at every new sample analysed.\n\n \"\"\"\n if self.in_concept_change:\n self.reset()\n\n self._width += 1\n self.__insert_element_bucket(0, value, self.list_row_bucket.first)\n incremental_variance = 0\n\n if self._width > 1:\n incremental_variance = (self._width - 1) * \\\n (value - self._total / (self._width - 1)) * \\\n (value - self._total / (self._width - 1)) / self._width\n\n self._variance += incremental_variance\n self._total += value\n self.__compress_buckets()\n\n def __insert_element_bucket(self, variance, value, node):\n node.insert_bucket(value, variance)\n self.bucket_number += 1\n\n if self.bucket_number > self.bucket_num_max:\n self.bucket_num_max = self.bucket_number\n\n @staticmethod\n def bucket_size(row):\n return np.power(2, row)\n\n def delete_element(self):\n \"\"\" Delete an Item from the bucket list.\n\n Deletes the last Item and updates relevant statistics kept by ADWIN.\n\n Returns\n -------\n int\n The bucket size from the updated bucket\n\n \"\"\"\n node = self.list_row_bucket.last\n n1 = self.bucket_size(self.last_bucket_row)\n self._width -= n1\n self._total -= node.get_total(0)\n u1 = node.get_total(0) / n1\n incremental_variance = node.get_variance(0) + n1 * self._width * (\n u1 - self._total / self._width) * (u1 - self._total / self._width) / (\n n1 + self._width)\n self._variance -= incremental_variance\n node.remove_bucket()\n self.bucket_number -= 1\n\n if node.bucket_size_row == 0:\n self.list_row_bucket.remove_from_tail()\n self.last_bucket_row -= 1\n\n return n1\n\n def __compress_buckets(self):\n cursor = self.list_row_bucket.first\n i = 0\n while cursor is not None:\n k = cursor.bucket_size_row\n if k == self.MAX_BUCKETS + 1:\n next_node = cursor.get_next_item()\n if next_node is None:\n self.list_row_bucket.add_to_tail()\n next_node = cursor.get_next_item()\n self.last_bucket_row += 1\n n1 = self.bucket_size(i)\n n2 = self.bucket_size(i)\n u1 = cursor.get_total(0) / n1\n u2 = cursor.get_total(1) / n2\n incremental_variance = n1 * n2 * ((u1 - u2) * (u1 - u2)) / (n1 + n2)\n next_node.insert_bucket(\n cursor.get_total(0) + cursor.get_total(1),\n cursor.get_variance(1) + incremental_variance)\n self.bucket_number += 1\n cursor.compress_bucket_row(2)\n\n if next_node.bucket_size_row <= self.MAX_BUCKETS:\n break\n else:\n break\n\n cursor = cursor.get_next_item()\n i += 1\n\n def detected_change(self):\n \"\"\" Detects concept change in a drifting data stream.\n\n The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from\n Time-Changing Data with Adaptive Windowing'. 
The general idea is to keep\n statistics from a window of variable size while detecting concept drift.\n\n This function is responsible for analysing different cutting points in\n the sliding window, to verify if there is a significant change in concept.\n\n Returns\n -------\n bln_change : bool\n Whether change was detected or not\n\n Notes\n -----\n If change was detected, one should verify the new window size, by reading\n the width property.\n\n \"\"\"\n bln_change = False\n bln_exit = False\n bln_bucket_deleted = False\n self.mint_time += 1\n n0 = 0\n if (self.mint_time % self.mint_clock == 0) and (\n self.width > self.mint_min_window_longitude):\n bln_reduce_width = True\n while bln_reduce_width:\n bln_reduce_width = not bln_reduce_width\n bln_exit = False\n n0 = 0\n n1 = self._width\n u0 = 0\n u1 = self.total\n v0 = 0\n v1 = self._variance\n n2 = 0\n u2 = 0\n cursor = self.list_row_bucket.last\n i = self.last_bucket_row\n\n while (not bln_exit) and (cursor is not None):\n for k in range(cursor.bucket_size_row):\n n2 = self.bucket_size(i)\n u2 = cursor.get_total(k)\n\n if n0 > 0:\n v0 += cursor.get_variance(k) + 1. * n0 * n2 * \\\n (u0 / n0 - u2 / n2) * (u0 / n0 - u2 / n2) / (n0 + n2)\n\n if n1 > 0:\n v1 -= cursor.get_variance(k) + 1. * n1 * n2 * \\\n (u1 / n1 - u2 / n2) * (u1 / n1 - u2 / n2) / (n1 + n2)\n\n n0 += self.bucket_size(i)\n n1 -= self.bucket_size(i)\n u0 += cursor.get_total(k)\n u1 -= cursor.get_total(k)\n\n if (i == 0) and (k == cursor.bucket_size_row - 1):\n bln_exit = True\n break\n\n abs_value = 1. * ((u0 / n0) - (u1 / n1))\n if (n1 >= self.mint_min_window_length) \\\n and (n0 >= self.mint_min_window_length) \\\n and (\n self.__bln_cut_expression(n0, n1, u0, u1, v0, v1, abs_value,\n self.delta)):\n bln_bucket_deleted = True # noqa: F841\n self.detect = self.mint_time\n if self.detect == 0:\n self.detect = self.mint_time\n elif self.detect_twice == 0:\n self.detect_twice = self.mint_time\n\n bln_reduce_width = True\n bln_change = True\n if self.width > 0:\n n0 -= self.delete_element()\n bln_exit = True\n break\n\n cursor = cursor.get_previous()\n i -= 1\n self.mdbl_width += self.width\n if bln_change:\n self._n_detections += 1\n self.in_concept_change = bln_change\n return bln_change\n\n def __bln_cut_expression(self, n0, n1, u0, u1, v0, v1, abs_value, delta):\n n = self.width\n dd = np.log(2 * np.log(n) / delta)\n v = self.variance\n m = (1. / (n0 - self.mint_min_window_length + 1)) + \\\n (1. / (n1 - self.mint_min_window_length + 1))\n epsilon = np.sqrt(2 * m * v * dd) + 1. * 2 / 3 * dd * m\n return np.absolute(abs_value) > epsilon\n\n\nclass List(object):\n \"\"\" A linked list object for ADWIN algorithm.\n\n Used for storing ADWIN's bucket list. 
Is composed of Item objects.\n Acts as a linked list, where each element points to its predecessor\n and successor.\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._count = None\n self._first = None\n self._last = None\n self.reset()\n self.add_to_head()\n\n def reset(self):\n self._count = 0\n self._first = None\n self._last = None\n\n def add_to_head(self):\n self._first = Item(self._first, None)\n if self._last is None:\n self._last = self._first\n\n def remove_from_head(self):\n self._first = self._first.get_next_item()\n if self._first is not None:\n self._first.set_previous(None)\n else:\n self._last = None\n self._count -= 1\n\n def add_to_tail(self):\n self._last = Item(None, self._last)\n if self._first is None:\n self._first = self._last\n self._count += 1\n\n def remove_from_tail(self):\n self._last = self._last.get_previous()\n if self._last is not None:\n self._last.set_next_item(None)\n else:\n self._first = None\n self._count -= 1\n\n @property\n def first(self):\n return self._first\n\n @property\n def last(self):\n return self._last\n\n @property\n def size(self):\n return self._count\n\n\nclass Item(object):\n \"\"\" Item to be used by the List object.\n\n The Item object, alongside the List object, are the two main data\n structures used for storing the relevant statistics for the ADWIN\n algorithm for change detection.\n\n Parameters\n ----------\n next_item: Item object\n Reference to the next Item in the List\n previous_item: Item object\n Reference to the previous Item in the List\n\n \"\"\"\n\n def __init__(self, next_item=None, previous_item=None):\n super().__init__()\n self.next = next_item\n self.previous = previous_item\n if next_item is not None:\n next_item.previous = self\n if previous_item is not None:\n previous_item.set_next_item(self)\n self.bucket_size_row = None\n self.max_buckets = ADWIN.MAX_BUCKETS\n self.bucket_total = np.zeros(self.max_buckets + 1, dtype=float)\n self.bucket_variance = np.zeros(self.max_buckets + 1, dtype=float)\n self.reset()\n\n def reset(self):\n \"\"\" Reset the algorithm's statistics and window\n\n Returns\n -------\n ADWIN\n self\n\n \"\"\"\n self.bucket_size_row = 0\n for i in range(ADWIN.MAX_BUCKETS + 1):\n self.__clear_buckets(i)\n\n return self\n\n def __clear_buckets(self, index):\n self.set_total(0, index)\n self.set_variance(0, index)\n\n def insert_bucket(self, value, variance):\n new_item = self.bucket_size_row\n self.bucket_size_row += 1\n self.set_total(value, new_item)\n self.set_variance(variance, new_item)\n\n def remove_bucket(self):\n self.compress_bucket_row(1)\n\n def compress_bucket_row(self, num_deleted=1):\n for i in range(num_deleted, ADWIN.MAX_BUCKETS + 1):\n self.bucket_total[i - num_deleted] = self.bucket_total[i]\n self.bucket_variance[i - num_deleted] = self.bucket_variance[i]\n\n for i in range(1, num_deleted + 1):\n self.__clear_buckets(ADWIN.MAX_BUCKETS - i + 1)\n\n self.bucket_size_row -= num_deleted\n\n def get_next_item(self):\n return self.next\n\n def set_next_item(self, next_item):\n self.next = next_item\n\n def get_previous(self):\n return self.previous\n\n def set_previous(self, previous):\n self.previous = previous\n\n def get_total(self, index):\n return self.bucket_total[index]\n\n def get_variance(self, index):\n return self.bucket_variance[index]\n\n def set_total(self, value, index):\n self.bucket_total[index] = value\n\n def set_variance(self, value, index):\n self.bucket_variance[index] = value\n",
"import copy as cp\nimport warnings\n\nimport numpy as np\n\nfrom skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin\nfrom skmultiflow.drift_detection import ADWIN\nfrom skmultiflow.lazy import KNNADWINClassifier\nfrom skmultiflow.utils import check_random_state\nfrom skmultiflow.utils.utils import get_dimensions\n\n\ndef OnlineUnderOverBagging(base_estimator=KNNADWINClassifier(), n_estimators=10, sampling_rate=2,\n drift_detection=True, random_state=None): # pragma: no cover\n warnings.warn(\"'OnlineUnderOverBagging' has been renamed to 'OnlineUnderOverBaggingClassifier'\"\n \" in v0.5.0.\\nThe old name will be removed in v0.7.0\", category=FutureWarning)\n return OnlineUnderOverBaggingClassifier(base_estimator=base_estimator,\n n_estimators=n_estimators,\n sampling_rate=sampling_rate,\n drift_detection=drift_detection,\n random_state=random_state)\n\n\nclass OnlineUnderOverBaggingClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):\n r\"\"\" Online Under-Over-Bagging ensemble classifier.\n\n Online UnderOverBagging [1]_ is the online version of the ensemble method.\n\n In case of imbalanced classes UnderOverBagging uses the strategy of under-sampling\n the majority class and oversampling the minority class. In addition the sampling\n rate can be also varied over the bagging iterations, which further boosts the\n diversity of the base learners.\n\n The derivation of the online UnderOverBagging algorithm is made through the observation\n that a Binomial distribution with sampling rate :math:`\\frac{C}{N}` corresponds to a\n poisson distribution with :math:`\\lambda=C`.\n\n This online ensemble learner method is improved by the addition of an ADWIN change\n detector.\n\n ADWIN stands for Adaptive Windowing. It works by keeping updated\n statistics of a variable sized window, so it can detect changes and\n perform cuts in its window to better adapt the learning algorithms.\n\n\n Parameters\n ----------\n base_estimator: skmultiflow.core.BaseSKMObject or sklearn.BaseEstimator\n (default=KNNADWINClassifier) Each member of the ensemble is\n an instance of the base estimator.\n\n n_estimators: int, optional (default=10)\n The size of the ensemble, in other words, how many classifiers to train.\n\n sampling_rate: int, optional (default=2)\n The sampling rate of the positive instances.\n\n drift_detection: bool, optional (default=True)\n A drift detector (ADWIN) can be used by the method to track the performance\n of the classifiers and adapt when a drift is detected.\n\n random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by `np.random`.\n\n Raises\n ------\n NotImplementedError: A few of the functions described here are not\n implemented since they have no application in this context.\n\n ValueError: A ValueError is raised if the 'classes' parameter is\n not passed in the first partial_fit call.\n\n References\n ----------\n .. [1] B. Wang and J. Pineau, \"Online Bagging and Boosting for Imbalanced Data Streams,\"\n in IEEE Transactions on Knowledge and Data Engineering, vol. 28, no. 12, pp. 3353-3366,\n 1 Dec. 2016. 
doi: 10.1109/TKDE.2016.2609424\n\n Examples\n --------\n >>> # Imports\n >>> from skmultiflow.data import SEAGenerator\n >>> from skmultiflow.meta import OnlineUnderOverBaggingClassifier\n >>>\n >>> # Setup a data stream\n >>> stream = SEAGenerator(random_state=1)\n >>>\n >>> # Setup variables to control loop and track performance\n >>> n_samples = 0\n >>> correct_cnt = 0\n >>> max_samples = 200\n >>>\n >>> # Setup the Online Under-Over-Bagging ensemble classifier\n >>> online_under_over_bagging = OnlineUnderOverBaggingClassifier()\n >>>\n >>> # Train the classifier with the samples provided by the data stream\n >>> while n_samples < max_samples and stream.has_more_samples():\n >>> X, y = stream.next_sample()\n >>> y_pred = online_under_over_bagging.predict(X)\n >>> if y[0] == y_pred[0]:\n >>> correct_cnt += 1\n >>> online_under_over_bagging.partial_fit(X, y)\n >>> n_samples += 1\n >>>\n >>> # Display results\n >>> print('{} samples analyzed.'.format(n_samples))\n >>> print('Online Under Over Bagging performance: {}'.format(correct_cnt / n_samples))\n \"\"\"\n\n def __init__(\n self,\n base_estimator=KNNADWINClassifier(),\n n_estimators=10,\n sampling_rate=2,\n drift_detection=True,\n random_state=None):\n super().__init__()\n # default values\n self.base_estimator = base_estimator\n self.n_estimators = n_estimators\n self.sampling_rate = sampling_rate\n self.drift_detection = drift_detection\n self.random_state = random_state\n self.ensemble = None\n self.actual_n_estimators = None\n self.classes = None\n self._random_state = None\n self.n_samples = None\n self.adwin_ensemble = None\n\n def __configure(self):\n if hasattr(self.base_estimator, \"reset\"):\n self.base_estimator.reset()\n\n self.actual_n_estimators = self.n_estimators\n self.adwin_ensemble = []\n for i in range(self.actual_n_estimators):\n self.adwin_ensemble.append(ADWIN())\n self.ensemble = [cp.deepcopy(self.base_estimator) for _ in range(self.actual_n_estimators)]\n self._random_state = check_random_state(self.random_state)\n\n def reset(self):\n self.__configure()\n\n def partial_fit(self, X, y, classes=None, sample_weight=None):\n \"\"\" Partially fits the model, based on the X and y matrix.\n\n Since it's an ensemble learner, if X and y matrix of more than one\n sample are passed, the algorithm will partial fit the model one sample\n at a time.\n\n Each sample is trained by each classifier a total of K times, where K\n is drawn by a Poisson(l) distribution. l is updated after every example\n using :math:`lambda_{sc}` if th estimator correctly classifies the example or\n :math:`lambda_{sw}` in the other case.\n\n Parameters\n ----------\n X : numpy.ndarray of shape (n_samples, n_features)\n The features to train the model.\n\n y: numpy.ndarray of shape (n_samples)\n An array-like with the class labels of all samples in X.\n\n classes: numpy.ndarray, optional (default=None)\n Array with all possible/known class labels. This is an optional parameter, except\n for the first partial_fit call where it is compulsory.\n\n sample_weight: Array-like\n Instance weight. 
If not provided, uniform weights are assumed.\n Usage varies depending on the base estimator.\n\n Raises\n ------\n ValueError: A ValueError is raised if the 'classes' parameter is not\n passed in the first partial_fit call, or if they are passed in further\n calls but differ from the initial classes list passed..\n \"\"\"\n if self.ensemble is None:\n self.__configure()\n\n if self.classes is None:\n if classes is None:\n raise ValueError(\"The first partial_fit call should pass all the classes.\")\n else:\n self.classes = classes\n\n if self.classes is not None and classes is not None:\n if set(self.classes) == set(classes):\n pass\n else:\n raise ValueError(\"The classes passed to the partial_fit function differ \"\n \"from those passed earlier.\")\n\n self.__adjust_ensemble_size()\n\n r, _ = get_dimensions(X)\n for j in range(r):\n change_detected = False\n for i in range(self.actual_n_estimators):\n a = (i + 1) / self.actual_n_estimators\n if y[j] == 1:\n lam = a * self.sampling_rate\n else:\n lam = a\n k = self._random_state.poisson(lam)\n if k > 0:\n for b in range(k):\n self.ensemble[i].partial_fit([X[j]], [y[j]], classes, sample_weight)\n\n if self.drift_detection:\n try:\n pred = self.ensemble[i].predict(X)\n error_estimation = self.adwin_ensemble[i].estimation\n for k in range(r):\n if pred[k] is not None:\n self.adwin_ensemble[i].add_element(int(pred[k] == y[k]))\n if self.adwin_ensemble[i].detected_change():\n if self.adwin_ensemble[i].estimation > error_estimation:\n change_detected = True\n except ValueError:\n change_detected = False\n pass\n\n if change_detected and self.drift_detection:\n max_threshold = 0.0\n i_max = -1\n for i in range(self.actual_n_estimators):\n if max_threshold < self.adwin_ensemble[i].estimation:\n max_threshold = self.adwin_ensemble[i].estimation\n i_max = i\n if i_max != -1:\n self.ensemble[i_max].reset()\n self.adwin_ensemble[i_max] = ADWIN()\n\n return self\n\n def __adjust_ensemble_size(self):\n if len(self.classes) != len(self.ensemble):\n if len(self.classes) > len(self.ensemble):\n for i in range(len(self.ensemble), len(self.classes)):\n self.ensemble.append(cp.deepcopy(self.base_estimator))\n self.actual_n_estimators += 1\n self.adwin_ensemble.append(ADWIN())\n\n def predict(self, X):\n \"\"\" predict\n\n The predict function will average the predictions from all its learners\n to find the most likely prediction for the sample matrix X.\n\n Parameters\n ----------\n X: Numpy.ndarray of shape (n_samples, n_features)\n A matrix of the samples we want to predict.\n\n Returns\n -------\n numpy.ndarray\n A numpy.ndarray with the label prediction for all the samples in X.\n\n \"\"\"\n r, c = get_dimensions(X)\n proba = self.predict_proba(X)\n predictions = []\n if proba is None:\n return None\n for i in range(r):\n predictions.append(np.argmax(proba[i]))\n return np.asarray(predictions)\n\n def predict_proba(self, X):\n \"\"\" predict_proba\n\n Predicts the probability of each sample belonging to each one of the\n known classes.\n\n Parameters\n ----------\n X: Numpy.ndarray of shape (n_samples, n_features)\n A matrix of the samples we want to predict.\n\n Raises\n ------\n ValueError: A ValueError is raised if the number of classes in the base_estimator\n learner differs from that of the ensemble learner.\n\n Returns\n -------\n numpy.ndarray\n An array of shape (n_samples, n_features), in which each outer entry is\n associated with the X entry of the same index. 
And where the list in\n index [i] contains len(self.target_values) elements, each of which represents\n the probability that the i-th sample of X belongs to a certain label.\n\n \"\"\"\n proba = []\n r, c = get_dimensions(X)\n\n if self.ensemble is None:\n return np.zeros((r, 1))\n\n # Context manager to catch errors raised by numpy as RuntimeWarning\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n for i in range(self.actual_n_estimators):\n partial_proba = self.ensemble[i].predict_proba(X)\n if len(partial_proba[0]) > max(self.classes) + 1:\n raise ValueError(\"The number of classes in the base learner is larger \"\n \"than in the ensemble.\")\n\n if len(proba) < 1:\n for n in range(r):\n proba.append([0.0 for _ in partial_proba[n]])\n\n for n in range(r):\n for k in range(len(partial_proba[n])):\n try:\n proba[n][k] += partial_proba[n][k]\n except IndexError:\n proba[n].append(partial_proba[n][k])\n except RuntimeWarning:\n # Catch division by zero errors raised by numpy as RuntimeWarning\n continue\n\n except ValueError:\n return np.zeros((r, 1))\n except TypeError:\n return np.zeros((r, 1))\n\n # normalizing probabilities\n sum_proba = []\n for k in range(r):\n sum_proba.append(np.sum(proba[k]))\n aux = []\n for i in range(len(proba)):\n if sum_proba[i] > 0.:\n aux.append([x / sum_proba[i] for x in proba[i]])\n else:\n aux.append(proba[i])\n return np.asarray(aux)\n",
"from sklearn.datasets import make_regression\nfrom skmultiflow.data import DataStream\n\nfrom skmultiflow.meta import RegressorChain\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn import set_config\n\nimport numpy as np\n\nimport pytest\n\n# Force sklearn to show only the parameters whose default value have been changed when\n# printing an estimator (backwards compatibility with versions prior to sklearn==0.23)\nset_config(print_changed_only=True)\n\n\n@pytest.mark.filterwarnings('ignore::UserWarning')\ndef test_regressor_chains():\n X_reg, y_reg = make_regression(random_state=112, n_targets=3, n_samples=5150)\n stream = DataStream(X_reg, y_reg)\n\n estimator = SGDRegressor(random_state=112, max_iter=10)\n learner = RegressorChain(base_estimator=estimator, random_state=112)\n\n X, y = stream.next_sample(150)\n learner.partial_fit(X, y)\n\n cnt = 0\n max_samples = 5000\n predictions = []\n true_labels = []\n wait_samples = 100\n\n while cnt < max_samples:\n X, y = stream.next_sample()\n # Test every n samples\n if (cnt % wait_samples == 0) and (cnt != 0):\n predictions.append(list(learner.predict(X)[0]))\n true_labels.append(y[0])\n\n learner.partial_fit(X, y)\n cnt += 1\n\n expected_predictions = [[-21.932581119953333, 1265662295936.5574, 7.5406725414072326e+22],\n [-97.17297744582125, 5438576501559.791, -1.1370581201037737e+24],\n [-60.06308622605051, 26421144038311.047, 1.3207650552720094e+25],\n [-285.32687352244847, 8881551118262.033, -1.1322856827798374e+24],\n [-115.80322693771457, -24997431307818.508, 2.85747306174037e+24],\n [-12.184193815918672, 3510562166726.0283, -4.8590562435597834e+23],\n [-94.99008392491476, 4794062761133.606, -1.8849188211946465e+24],\n [66.35576182871232, -8147485653396.883, -7.492944375995595e+23],\n [-52.145505628056995, -1013810481101.9043, -4.5310283013446384e+23],\n [16.715060622072958, 562391244392.6193, 3.3789644409962397e+22],\n [96.32219400190282, -20397346086007.85, 1.558245298240083e+24],\n [-281.8168065846582, 118681520215938.52, 4.815807486956294e+25],\n [-135.62679760307105, 20260866750185.832, 1.605753540523006e+24],\n [0.07932047636460954, -708539394047.3298, -3.61482684929158e+22],\n [-292.1646176261883, -11162615183157.55, -8.674643964570704e+23],\n [-176.92746747754094, -29231218161585.13, 1.411600743825668e+24],\n [-348.0498644784687, -100615393132365.25, 9.759683002046948e+23],\n [30.948974669258675, -1199287119275.6328, 2.0866927007519847e+23],\n [214.0020659569134, -24437173206276.543, 9.450880718880671e+23],\n [153.98931593720746, 32675842205528.723, -1.7246747286222668e+24],\n [99.39074016354951, -11385065116243.611, 1.0770253102805811e+24],\n [127.81660709796127, 16929726964275.697, 7.14820947257164e+24],\n [40.45505653639006, -14311951591200.725, -9.33193290094133e+23],\n [117.52219878440611, 17952367624051.36, 4.5651719663788677e+23],\n [75.53942801239991, -9231543699137.594, 3.2317133158453914e+24],\n [31.795193207760704, -4084783706153.4004, -4.188095047309216e+23],\n [68.5318978502461, 5735810247065.921, 1.7284713503779943e+24],\n [65.18438567482129, -13298743450357.943, -1.4367047198923567e+24],\n [-116.63952028337805, -344127767223.9295, 2.3925104169428623e+22],\n [-76.81599010889556, 8711205431447.733, -1.1575305916673031e+24],\n [263.1077717649874, 32146618104196.434, -7.240279466740839e+24],\n [-94.07597099457413, -8216681977657.527, 2.3785728690780553e+24],\n [-175.78429788635424, -368856885004.46, -5.7200993095587195e+22],\n [59.648477499483285, -1752783828320.242, 2.1429953624557326e+23],\n 
[71.68447202426032, -27151271800666.492, 9.367463190825582e+24],\n [-189.96629636835922, -27090727476080.18, -3.8659883994544866e+24],\n [-240.7920206809074, 15406047062899.537, 2.0609123388035027e+24],\n [-105.80996634043589, -1518636404558.1646, -1.4166487855869706e+23],\n [-164.02527753963858, -61386039046571.125, -2.179071650432624e+25],\n [52.451759456657975, -988509747123.6125, -7.334899319683594e+22],\n [68.37044139814127, -7434200892467.581, -7.535677215142279e+23],\n [164.9457843624521, -9474550940989.51, -1.3512944635293625e+24],\n [189.34401690407307, -14349556896444.508, 1.0732760415617274e+24],\n [0.8944005517286119, 463945767759.78735, -1.9938544157612443e+22],\n [71.7856433565235, -9804063257174.584, 4.7874862540754335e+23],\n [-5.450502769025279, 281585481223.33276, 2.1974700575843552e+22],\n [248.00190755589915, -81874135462745.58, -2.6532557110860303e+25],\n [-113.86249490223707, 2634310697909.643, 1.580428629322546e+23],\n [-35.92856878407447, -5410985463428.589, 2.522168862637753e+23]]\n\n print(predictions)\n assert np.allclose(np.array(predictions).all(), np.array(expected_predictions).all())\n assert type(learner.predict(X)) == np.ndarray\n\n expected_info = \"RegressorChain(base_estimator=SGDRegressor(max_iter=10, random_state=112), \" \\\n \"order=None, random_state=112)\"\n\n info = \" \".join([line.strip() for line in learner.get_info().split()])\n assert info == expected_info\n",
"import numpy as np\n\nfrom collections import deque\n\nfrom skmultiflow.core import BaseSKMObject, ClassifierMixin\nfrom skmultiflow.utils import get_dimensions\nfrom skmultiflow.bayes import do_naive_bayes_prediction\nfrom skmultiflow.trees._attribute_observer import NumericAttributeClassObserverGaussian\nfrom skmultiflow.trees._attribute_observer import NominalAttributeClassObserver\n\n\nclass NaiveBayes(BaseSKMObject, ClassifierMixin):\n \"\"\" Naive Bayes classifier.\n\n Performs classic bayesian prediction while making naive assumption that all inputs are\n independent. Naive Bayes is a classifier algorithm known for its simplicity\n and low computational cost. Given `n` different classes, the trained Naive Bayes classifier\n predicts for every unlabelled instance the class to which it belongs with high accuracy.\n\n Parameters\n ----------\n nominal_attributes: numpy.ndarray (optional, default=None)\n List of Nominal attributes. If emtpy, then assume that all attributes are numerical.\n\n Notes\n -----\n The `scikit-learn` implementations of NaiveBayes are compatible with `scikit-multiflow`\n with the caveat that they must be partially fitted before use. In the `scikit-multiflow`\n evaluators this is done by setting `pretrain_size>0`.\n\n Examples\n --------\n >>> # Imports\n >>> from skmultiflow.data import SEAGenerator\n >>> from skmultiflow.bayes import NaiveBayes\n >>>\n >>> # Setup a data stream\n >>> stream = SEAGenerator(random_state=1)\n >>>\n >>> # Setup Naive Bayes estimator\n >>> naive_bayes = NaiveBayes()\n >>>\n >>> # Setup variables to control loop and track performance\n >>> n_samples = 0\n >>> correct_cnt = 0\n >>> max_samples = 200\n >>>\n >>> # Train the estimator with the samples provided by the data stream\n >>> while n_samples < max_samples and stream.has_more_samples():\n >>> X, y = stream.next_sample()\n >>> y_pred = naive_bayes.predict(X)\n >>> if y[0] == y_pred[0]:\n >>> correct_cnt += 1\n >>> naive_bayes.partial_fit(X, y)\n >>> n_samples += 1\n >>>\n >>> # Display results\n >>> print('{} samples analyzed.'.format(n_samples))\n >>> print('Naive Bayes accuracy: {}'.format(correct_cnt / n_samples))\n\n \"\"\"\n\n def __init__(self, nominal_attributes=None):\n super().__init__()\n self._observed_class_distribution = {}\n self._attribute_observers = {}\n self._classes = None\n self.nominal_attributes = nominal_attributes\n if not self.nominal_attributes:\n self._nominal_attributes = []\n else:\n self._nominal_attributes = self.nominal_attributes\n\n def partial_fit(self, X, y, classes=None, sample_weight=None):\n \"\"\" Partially (incrementally) fit the model.\n\n Parameters\n ----------\n X : numpy.ndarray of shape (n_samples, n_features)\n The features to train the model.\n\n y: numpy.ndarray of shape (n_samples)\n An array-like with the labels of all samples in X.\n\n classes: numpy.ndarray, optional (default=None)\n Array with all possible/known classes. Usage varies depending on the learning method.\n\n sample_weight: numpy.ndarray of shape (n_samples), optional (default=None)\n Samples weight. 
If not provided, uniform weights are assumed.\n Usage varies depending on the learning method.\n\n Returns\n -------\n NaiveBayes\n self\n\n \"\"\"\n if not self._classes and classes is not None:\n self._classes = classes\n\n if y is not None:\n row_cnt, _ = get_dimensions(X)\n if sample_weight is None:\n sample_weight = np.ones(row_cnt)\n if row_cnt != len(sample_weight):\n raise ValueError(\n 'Inconsistent number of instances ({}) and weights ({}).'.format(row_cnt, len(\n sample_weight)))\n for i in range(row_cnt):\n if sample_weight[i] != 0.0:\n self._partial_fit(X[i], y[i], sample_weight[i])\n return self\n\n def _partial_fit(self, X, y, weight):\n try:\n self._observed_class_distribution[y] += weight\n except KeyError:\n self._observed_class_distribution[y] = weight\n for i in range(len(X)):\n try:\n obs = self._attribute_observers[i]\n except KeyError:\n if i in self._nominal_attributes:\n obs = NominalAttributeClassObserver()\n else:\n obs = NumericAttributeClassObserverGaussian()\n self._attribute_observers[i] = obs\n obs.update(X[i], int(y), weight)\n\n def predict(self, X):\n \"\"\" Predict classes for the passed data.\n\n Parameters\n ----------\n X : numpy.ndarray of shape (n_samples, n_features)\n The set of data samples to predict the labels for.\n\n Returns\n -------\n A numpy.ndarray with all the predictions for the samples in X.\n\n \"\"\"\n r, _ = get_dimensions(X)\n predictions = deque()\n y_proba = self.predict_proba(X)\n for i in range(r):\n class_val = np.argmax(y_proba[i])\n predictions.append(class_val)\n return np.array(predictions)\n\n def predict_proba(self, X):\n \"\"\" Estimates the probability of each sample in X belonging to each of the class-labels.\n\n Parameters\n ----------\n X : Numpy.ndarray of shape (n_samples, n_features)\n The matrix of samples one wants to predict the class probabilities for.\n\n Returns\n -------\n A numpy.ndarray of shape (n_samples, n_labels), in which each outer entry is associated\n with the X entry of the same index. And where the list in index [i] contains\n len(self.target_values) elements, each of which represents the probability that\n the i-th sample of X belongs to a certain class-label.\n\n \"\"\"\n predictions = deque()\n r, _ = get_dimensions(X)\n if self._observed_class_distribution == {}:\n # Model is empty, all classes equal, default to zero\n return np.zeros((r, 1))\n else:\n for i in range(r):\n votes = do_naive_bayes_prediction(X[i], self._observed_class_distribution,\n self._attribute_observers)\n sum_values = sum(votes.values())\n if self._classes is not None:\n y_proba = np.zeros(int(max(self._classes)) + 1)\n else:\n y_proba = np.zeros(int(max(votes.keys())) + 1)\n for key, value in votes.items():\n y_proba[int(key)] = value / sum_values if sum_values != 0 else 0.0\n predictions.append(y_proba)\n return np.array(predictions)\n",
"import os\nimport numpy as np\nfrom skmultiflow.data.regression_generator import RegressionGenerator\n\n\ndef test_regression_generator(test_path):\n stream = RegressionGenerator(n_samples=100, n_features=20, n_targets=4, n_informative=6, random_state=0)\n\n assert stream.n_remaining_samples() == 100\n\n expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',\n 'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9',\n 'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14',\n 'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19']\n assert stream.feature_names == expected_names\n\n assert stream.target_values == [float] * stream.n_targets\n\n expected_names = ['target_0', 'target_1', 'target_2', 'target_3']\n assert stream.target_names == expected_names\n\n assert stream.n_features == 20\n\n assert stream.n_cat_features == 0\n\n assert stream.n_num_features == 20\n\n assert stream.n_targets == 4\n\n assert stream.get_data_info() == 'Regression Generator - 4 targets, 20 features'\n\n assert stream.has_more_samples() is True\n\n assert stream.is_restartable() is True\n\n # Load test data corresponding to first 10 instances\n test_file = os.path.join(test_path, 'regression_stream.npz')\n data = np.load(test_file)\n X_expected = data['X']\n y_expected = data['y']\n\n X, y = stream.next_sample()\n assert np.allclose(X[0], X_expected[0])\n assert np.allclose(y[0], y_expected[0])\n\n X, y = stream.last_sample()\n assert np.allclose(X[0], X_expected[0])\n assert np.allclose(y[0], y_expected[0])\n\n stream.restart()\n X, y = stream.next_sample(10)\n assert np.allclose(X, X_expected)\n assert np.allclose(y, y_expected)\n\n assert stream.n_targets == y.shape[1]\n\n assert stream.n_features == X.shape[1]\n\n assert 'stream' == stream._estimator_type\n\n expected_info = \"RegressionGenerator(n_features=20, n_informative=6, n_samples=100, n_targets=4,\\n\" \\\n \" random_state=0)\"\n assert stream.get_info() == expected_info\n",
"import numpy as np\nfrom skmultiflow.data.base_stream import Stream\nfrom skmultiflow.utils import check_random_state\n\n\nclass SEAGenerator(Stream):\n r\"\"\" SEA stream generator.\n\n This generator is an implementation of the data stream with abrupt\n concept drift, first described in Street and Kim's 'A streaming\n ensemble algorithm (SEA) for large-scale classification' [1]_.\n\n It generates 3 numerical attributes, that vary from 0 to 10, where\n only 2 of them are relevant to the classification task. A classification\n function is chosen, among four possible ones. These functions compare\n the sum of the two relevant attributes with a threshold value, unique\n for each of the classification functions. Depending on the comparison\n the generator will classify an instance as one of the two possible\n labels.\n\n The functions are:\n * Function 0: if :math:`(att1 + att2 \\leq 8)` else 1\n * Function 1: if :math:`(att1 + att2 \\leq 9)` else 1\n * Function 2: if :math:`(att1 + att2 \\leq 7)` else 1\n * Function 3: if :math:`(att1 + att2 \\leq 9.5)` else 1\n\n Concept drift can be introduced by changing the classification function.\n This can be done manually or using ``ConceptDriftStream``.\n\n This data stream has two additional parameters, the first is to balance classes, which\n means the class distribution will tend to a uniform one, and the possibility\n to add noise, which will, according to some probability, change the chosen\n label for an instance.\n\n Parameters\n ----------\n classification_function: int (Default: 0)\n Which of the four classification functions to use for the generation.\n This value can vary from 0 to 3, and the thresholds are, 8, 9, 7 and 9.5.\n\n random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n balance_classes: bool (Default: False)\n Whether to balance classes or not. If balanced, the class\n distribution will converge to a uniform distribution.\n\n noise_percentage: float (Default: 0.0)\n The probability that noise will happen in the generation. At each\n new sample generated, a random probability is generated, and if that\n probability is higher than the noise_percentage, the chosen label will\n be switched. From 0.0 to 1.0.\n\n References\n ----------\n .. [1] W. Nick Street and YongSeog Kim. 2001. A streaming ensemble algorithm (SEA)\n for large-scale classification. In Proceedings of the seventh ACM SIGKDD international\n conference on Knowledge discovery and data mining (KDD '01). ACM, New York, NY, USA,\n 377-382. DOI=http://dx.doi.org/10.1145/502512.502568\n\n Examples\n --------\n >>> # Imports\n >>> from skmultiflow.data.sea_generator import SEAGenerator\n >>> # Setting up the stream\n >>> stream = SEAGenerator(classification_function = 2, random_state = 112,\n ... 
balance_classes = False, noise_percentage = 0.28)\n >>> # Retrieving one sample\n >>> stream.next_sample()\n (array([[ 3.75057129, 6.4030462 , 9.50016579]]), array([ 0.]))\n >>> # Retrieving 10 samples\n >>> stream.next_sample(10)\n (array([[ 7.76929659, 8.32745763, 0.5480574 ],\n [ 8.85351458, 7.22346511, 0.02556032],\n [ 3.43419851, 0.94759888, 3.94642589],\n [ 7.3670683 , 9.55806869, 8.20609371],\n [ 3.78544458, 7.84763615, 0.86231513],\n [ 1.6222602 , 2.90069726, 0.45008172],\n [ 7.36533216, 8.39211485, 7.09361615],\n [ 9.8566856 , 3.88003308, 5.03154482],\n [ 6.8373245 , 7.21957381, 2.14152091],\n [ 0.75216155, 6.10890702, 4.25630425]]),\n array([ 1., 1., 1., 1., 1., 0., 0., 1., 1., 1.]))\n >>> # Generators will have infinite remaining instances, so it returns -1\n >>> stream.n_remaining_samples()\n -1\n >>> stream.has_more_samples()\n True\n\n \"\"\"\n\n def __init__(self, classification_function=0, random_state=None, balance_classes=False,\n noise_percentage=0.0):\n super().__init__()\n\n # Classification functions to use\n self._classification_functions = [self._classification_function_zero,\n self._classification_function_one,\n self._classification_function_two,\n self._classification_function_three]\n\n self.classification_function = classification_function\n self.random_state = random_state\n self.balance_classes = balance_classes\n self.noise_percentage = noise_percentage\n self.n_num_features = 3\n self.n_features = self.n_num_features\n self.n_classes = 2\n self.n_targets = 1\n self._random_state = None # This is the actual random_state object used internally\n self.next_class_should_be_zero = False\n self.name = \"SEA Generator\"\n\n self.target_names = [\"target_0\"]\n self.feature_names = [\"att_num_\" + str(i) for i in range(self.n_features)]\n self.target_values = [i for i in range(self.n_classes)]\n\n self._prepare_for_use()\n\n @property\n def classification_function(self):\n \"\"\" Retrieve the index of the current classification function.\n\n Returns\n -------\n int\n index of the classification function [0,1,2,3]\n \"\"\"\n return self._classification_function_idx\n\n @classification_function.setter\n def classification_function(self, classification_function_idx):\n \"\"\" Set the index of the current classification function.\n\n Parameters\n ----------\n classification_function_idx: int (0,1,2,3)\n \"\"\"\n if classification_function_idx in range(4):\n self._classification_function_idx = classification_function_idx\n else:\n raise ValueError(\"classification_function takes only these values: 0, 1, 2, 3, {} was \"\n \"passed\".format(classification_function_idx))\n\n @property\n def balance_classes(self):\n \"\"\" Retrieve the value of the option: Balance classes.\n\n Returns\n -------\n Boolean\n True is the classes are balanced\n \"\"\"\n return self._balance_classes\n\n @balance_classes.setter\n def balance_classes(self, balance_classes):\n \"\"\" Set the value of the option: Balance classes.\n\n Parameters\n ----------\n balance_classes: Boolean\n\n \"\"\"\n if isinstance(balance_classes, bool):\n self._balance_classes = balance_classes\n else:\n raise ValueError(\n \"balance_classes should be boolean, {} was passed\".format(balance_classes))\n\n @property\n def noise_percentage(self):\n \"\"\" Retrieve the value of the value of Noise percentage\n\n Returns\n -------\n float\n percentage of the noise\n \"\"\"\n return self._noise_percentage\n\n @noise_percentage.setter\n def noise_percentage(self, noise_percentage):\n \"\"\" Set the value of the value of noise 
percentage.\n\n Parameters\n ----------\n noise_percentage: float (0.0..1.0)\n\n \"\"\"\n if (0.0 <= noise_percentage) and (noise_percentage <= 1.0):\n self._noise_percentage = noise_percentage\n else:\n raise ValueError(\n \"noise percentage should be in [0.0..1.0], {} was passed\".format(noise_percentage))\n\n def _prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self.next_class_should_be_zero = False\n\n def next_sample(self, batch_size=1):\n \"\"\" Returns next sample from the stream.\n\n The sample generation works as follows: The three attributes are\n generated with the random generator, initialized with the seed passed\n by the user. Then, the classification function decides, as a function\n of the two relevant attributes, whether to classify the instance as\n class 0 or class 1. The next step is to verify if the classes should\n be balanced, and if so, balance the classes. The last step is to add\n noise, if the noise percentage is higher than 0.0.\n\n The generated sample will have 3 features, where only the two first\n are relevant, and 1 label (it has one classification task).\n\n Parameters\n ----------\n batch_size: int (optional, default=1)\n The number of samples to return.\n\n Returns\n -------\n tuple or tuple list\n Return a tuple with the features matrix and the labels matrix for\n the batch_size samples that were requested.\n\n \"\"\"\n data = np.zeros([batch_size, self.n_features + 1])\n\n for j in range(batch_size):\n self.sample_idx += 1\n att1 = att2 = att3 = 0.0\n group = 0\n desired_class_found = False\n while not desired_class_found:\n att1 = 10 * self._random_state.rand()\n att2 = 10 * self._random_state.rand()\n att3 = 10 * self._random_state.rand()\n group = self._classification_functions[self.classification_function](att1, att2,\n att3)\n\n if not self.balance_classes:\n desired_class_found = True\n else:\n if (self.next_class_should_be_zero and (group == 0)) or \\\n ((not self.next_class_should_be_zero) and (group == 1)):\n desired_class_found = True\n self.next_class_should_be_zero = not self.next_class_should_be_zero\n\n if 0.01 + self._random_state.rand() <= self.noise_percentage:\n group = 1 if (group == 0) else 0\n\n data[j, 0] = att1\n data[j, 1] = att2\n data[j, 2] = att3\n data[j, 3] = group\n\n self.current_sample_x = data[:, :self.n_features]\n self.current_sample_y = data[:, self.n_features:].flatten().astype(np.int64)\n\n return self.current_sample_x, self.current_sample_y\n\n def generate_drift(self):\n \"\"\"\n Generate drift by switching the classification function randomly.\n\n \"\"\"\n new_function = self._random_state.randint(4)\n while new_function == self.classification_function:\n new_function = self._random_state.randint(4)\n self.classification_function = new_function\n\n @staticmethod\n def _classification_function_zero(att1, att2, att3):\n \"\"\" classification_function_zero\n\n Decides the sample class label based on the sum of att1 and att2,\n and the threshold value of 8.\n\n Parameters\n ----------\n att1: float\n First numeric attribute.\n\n att2: float\n Second numeric attribute.\n\n att3: float\n Third numeric attribute.\n\n Returns\n -------\n int\n Returns the sample class label, either 0 or 1.\n\n \"\"\"\n return 0 if (att1 + att2 <= 8) else 1\n\n @staticmethod\n def _classification_function_one(att1, att2, att3):\n \"\"\" classification_function_one\n\n Decides the sample class label based on the sum of att1 and att2,\n and the threshold value of 9.\n\n Parameters\n ----------\n att1: 
float\n First numeric attribute.\n\n att2: float\n Second numeric attribute.\n\n att3: float\n Third numeric attribute.\n\n Returns\n -------\n int\n Returns the sample class label, either 0 or 1.\n\n \"\"\"\n return 0 if (att1 + att2 <= 9) else 1\n\n @staticmethod\n def _classification_function_two(att1, att2, att3):\n \"\"\" classification_function_two\n\n Decides the sample class label based on the sum of att1 and att2,\n and the threshold value of 7.\n\n Parameters\n ----------\n att1: float\n First numeric attribute.\n\n att2: float\n Second numeric attribute.\n\n att3: float\n Third numeric attribute.\n\n Returns\n -------\n int\n Returns the sample class label, either 0 or 1.\n\n \"\"\"\n return 0 if (att1 + att2 <= 7) else 1\n\n @staticmethod\n def _classification_function_three(att1, att2, att3):\n \"\"\" classification_function_three\n\n Decides the sample class label based on the sum of att1 and att2,\n and the threshold value of 9.5.\n\n Parameters\n ----------\n att1: float\n First numeric attribute.\n\n att2: float\n Second numeric attribute.\n\n att3: float\n Third numeric attribute.\n\n Returns\n -------\n int\n Returns the sample class label, either 0 or 1.\n\n \"\"\"\n return 0 if (att1 + att2 <= 9.5) else 1\n"
] |
[
[
"numpy.log",
"numpy.zeros",
"numpy.power",
"numpy.sqrt",
"numpy.absolute"
],
[
"numpy.sum",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros"
],
[
"numpy.array",
"sklearn.linear_model.SGDRegressor",
"sklearn.set_config",
"sklearn.datasets.make_regression"
],
[
"numpy.array",
"numpy.ones",
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.load"
],
[
"numpy.zeros"
]
] |
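The OnlineUnderOverBaggingClassifier source in the record above derives its online resampling from the observation that sampling a fraction C/N of a stream is approximated by drawing k ~ Poisson(lambda = C), with lambda scaled per estimator and boosted for the positive class. A minimal standalone sketch of that weighting step, assuming only numpy; the function name and arguments are illustrative and not part of the scikit-multiflow API.

import numpy as np

def poisson_replications(estimator_idx, n_estimators, y, sampling_rate=2, rng=None):
    # Mirrors the partial_fit loop above: estimator i uses a growing factor
    # a = (i + 1) / n_estimators, and positive (minority) samples get
    # lambda = a * sampling_rate while the rest keep lambda = a.
    rng = rng if rng is not None else np.random.RandomState(0)
    a = (estimator_idx + 1) / n_estimators
    lam = a * sampling_rate if y == 1 else a
    # k ~ Poisson(lambda) is how many times the sample is replayed to estimator i.
    return rng.poisson(lam)

# Later ensemble members replay positive samples more often on average.
print([poisson_replications(i, 10, y=1) for i in range(10)])

In the classifier itself this count drives repeated partial_fit calls on each base estimator, and ADWIN separately resets the weakest member when a drift is detected.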
Bobholamovic/ever
|
[
"f38060674a40ed53072b9d9be99cc656a830398f"
] |
[
"ever/module/_hrnet.py"
] |
[
"\"\"\" Modified from offical repo and mmlab's repo of HRNet\nMIT License\nCopyright (c) 2019 Microsoft\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.model_zoo import load_url as load_state_dict_from_url\n\nBatchNorm2d = nn.BatchNorm2d\n\nBN_MOMENTUM = 0.1\n\n__all__ = ['HighResolutionNet', 'HighResolutionModule',\n 'hrnetv2_w18', 'hrnetv2_w32', 'hrnetv2_w40']\n\nmodel_urls = {\n 'hrnetv2_w18': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w18-00eb2006.pth',\n 'hrnetv2_w32': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth',\n 'hrnetv2_w40': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w40-ed0b031c.pth',\n 'hrnetv2_w48': 'https://download.openmmlab.com/pretrain/third_party/hrnetv2_w48-d2186c55.pth'\n}\nmodel_extra = dict(\n hrnetv2_w18=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4,),\n num_channels=(64,),\n fuse_method='SUM'),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(18, 36),\n fuse_method='SUM'),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(18, 36, 72),\n fuse_method='SUM'),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(18, 36, 72, 144),\n fuse_method='SUM')),\n hrnetv2_w32=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4,),\n num_channels=(64,),\n fuse_method='SUM'),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(32, 64),\n fuse_method='SUM'),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(32, 64, 128),\n fuse_method='SUM'),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(32, 64, 128, 256),\n fuse_method='SUM')),\n hrnetv2_w40=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4,),\n num_channels=(64,),\n fuse_method='SUM'),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(40, 80),\n fuse_method='SUM'),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(40, 80, 160),\n fuse_method='SUM'),\n stage4=dict(\n num_modules=3,\n 
num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(40, 80, 160, 320),\n fuse_method='SUM')),\n hrnetv2_w48=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4,),\n num_channels=(64,),\n fuse_method='SUM'),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(48, 96),\n fuse_method='SUM'),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(48, 96, 192),\n fuse_method='SUM'),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(48, 96, 192, 384),\n fuse_method='SUM'))\n)\n\n\ndef constant_init(module, val, bias=0):\n nn.init.constant_(module.weight, val)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef kaiming_init(module,\n a=0,\n mode='fan_out',\n nonlinearity='relu',\n bias=0,\n distribution='normal'):\n assert distribution in ['uniform', 'normal']\n if distribution == 'uniform':\n nn.init.kaiming_uniform_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n else:\n nn.init.kaiming_normal_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super(HighResolutionModule, self).__init__()\n 
self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n\n self.multi_scale_output = multi_scale_output\n\n self.branches = self._make_branches(\n num_branches, blocks, num_blocks, num_channels)\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(False)\n\n def _check_branches(self, num_branches, blocks, num_blocks,\n num_inchannels, num_channels):\n if num_branches != len(num_blocks):\n error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(\n num_branches, len(num_blocks))\n raise ValueError(error_msg)\n\n if num_branches != len(num_channels):\n error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(\n num_branches, len(num_channels))\n raise ValueError(error_msg)\n\n if num_branches != len(num_inchannels):\n error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(\n num_branches, len(num_inchannels))\n raise ValueError(error_msg)\n\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels,\n stride=1):\n downsample = None\n if stride != 1 or \\\n self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.num_inchannels[branch_index],\n num_channels[branch_index] * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(num_channels[branch_index] * block.expansion,\n momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index], stride, downsample))\n self.num_inchannels[branch_index] = \\\n num_channels[branch_index] * block.expansion\n for i in range(1, num_blocks[branch_index]):\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index]))\n\n return nn.Sequential(*layers)\n\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\n branches = []\n\n for i in range(num_branches):\n branches.append(\n self._make_one_branch(i, block, num_blocks, num_channels))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n num_inchannels = self.num_inchannels\n fuse_layers = []\n for i in range(num_branches if self.multi_scale_output else 1):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False),\n BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM),\n nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i - j):\n if k == i - j - 1:\n num_outchannels_conv3x3 = num_inchannels[i]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)))\n else:\n num_outchannels_conv3x3 = num_inchannels[j]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3,\n momentum=BN_MOMENTUM),\n nn.ReLU(False)))\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def get_num_inchannels(self):\n return self.num_inchannels\n\n def forward(self, x):\n if self.num_branches == 1:\n return [self.branches[0](x[0])]\n\n for i in range(self.num_branches):\n 
x[i] = self.branches[i](x[i])\n\n x_fuse = []\n for i in range(len(self.fuse_layers)):\n if i == 0:\n y = x[0]\n else:\n y = self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nblocks_dict = {\n 'BASIC': BasicBlock,\n 'BOTTLENECK': Bottleneck\n}\n\n\nclass HighResolutionNet(nn.Module):\n\n def __init__(self,\n extra,\n norm_eval=True,\n zero_init_residual=False,\n frozen_stages=-1):\n super(HighResolutionNet, self).__init__()\n self.norm_eval = norm_eval\n self.frozen_stages = frozen_stages\n self.zero_init_residual = zero_init_residual\n # for\n self.extra = extra\n # stem network\n # stem net\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n\n # stage 1\n self.stage1_cfg = self.extra['stage1']\n num_channels = self.stage1_cfg['num_channels'][0]\n block_type = self.stage1_cfg['block']\n num_blocks = self.stage1_cfg['num_blocks'][0]\n\n block = blocks_dict[block_type]\n stage1_out_channels = num_channels * block.expansion\n self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n\n # stage 2\n self.stage2_cfg = self.extra['stage2']\n num_channels = self.stage2_cfg['num_channels']\n block_type = self.stage2_cfg['block']\n\n block = blocks_dict[block_type]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels)\n # num_modules, num_branches, num_blocks, num_channels, block, fuse_method, num_inchannels\n self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)\n\n # stage 3\n self.stage3_cfg = self.extra['stage3']\n num_channels = self.stage3_cfg['num_channels']\n block_type = self.stage3_cfg['block']\n\n block = blocks_dict[block_type]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)\n self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)\n\n # stage 4\n self.stage4_cfg = self.extra['stage4']\n num_channels = self.stage4_cfg['num_channels']\n block_type = self.stage4_cfg['block']\n\n block = blocks_dict[block_type]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)\n self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels)\n\n self._frozen_stages()\n\n def _make_transition_layer(\n self, num_channels_pre_layer, num_channels_cur_layer):\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(nn.Sequential(\n nn.Conv2d(num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n 3,\n 1,\n 1,\n bias=False),\n BatchNorm2d(\n num_channels_cur_layer[i], momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i + 1 - num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n 
outchannels = num_channels_cur_layer[i] \\\n if j == i - num_branches_pre else inchannels\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(\n inchannels, outchannels, 3, 2, 1, bias=False),\n BatchNorm2d(outchannels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n transition_layers.append(nn.Sequential(*conv3x3s))\n\n return nn.ModuleList(transition_layers)\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _frozen_stages(self):\n # frozen stage 1 or stem networks\n if self.frozen_stages >= 0:\n for m in [self.conv1, self.bn1, self.conv2, self.bn2]:\n for param in m.parameters():\n param.requires_grad = False\n if self.frozen_stages == 1:\n for param in self.layer1.parameters():\n param.requires_grad = False\n\n def _make_stage(self, layer_config, num_inchannels,\n multi_scale_output=True):\n num_modules = layer_config['num_modules']\n num_branches = layer_config['num_branches']\n num_blocks = layer_config['num_blocks']\n num_channels = layer_config['num_channels']\n block = blocks_dict[layer_config['block']]\n fuse_method = layer_config['fuse_method']\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n\n modules.append(\n HighResolutionModule(num_branches,\n block,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n reset_multi_scale_output)\n )\n num_inchannels = modules[-1].get_num_inchannels()\n\n return nn.Sequential(*modules), num_inchannels\n\n def forward(self, x):\n # h, w = x.size(2), x.size(3)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.layer1(x)\n\n x_list = []\n for i in range(self.stage2_cfg['num_branches']):\n if self.transition1[i] is not None:\n x_list.append(self.transition1[i](x))\n else:\n x_list.append(x)\n y_list = self.stage2(x_list)\n\n x_list = []\n for i in range(self.stage3_cfg['num_branches']):\n if self.transition2[i] is not None:\n x_list.append(self.transition2[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage3(x_list)\n\n x_list = []\n for i in range(self.stage4_cfg['num_branches']):\n if self.transition3[i] is not None:\n x_list.append(self.transition3[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage4(x_list)\n\n return y_list\n\n def train(self, mode=True):\n super(HighResolutionNet, self).train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n\ndef hrnetv2_w18(pretrained=False, weight_path=None, norm_eval=False, frozen_stages=-1):\n model = HighResolutionNet(model_extra['hrnetv2_w18'], norm_eval, zero_init_residual=False,\n frozen_stages=frozen_stages)\n if pretrained:\n if weight_path is not None:\n state_dict = torch.load(weight_path, map_location=torch.device(\"cpu\"))\n else:\n state_dict = 
load_state_dict_from_url(model_urls['hrnetv2_w18'],\n progress=True)\n model.load_state_dict(state_dict, strict=False)\n return model\n\n\ndef hrnetv2_w32(pretrained=False, weight_path=None, norm_eval=False, frozen_stages=-1):\n model = HighResolutionNet(model_extra['hrnetv2_w32'], norm_eval, zero_init_residual=False,\n frozen_stages=frozen_stages)\n if pretrained:\n if weight_path is not None:\n state_dict = torch.load(weight_path, map_location=torch.device(\"cpu\"))\n else:\n state_dict = load_state_dict_from_url(model_urls['hrnetv2_w32'],\n progress=True)\n model.load_state_dict(state_dict, strict=False)\n return model\n\n\ndef hrnetv2_w40(pretrained=False, weight_path=None, norm_eval=False, frozen_stages=-1):\n model = HighResolutionNet(model_extra['hrnetv2_w40'], norm_eval, zero_init_residual=False,\n frozen_stages=frozen_stages)\n if pretrained:\n if weight_path is not None:\n state_dict = torch.load(weight_path, map_location=torch.device(\"cpu\"))\n else:\n state_dict = load_state_dict_from_url(model_urls['hrnetv2_w40'],\n progress=True)\n model.load_state_dict(state_dict, strict=False)\n return model\n\n\ndef hrnetv2_w48(pretrained=False, weight_path=None, norm_eval=False, frozen_stages=-1):\n model = HighResolutionNet(model_extra['hrnetv2_w48'], norm_eval, zero_init_residual=False,\n frozen_stages=frozen_stages)\n if pretrained:\n if weight_path is not None:\n state_dict = torch.load(weight_path, map_location=torch.device(\"cpu\"))\n else:\n state_dict = load_state_dict_from_url(model_urls['hrnetv2_w48'],\n progress=True)\n model.load_state_dict(state_dict, strict=False)\n return model\n"
] |
[
[
"torch.device",
"torch.nn.init.kaiming_uniform_",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d"
]
] |
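The HighResolutionNet file above keeps four parallel branches at different resolutions and returns all of them from forward(). A small usage sketch, assuming PyTorch is installed and the `ever` package is importable under the file path shown (ever/module/_hrnet.py); the shapes in the comments follow from the two stride-2 stem convolutions plus the per-branch downsampling.

import torch
from ever.module._hrnet import hrnetv2_w18  # module path taken from the record's file_path

# Build the backbone without trying to download pretrained weights.
model = hrnetv2_w18(pretrained=False).eval()

with torch.no_grad():
    # The stem downsamples by 4 and each additional branch halves the resolution,
    # so a 224x224 input yields features at strides 4, 8, 16 and 32.
    feats = model(torch.randn(1, 3, 224, 224))

for f in feats:
    print(tuple(f.shape))
# Expected for hrnetv2_w18: (1, 18, 56, 56), (1, 36, 28, 28), (1, 72, 14, 14), (1, 144, 7, 7)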
WenmuZhou/crnn.pytorch
|
[
"bf7a7c62376eee93943ca7c68e88e3d563c09aa8"
] |
[
"data_loader/modules/Text_Image_Augmentation_python/augment.py"
] |
[
"# -*- coding:utf-8 -*-\n# Author: RubanSeven\n\n# import cv2\nimport numpy as np\n# from transform import get_perspective_transform, warp_perspective\nfrom .warp_mls import WarpMLS\n\n\ndef distort(src, segment):\n img_h, img_w = src.shape[:2]\n\n cut = img_w // segment\n thresh = cut // 3\n # thresh = img_h // segment // 3\n # thresh = img_h // 5\n\n src_pts = list()\n dst_pts = list()\n\n src_pts.append([0, 0])\n src_pts.append([img_w, 0])\n src_pts.append([img_w, img_h])\n src_pts.append([0, img_h])\n\n dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])\n dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])\n dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])\n dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])\n\n half_thresh = thresh * 0.5\n\n for cut_idx in np.arange(1, segment, 1):\n src_pts.append([cut * cut_idx, 0])\n src_pts.append([cut * cut_idx, img_h])\n dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,\n np.random.randint(thresh) - half_thresh])\n dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,\n img_h + np.random.randint(thresh) - half_thresh])\n\n trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)\n dst = trans.generate()\n\n return dst\n\n\ndef stretch(src, segment):\n img_h, img_w = src.shape[:2]\n\n cut = img_w // segment\n thresh = cut * 4 // 5\n # thresh = img_h // segment // 3\n # thresh = img_h // 5\n\n src_pts = list()\n dst_pts = list()\n\n src_pts.append([0, 0])\n src_pts.append([img_w, 0])\n src_pts.append([img_w, img_h])\n src_pts.append([0, img_h])\n\n dst_pts.append([0, 0])\n dst_pts.append([img_w, 0])\n dst_pts.append([img_w, img_h])\n dst_pts.append([0, img_h])\n\n half_thresh = thresh * 0.5\n\n for cut_idx in np.arange(1, segment, 1):\n move = np.random.randint(thresh) - half_thresh\n src_pts.append([cut * cut_idx, 0])\n src_pts.append([cut * cut_idx, img_h])\n dst_pts.append([cut * cut_idx + move, 0])\n dst_pts.append([cut * cut_idx + move, img_h])\n\n trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)\n dst = trans.generate()\n\n return dst\n\n\ndef perspective(src):\n img_h, img_w = src.shape[:2]\n\n thresh = img_h // 2\n\n src_pts = list()\n dst_pts = list()\n\n src_pts.append([0, 0])\n src_pts.append([img_w, 0])\n src_pts.append([img_w, img_h])\n src_pts.append([0, img_h])\n\n dst_pts.append([0, np.random.randint(thresh)])\n dst_pts.append([img_w, np.random.randint(thresh)])\n dst_pts.append([img_w, img_h - np.random.randint(thresh)])\n dst_pts.append([0, img_h - np.random.randint(thresh)])\n\n trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)\n dst = trans.generate()\n\n return dst\n\n# def distort(src, segment):\n# img_h, img_w = src.shape[:2]\n# dst = np.zeros_like(src, dtype=np.uint8)\n#\n# cut = img_w // segment\n# thresh = img_h // 8\n#\n# src_pts = list()\n# # dst_pts = list()\n#\n# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])\n# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])\n#\n# # dst_pts.append([0, 0])\n# # dst_pts.append([0, img_h])\n# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)\n#\n# half_thresh = thresh * 0.5\n#\n# for cut_idx in np.arange(1, segment, 1):\n# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,\n# np.random.randint(thresh) - half_thresh])\n# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,\n# img_h + 
np.random.randint(thresh) - half_thresh])\n#\n# # dst_pts.append([cut * i, 0])\n# # dst_pts.append([cut * i, img_h])\n#\n# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)\n#\n# # mat = cv2.getPerspectiveTransform(src_box, dst_box)\n# # print(mat)\n# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))\n#\n# mat = get_perspective_transform(dst_box, src_box)\n# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))\n# # print(mat)\n#\n# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,\n# np.random.randint(thresh) - half_thresh])\n# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,\n# img_h + np.random.randint(thresh) - half_thresh])\n# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)\n#\n# # mat = cv2.getPerspectiveTransform(src_box, dst_box)\n# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))\n# mat = get_perspective_transform(dst_box, src_box)\n# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))\n#\n# return dst\n"
] |
[
[
"numpy.random.randint",
"numpy.arange"
]
] |
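augment.py above wraps a moving-least-squares warp (WarpMLS) in three text-image augmentations: distort and stretch jitter evenly spaced cut points along the width, while perspective only shifts the corners vertically. A usage sketch under the assumption that the module can be imported from the repo path shown and that WarpMLS.generate() returns an image of the requested img_w by img_h size.

import numpy as np
# Import path mirrors the record's file_path; adjust to however the package is laid out locally.
from data_loader.modules.Text_Image_Augmentation_python.augment import distort, stretch, perspective

# A dummy text-line crop: 32 px high, 200 px wide, 3 channels.
img = (np.random.rand(32, 200, 3) * 255).astype(np.uint8)

# `segment` sets how many vertical cuts receive independent random offsets;
# more segments means a smaller cut width and threshold, hence gentler local warping.
warped = distort(img, segment=4)
stretched = stretch(img, segment=4)
tilted = perspective(img)

print(warped.shape, stretched.shape, tilted.shape)  # all stay (32, 200, 3) under the assumption above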
1MT3J45/ML-DroughtAnalysisNLP
|
[
"861b70acaabe9f28b887eef6661b399e7f9ca8d6"
] |
[
"freqWordSelection.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import with_statement\n\nfrom replacers import *\nimport pandas as pd\nimport nltk\nimport subprocess\n\n\ndef findFreqWord(fuzzyDF):\n f1 = fuzzyDF # pd.read_csv(\"SubmittedCSV/fuzzy.csv\")\n f2 = pd.DataFrame(columns=['Tweets', 'Classified', 'FreqWord'])\n f3 = pd.read_csv(\"SubmittedCSV/fuzzyptag.csv\", )\n\n pop_list = list(f3.iloc[:, 0])\n\n for zero_cl_row in range(f1.__len__()):\n row = 1\n found = False\n splitted_sentence = f1.iloc[zero_cl_row, 0].split()\n print(splitted_sentence)\n for tag in pop_list:\n print(\"Popular tags:\", pop_list)\n for word in splitted_sentence:\n\n if word in tag and f1.iloc[zero_cl_row, 1] == \"Highly Positive\":\n f2 = f2.append(\n {'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Positive', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n elif word in tag and f1.iloc[zero_cl_row, 1] == \"Highly Negative\":\n f2 = f2.append(\n {'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Negative', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n elif word in tag and f1.iloc[zero_cl_row, 1] == \"Moderately Positive\":\n f2 = f2.append(\n {'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Positive', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n elif word in tag and f1.iloc[zero_cl_row, 1] == \"Moderately Negative\":\n f2 = f2.append(\n {'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Negative', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n elif word in tag and f1.iloc[zero_cl_row, 1] == \"Positive\":\n f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Positive', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n elif word in tag and f1.iloc[zero_cl_row, 1] == \"Negative\":\n f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Negative', 'FreqWord': tag},\n ignore_index=True)\n found = True\n row += 1\n else:\n print(\"Unmatched\")\n if not found:\n print(\"NO\")\n f2.to_csv(\"SubmittedCSV/fuzzyfreq.csv\", index=False)\n try:\n subprocess.call(['libreoffice','--calc','SubmittedCSV/fuzzyfreq.csv'])\n except OSError:\n print(\"Works with DEBIAN OS & LIBREOFFICE 5 only \\n Use MS Excel or equivalent Software to open : \"\n \"SubmittedCSV/fuzzyfreq.csv\")\n return f2\n\ndef pivotTable():\n pass\n\n\n# ---------------------------------- SUBMITTED LOGIC - TEST CASE\n# ---------------------------------- #01 UNIT TESTING FAILED ##10, 11, 27, 30\n# ---------------------------------- #02 LOGICAL GLITCH\n# ---------------------------------- #03 COMPLIANCE MISUSE\n# ---------------------------------- #04 MEMDUMP DETECTED\n# ---------------------------------- #05 UNUSED OBJECTS, MEMORY BLOCK 0x0008\n# for hosts_row in f1:\n# row = 1\n# found = False\n# # t1=nltk.word_tokenize(hosts_row[0])\n# t1 = hosts_row.split()\n# print(\"t1=\", t1)\n# for master_row in pop_list:\n# print(\"popular tags=\", pop_list)\n# for word in t1:\n#\n# if word == master_row[0] and hosts_row[1] == \"Highly Positive\":\n# # >>> master_row[0] # Logical glitch, value uncompilable\n# # 'b'\n# f2.write(str(hosts_row[1]) + \",\" + word) # Will always look for 1st element of string\n# # >>> hosts_row\n# # ' neville rooney end ever tons trophy drought httpcocryingeyesjebfkdp,Positive\\r\\n'\n# # >>> hosts_row[1]\n# # 'n'\n# found = True\n# row = row + 1\n#\n# elif word == master_row[0] and hosts_row[1] == \"Highly Negative\":\n# f2.write(str(hosts_row[1]) + 
\",\" + str(master_row[0]))\n# found = True\n# row = row + 1\n# elif word == master_row[0] and hosts_row[1] == \"Moderately Positive\":\n# f2.write(str(hosts_row[1]) + \",\" + str(master_row[0]))\n# found = True\n# row = row + 1\n# elif word == master_row[0] and hosts_row[1] == \"Moderately Negative\":\n# f2.write(str(hosts_row[1]) + \",\" + str(master_row[0]))\n# found = True\n# row = row + 1\n# elif word == master_row[0] and hosts_row[1] == \"Positive\":\n# f2.write(str(hosts_row[1]) + \",\" + str(master_row[0]))\n# # >>> master_row[0]\n# # 'business'\n# # >>> hosts_row[1]\n# # 'n'\n# found = True\n# row = row + 1\n# elif word == master_row[0] and hosts_row[1] == \"Negative\":\n# f2.write(str(hosts_row[1]) + \",\" + str(master_row[0]))\n# found = True\n# row = row + 1\n#\n# # print count\n# if not found:\n# print(\"no\")\n#\n# print(count)\n# f1.close()\n# f2.close()\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
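A minimal sketch of the tag-matching loop in findFreqWord above, collecting matches in a list and building the result frame once rather than calling DataFrame.append per row (which newer pandas removes); the file names and sample values here are made up.

import pandas as pd

# Hypothetical stand-ins for the fuzzy.csv rows and the popular-tag list used above.
fuzzy_df = pd.DataFrame({
    "Tweets": ["rooney ends tons trophy drought", "awful referee decision today"],
    "Classified": ["Highly Positive", "Negative"],
})
pop_tags = ["trophy", "referee"]

rows = []
for _, row in fuzzy_df.iterrows():
    for tag in pop_tags:
        # Same membership test as the original loop: any word of the tweet inside the tag.
        if any(word in tag for word in row["Tweets"].split()):
            rows.append({"Tweets": row["Tweets"],
                         "Classified": row["Classified"],
                         "FreqWord": tag})

freq_df = pd.DataFrame(rows, columns=["Tweets", "Classified", "FreqWord"])
freq_df.to_csv("fuzzyfreq_example.csv", index=False)
print(freq_df)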
jzlotek/drexel-tms-parser
|
[
"9d5582a441d490a8099252994f9e41dca36acf81"
] |
[
"src/plotter.py"
] |
[
"import io\nfrom sdk.db import database\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom numba import jit\nfrom utils import logger\n\n\nsemester_order = ['FA', 'WI', 'SP', 'SU']\n\ndef get_section(x):\n return (\n x.get('semester'),\n x.get('year')\n )\n\n \ndef filter_function(sections):\n d = {}\n for x in sections:\n if x not in d.keys():\n d.update({x: 1})\n else:\n d[x] += 1\n return sorted(list(d.items()), key=lambda k: k[0][1] + semester_order.index(k[0][0]) / 10)\n\ndef get_plot(sc, cn, isQuarter=True) -> io.BytesIO:\n query = database.get_query()\n database.isQuarter(isQuarter, query)\n database.subject_code(sc, query)\n database.course_number(cn, query)\n q = database.execute(query)\n q = filter_function(map(get_section, q))\n\n keys = range(0, len(q))\n vals = [x[1] for x in q]\n\n buffer = io.BytesIO()\n plt.plot(keys, vals)\n plt.xticks(np.arange(len(q)), [f'{x[0][0]} \\'{x[0][1]}' for x in q])\n plt.savefig(buffer, format='png')\n buffer.seek(0)\n plt.close()\n\n return buffer"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close"
]
] |
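A self-contained sketch of the in-memory plotting pattern used by get_plot above: render a line chart with matplotlib and hand back the PNG bytes without touching disk. The labels and counts are made up.

import io

import matplotlib
matplotlib.use("Agg")  # headless backend so no display is required
import matplotlib.pyplot as plt


def plot_counts_png(labels, counts) -> io.BytesIO:
    """Plot counts per label and return the figure as an in-memory PNG buffer."""
    buffer = io.BytesIO()
    plt.plot(range(len(counts)), counts)
    plt.xticks(range(len(labels)), labels)
    plt.savefig(buffer, format="png")
    buffer.seek(0)
    plt.close()
    return buffer


png = plot_counts_png(["FA '19", "WI '20", "SP '20"], [3, 5, 4])
print(len(png.getvalue()), "bytes of PNG data")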
marshuang80/NucleiSegmentation
|
[
"aef5196b6fd906bff3866156e205d4f787710bde"
] |
[
"process_data/nuclei_create_hdf5.py"
] |
[
"import numpy as np\nimport h5py\nimport argparse\nimport imageio\nimport tqdm\nimport os\nfrom glob import glob\n\n\ndef main(args):\n \"\"\"Main function to parse in Nuclei Dataset from Kaggle and store as HDF5\n\n Parameters\n ----------\n args: ArgumentParser()\n input_dir: str\n directory of the Nuclei data\n output_dir: str\n path to the HDF5 output directory\n \"\"\"\n\n # create hdf5 \n hdf5_fn = h5py.File(os.path.join(args.output_dir, \"data_360.hdf5\"), \"a\")\n\n # get all data directory\n data_dirs = glob(os.path.join(args.input_dir, \"*/\"))\n\n with tqdm.tqdm(total=len(data_dirs), unit=\"folder\") as progress_bar:\n for path in data_dirs:\n\n data_name = path.split(\"/\")[-2]\n x, y, masks = parse_data(path)\n\n # TODO only use majority size for now\n if x is None:\n progress_bar.update(1)\n continue\n\n # stack x and y together\n y = np.expand_dims(y, axis=0)\n data = np.vstack((x,y,masks))\n\n hdf5_fn.create_dataset(str(data_name), data=data, dtype=np.float, chunks=True)\n progress_bar.update(1)\n hdf5_fn.close()\n\n\ndef parse_data(path):\n\n # define data folders\n x_path = os.path.join(path, \"images/\")\n y_path = os.path.join(path, \"masks/\")\n\n # get all data paths \n x_file = glob(os.path.join(x_path, \"*.png\"))[0]\n y_files = glob(os.path.join(y_path, \"*.png\"))\n\n # parse in data\n x = imageio.imread(x_file)\n\n # TODO only using majority shape\n if x.shape != (256, 256, 4):\n return None, None, None\n\n masks = np.array([imageio.imread(y) for y in y_files])\n y = np.zeros_like(masks[0])\n for y_raw in masks:\n y = np.maximum(y, y_raw)\n\n # normalize\n x = x / 255.0\n y = y / 255.0\n masks = masks / 255.0\n\n # fix dimentions\n x = np.transpose(x, (2,0,1)) # channels first\n\n return x, y, masks\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_dir', type=str)\n parser.add_argument('--output_dir', type=str)\n\n args = parser.parse_args()\n\n main(args)\n"
] |
[
[
"numpy.zeros_like",
"numpy.transpose",
"numpy.expand_dims",
"numpy.vstack",
"numpy.maximum"
]
] |
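A small sketch of the stacking step in main/parse_data above, with random arrays standing in for the image, the merged mask and the per-instance masks; the HDF5 file and dataset names are arbitrary.

import numpy as np
import h5py

# Stand-ins for one parsed example: channels-first image, three instance masks.
x = np.random.rand(4, 256, 256)
masks = np.random.rand(3, 256, 256)

# Merge the instance masks into a single mask, as in parse_data.
y = np.zeros_like(masks[0])
for m in masks:
    y = np.maximum(y, m)

y = np.expand_dims(y, axis=0)        # give the merged mask a channel axis
data = np.vstack((x, y, masks))      # image + merged mask + instances -> (8, 256, 256)

with h5py.File("data_example.hdf5", "w") as f:
    f.create_dataset("sample_0", data=data, dtype=np.float64, chunks=True)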
WuHaobo/Paddle
|
[
"f430799bc82ee327d14a04c3bb1ea712d39d1cef"
] |
[
"python/paddle/fluid/tests/unittests/test_roll_op.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport paddle\nimport numpy as np\nimport paddle.fluid.core as core\nfrom op_test import OpTest\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\n\n\nclass TestRollOp(OpTest):\n def setUp(self):\n self.op_type = \"roll\"\n self.init_dtype_type()\n self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)}\n self.attrs = {'shifts': self.shifts, 'dims': self.dims}\n self.outputs = {\n 'Out': np.roll(self.inputs['X'], self.attrs['shifts'],\n self.attrs['dims'])\n }\n\n def init_dtype_type(self):\n self.dtype = np.float64\n self.x_shape = (100, 4, 5)\n self.shifts = [101, -1]\n self.dims = [0, -2]\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad_normal(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestRollOpCase2(TestRollOp):\n def init_dtype_type(self):\n self.dtype = np.float32\n self.x_shape = (100, 10, 5)\n self.shifts = [8, -1]\n self.dims = [-1, -2]\n\n\nclass TestRollAPI(unittest.TestCase):\n def input_data(self):\n self.data_x = np.array(\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n\n def test_roll_op_api(self):\n self.input_data()\n\n # case 1:\n with program_guard(Program(), Program()):\n x = fluid.layers.data(name='x', shape=[-1, 3])\n z = paddle.roll(x, shifts=1)\n exe = fluid.Executor(fluid.CPUPlace())\n res, = exe.run(feed={'x': self.data_x},\n fetch_list=[z.name],\n return_numpy=False)\n expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0],\n [6.0, 7.0, 8.0]])\n self.assertTrue(np.allclose(expect_out, np.array(res)))\n\n # case 2:\n with program_guard(Program(), Program()):\n x = fluid.layers.data(name='x', shape=[-1, 3])\n z = paddle.roll(x, shifts=1, dims=0)\n exe = fluid.Executor(fluid.CPUPlace())\n res, = exe.run(feed={'x': self.data_x},\n fetch_list=[z.name],\n return_numpy=False)\n expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0]])\n self.assertTrue(np.allclose(expect_out, np.array(res)))\n\n def test_dygraph_api(self):\n self.input_data()\n # case 1:\n with fluid.dygraph.guard():\n x = fluid.dygraph.to_variable(self.data_x)\n z = paddle.roll(x, shifts=1)\n np_z = z.numpy()\n expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0],\n [6.0, 7.0, 8.0]])\n self.assertTrue(np.allclose(expect_out, np_z))\n\n # case 2:\n with fluid.dygraph.guard():\n x = fluid.dygraph.to_variable(self.data_x)\n z = paddle.roll(x, shifts=1, dims=0)\n np_z = z.numpy()\n expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0]])\n self.assertTrue(np.allclose(expect_out, np_z))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.allclose",
"numpy.array",
"numpy.roll",
"numpy.random.random"
]
] |
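A quick check of the np.roll behaviour the test above relies on, using the same 3x3 values as TestRollAPI: rolling the flattened array by one versus rolling along axis 0.

import numpy as np

data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])

# Flattened roll by 1: the last element wraps around to the front.
expected_flat = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
assert np.allclose(np.roll(data, shift=1), expected_flat)

# Roll by 1 along axis 0: the last row wraps to the top.
expected_axis0 = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
assert np.allclose(np.roll(data, shift=1, axis=0), expected_axis0)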
willwhitney/sac_dists
|
[
"d1374caab080d9fb4b8e762f501501843686cf13"
] |
[
"distributions/squashed_normal.py"
] |
[
"from torch import nn\nimport torch.nn.functional as F\nfrom torch import distributions as pyd\n\n\nclass TanhTransform(pyd.transforms.Transform):\n domain = pyd.constraints.real\n codomain = pyd.constraints.interval(-1.0, 1.0)\n bijective = True\n sign = +1\n\n def __init__(self, cache_size=1):\n super().__init__(cache_size=cache_size)\n\n @staticmethod\n def atanh(x):\n return 0.5 * (x.log1p() - (-x).log1p())\n\n def __eq__(self, other):\n return isinstance(other, TanhTransform)\n\n def _call(self, x):\n return x.tanh()\n\n def _inverse(self, y):\n # We do not clamp to the boundary here as it may degrade the performance of certain algorithms.\n # one should use `cache_size=1` instead\n return self.atanh(y)\n\n def log_abs_det_jacobian(self, x, y):\n # We use a formula that is more numerically stable, see details in the following link\n # https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7\n return 2. * (math.log(2.) - x - F.softplus(-2. * x))\n\n\nclass SquashedNormal(pyd.transformed_distribution.TransformedDistribution):\n def __init__(self, loc, scale):\n self.loc = loc\n self.scale = scale\n\n self.base_dist = pyd.Normal(loc, scale)\n transforms = [TanhTransform()]\n super().__init__(self.base_dist, transforms)\n\n @property\n def mean(self):\n mu = self.loc\n for tr in self.transforms:\n mu = tr(mu)\n return mu\n"
] |
[
[
"torch.distributions.Normal",
"torch.nn.functional.softplus",
"torch.distributions.constraints.interval"
]
] |
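A standalone numeric check of the log-det formula used by TanhTransform.log_abs_det_jacobian above: 2*(log 2 - x - softplus(-2x)) equals log(1 - tanh(x)^2). Only torch and the standard math module are needed.

import math

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, steps=7)

# Numerically stable form used in the transform above.
stable = 2.0 * (math.log(2.0) - x - F.softplus(-2.0 * x))

# Direct form: log|d tanh(x)/dx| = log(1 - tanh(x)^2).
direct = torch.log(1.0 - torch.tanh(x) ** 2)

assert torch.allclose(stable, direct, atol=1e-6)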
HotelsDotCom/neaps
|
[
"4222016d60d27a168f4fb5569d696ec1731f8698"
] |
[
"neaps-api/neaps_lib/bootstrap_test.py"
] |
[
"#\n# Copyright 2018 Expedia Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom functions import bootstrap\n\ntest_cases = [\n {\n 'sample': [3, 3, 3],\n 'predstot': 1,\n 'predsdim': 1,\n 'len': 1,\n 'mean': np.float64(3),\n 'meanInt64': np.int64(3)\n },\n {\n 'sample': [2, 2, 2],\n 'predstot': 1000,\n 'predsdim': 2,\n 'len': 1000,\n 'mean': np.float64(2),\n 'meanInt64': np.int64(2)\n },\n {\n 'sample': [1, 1, 1],\n 'predstot': 9999,\n 'predsdim': 3,\n 'len': 9999,\n 'mean': np.float64(1),\n 'meanInt64': np.int64(1)\n },\n]\n\nresults = []\nresultsInt64 = []\n\n\nclass BootstrapTestCase(unittest.TestCase):\n \"\"\" docstring \"\"\"\n def setUp(self):\n for i in range(len(test_cases)):\n results.append(\n bootstrap(\n test_cases[i]['sample'],\n test_cases[i]['predstot'],\n test_cases[i]['predsdim']\n )\n )\n\n resultsInt64.append(\n bootstrap(\n test_cases[i]['sample'],\n test_cases[i]['predstot'],\n test_cases[i]['predsdim'],\n True\n )\n )\n\n def test_len(self):\n \"\"\" test for boostrap helper\"\"\"\n for i in range(len(test_cases)):\n self.assertEqual(len(results[i]), test_cases[i]['len'])\n self.assertEqual(len(resultsInt64[i]), test_cases[i]['len'])\n\n def test_value(self):\n \"\"\" docstring \"\"\"\n for i in range(len(test_cases)):\n for j in range(len(results[i])):\n self.assertEqual(results[i][j], test_cases[i]['mean'])\n self.assertEqual(resultsInt64[i][j], test_cases[i]['meanInt64'])\n self.assertIsInstance(results[i][j], np.float64)\n self.assertIsInstance(resultsInt64[i][j], np.int64)\n\n def test_less(self):\n \"\"\" docstring \"\"\"\n for i in range(len(test_cases)):\n for j in range(len(results[i])):\n self.assertLessEqual(results[i][j], max(test_cases[i]['sample']))\n\n def test_greater(self):\n \"\"\" docstring \"\"\"\n for i in range(len(test_cases)):\n for j in range(len(results[i])):\n self.assertGreaterEqual(results[i][j], min(test_cases[i]['sample']))\n\nif __name__ == '__main__':\n unittest.main()"
] |
[
[
"numpy.float64",
"numpy.int64"
]
] |
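A generic bootstrap-of-means sketch in plain numpy, not the functions.bootstrap implementation under test above, shown only to illustrate the float64 and int64 outputs the assertions check.

import numpy as np


def bootstrap_means(sample, n_preds, as_int=False, rng=None):
    """Resample with replacement and return n_preds bootstrap means."""
    rng = rng or np.random.default_rng(0)
    sample = np.asarray(sample)
    draws = rng.choice(sample, size=(n_preds, sample.size), replace=True)
    means = draws.mean(axis=1)  # float64 means
    return np.rint(means).astype(np.int64) if as_int else means


print(bootstrap_means([3, 3, 3], 5))        # np.float64 values, all 3.0
print(bootstrap_means([3, 3, 3], 5, True))  # np.int64 values, all 3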
yasuyuky/pytorch-pfn-extras
|
[
"febea6ded644d3b7a099ac557f06567a04b3b838"
] |
[
"pytorch_pfn_extras/dataset/tabular/tabular_dataset.py"
] |
[
"import numpy\nimport torch\n\nimport pytorch_pfn_extras as ppe\nfrom torch.utils.data import Dataset\n\n\nclass TabularDataset(Dataset):\n \"\"\"An abstract class that represents tabular dataset.\n\n This class represents a tabular dataset.\n In a tabular dataset, all examples have the same number of elements.\n For example, all examples of the dataset below have three elements\n (:obj:`a[i]`, :obj:`b[i]`, and :obj:`c[i]`).\n\n .. csv-table::\n :header: , a, b, c\n\n 0, :obj:`a[0]`, :obj:`b[0]`, :obj:`c[0]`\n 1, :obj:`a[1]`, :obj:`b[1]`, :obj:`c[1]`\n 2, :obj:`a[2]`, :obj:`b[2]`, :obj:`c[2]`\n 3, :obj:`a[3]`, :obj:`b[3]`, :obj:`c[3]`\n\n Since an example can be represented by both tuple and dict (\n :obj:`(a[i], b[i], c[i])` and :obj:`{'a': a[i], 'b': b[i], 'c': c[i]}`),\n this class uses :attr:`mode` to indicate which representation will be used.\n If there is only one column, an example also can be represented by a value\n (:obj:`a[i]`). In this case, :attr:`mode` is :obj:`None`.\n\n An inheritance should implement\n :meth:`__len__`, :attr:`keys`, :attr:`mode` and :meth:`get_examples`.\n\n >>> import numpy as np\n >>>\n >>> from pytorch_pfn_extras import dataset\n >>>\n >>> class MyDataset(dataset.TabularDataset):\n ...\n ... def __len__(self):\n ... return 4\n ...\n ... @property\n ... def keys(self):\n ... return ('a', 'b', 'c')\n ...\n ... @property\n ... def mode(self):\n ... return tuple\n ...\n ... def get_examples(self, indices, key_indices):\n ... data = np.arange(12).reshape((4, 3))\n ... if indices is not None:\n ... data = data[indices]\n ... if key_indices is not None:\n ... data = data[:, list(key_indices)]\n ... return tuple(data.transpose())\n ...\n >>> dataset = MyDataset()\n >>> len(dataset)\n 4\n >>> dataset.keys\n ('a', 'b', 'c')\n >>> dataset.astuple()[0]\n (0, 1, 2)\n >>> sorted(dataset.asdict()[0].items())\n [('a', 0), ('b', 1), ('c', 2)]\n >>>\n >>> view = dataset.slice[[3, 2], ('c', 0)]\n >>> len(view)\n 2\n >>> view.keys\n ('c', 'a')\n >>> view.astuple()[1]\n (8, 6)\n >>> sorted(view.asdict()[1].items())\n [('a', 6), ('c', 8)]\n\n \"\"\"\n\n def __len__(self):\n raise NotImplementedError\n\n @property\n def keys(self):\n \"\"\"Names of columns.\n\n A tuple of strings that indicate the names of columns.\n \"\"\"\n raise NotImplementedError\n\n @property\n def mode(self):\n \"\"\"Mode of representation.\n\n This indicates the type of value returned\n by :meth:`fetch` and :meth:`__getitem__`.\n :class:`tuple`, :class:`dict`, and :obj:`None` are supported.\n \"\"\"\n raise NotImplementedError\n\n def get_examples(self, indices, key_indices):\n \"\"\"Return a part of data.\n\n Args:\n indices (list of ints or slice): Indices of requested rows.\n If this argument is :obj:`None`, it indicates all rows.\n key_indices (tuple of ints): Indices of requested columns.\n If this argument is :obj:`None`, it indicates all columns.\n\n Returns:\n tuple of lists/arrays\n \"\"\"\n raise NotImplementedError\n\n @property\n def slice(self):\n \"\"\"Get a slice of dataset.\n\n Args:\n indices (list/array of ints/bools or slice): Requested rows.\n keys (tuple of ints/strs or int or str): Requested columns.\n\n Returns:\n A view of specified range.\n \"\"\"\n return ppe.dataset.tabular._slice._SliceHelper(self)\n\n def fetch(self):\n \"\"\"Fetch data.\n\n This method fetches all data of the dataset/view.\n Note that this method returns a column-major data\n (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... 
c[3]])`,\n :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or\n :obj:`[a[0], ..., a[3]]`).\n\n Returns:\n If :attr:`mode` is :class:`tuple`,\n this method returns a tuple of lists/arrays.\n If :attr:`mode` is :class:`dict`,\n this method returns a dict of lists/arrays.\n \"\"\"\n examples = self.get_examples(None, None)\n if self.mode is tuple:\n return examples\n elif self.mode is dict:\n return dict(zip(self.keys, examples))\n elif self.mode is None:\n return examples[0]\n\n def convert(self, data):\n \"\"\"Convert fetched data.\n\n This method takes data fetched by :meth:`fetch` and\n pre-process them before passing them to models.\n The default behaviour is converting each column into an ndarray.\n This behaviour can be overridden by :meth:`with_converter`.\n If the dataset is constructed by :meth:`concat` or :meth:`join`,\n the converter of the first dataset is used.\n\n Args:\n data (tuple or dict): Data from :meth:`fetch`.\n\n Returns:\n A tuple or dict.\n Each value is an ndarray.\n \"\"\"\n if isinstance(data, tuple):\n return tuple(_as_array(d) for d in data)\n elif isinstance(data, dict):\n return {k: _as_array(v) for k, v in data.items()}\n else:\n return _as_array(data)\n\n def astuple(self):\n \"\"\"Return a view with tuple mode.\n\n Returns:\n A view whose :attr:`mode` is :class:`tuple`.\n \"\"\"\n return ppe.dataset.tabular._asmode._Astuple(self)\n\n def asdict(self):\n \"\"\"Return a view with dict mode.\n\n Returns:\n A view whose :attr:`mode` is :class:`dict`.\n \"\"\"\n return ppe.dataset.tabular._asmode._Asdict(self)\n\n def concat(self, *datasets):\n \"\"\"Stack datasets along rows.\n\n Args:\n datasets (iterable of :class:`TabularDataset`):\n Datasets to be concatenated.\n All datasets must have the same :attr:`keys`.\n\n Returns:\n A concatenated dataset.\n \"\"\"\n return ppe.dataset.tabular._concat._Concat(\n self, *datasets)\n\n def join(self, *datasets):\n \"\"\"Stack datasets along columns.\n\n Args: datasets (iterable of :class:`TabularDataset`):\n Datasets to be concatenated.\n All datasets must have the same length\n\n Returns:\n A joined dataset.\n \"\"\"\n return ppe.dataset.tabular._join._Join(self, *datasets)\n\n def transform(self, keys, transform):\n \"\"\"Apply a transform to each example.\n\n The transformations are a list where each element\n is a tuple that holds the transformation signature and\n a callable that is the transformation itself.\n\n The transformation signature is a tuple of 2 elements with\n the first one being the keys of the dataset that are taken\n as inputs. And the last one the outputs it produces for the\n transformation `keys` argument.\n\n When multiple transformations are specified, the outputs\n must be disjoint or `ValueError` will be risen.\n\n Args:\n keys (tuple of strs): The keys of transformed examples.\n transform (list of tuples): A list where each element\n specifies a transformation with a tuple with the\n transformation signature and a callable that takes an example\n and returns transformed example. 
:attr:`mode` of\n transformed dataset is determined by the transformed\n examples.\n\n Returns:\n A transfromed dataset.\n \"\"\"\n return ppe.dataset.tabular._transform._Transform(\n self, keys, transform)\n\n def transform_batch(self, keys, transform_batch):\n \"\"\"Apply a transform to examples.\n\n The transformations are a list where each element\n is a tuple that holds the transformation signature and\n a callable that is the transformation itself.\n\n The transformation signature is a tuple of 2 elements with\n the first one being the keys of the dataset that are taken\n as inputs. And the last one the outputs it produces for the\n transformation `keys` argument.\n\n When multiple transformations are specified, the outputs\n must be disjoint or `ValueError` will be risen.\n\n Args:\n keys (tuple of strs): The keys of transformed examples.\n transform_batch (list of tuples): A list where each element\n specifies a transformation with a tuple with the\n transformation signature and a callable that takes a\n batch of examples and returns a batch of transformed examples.\n :attr:`mode` of transformed dataset is determined by\n the transformed examples.\n\n Returns:\n A transfromed dataset.\n \"\"\"\n return ppe.dataset.tabular._transform._TransformBatch(\n self, keys, transform_batch)\n\n def with_converter(self, converter):\n \"\"\"Override the behaviour of :meth:`convert`.\n\n This method overrides :meth:`convert`.\n\n Args:\n converter (callable): A new converter.\n\n Returns:\n A dataset with the new converter.\n \"\"\"\n\n return ppe.dataset.tabular._with_converter._WithConverter(\n self, converter)\n\n def get_example(self, i):\n example = self.get_examples([i], None)\n example = tuple(col[0] for col in example)\n if self.mode is tuple:\n return example\n elif self.mode is dict:\n return dict(zip(self.keys, example))\n elif self.mode is None:\n return example[0]\n\n def __iter__(self):\n return (self.get_example(i) for i in range(len(self)))\n\n def __getitem__(self, index):\n \"\"\"Returns an example or a sequence of examples.\n It implements the standard Python indexing and one-dimensional integer\n array indexing. It uses the :meth:`get_example` method by default, but\n it may be overridden by the implementation to, for example, improve the\n slicing performance.\n Args:\n index (int, slice, list or numpy.ndarray): An index of an example\n or indexes of examples.\n Returns:\n If index is int, returns an example created by `get_example`.\n If index is either slice or one-dimensional list or numpy.ndarray,\n returns a list of examples created by `get_example`.\n \"\"\"\n if isinstance(index, slice):\n current, stop, step = index.indices(len(self))\n return [self.get_example(i) for i in\n range(current, stop, step)]\n elif isinstance(index, list) or isinstance(index, numpy.ndarray):\n return [self.get_example(i) for i in index]\n else:\n return self.get_example(index)\n\n\ndef _as_array(data):\n if isinstance(data, (numpy.ndarray, torch.Tensor)):\n return data\n else:\n return numpy.array(data)\n"
] |
[
[
"numpy.array"
]
] |
ThatSnail/synth_detune
|
[
"9540508ae653b390dfea5e3083b05936c53c6d4d"
] |
[
"plotter.py"
] |
[
"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nts = np.load(\"ts.npy\")\nts -= ts[0]\nxs = np.load(\"xs.npy\")\n\nprint(\"ts shape: {0}\".format(np.shape(ts)))\nprint(\"xs shape: {0}\".format(np.shape(xs)))\n\nplt.figure()\nplt.scatter(ts, xs)\nplt.show()\n"
] |
[
[
"numpy.load",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
] |
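A round-trip sketch for the plotter above: save synthetic ts/xs arrays with numpy, reload them, and draw the scatter with a non-interactive backend. The file names match the snippet; the data is synthetic.

import numpy as np
import matplotlib
matplotlib.use("Agg")  # render without a display
import matplotlib.pyplot as plt

# Write the two arrays the plotter expects.
ts = np.linspace(100.0, 101.0, 50)
xs = np.sin(2 * np.pi * 5 * (ts - ts[0]))
np.save("ts.npy", ts)
np.save("xs.npy", xs)

# Reload and plot, as in plotter.py.
ts = np.load("ts.npy")
ts -= ts[0]
xs = np.load("xs.npy")
print("ts shape: {0}".format(np.shape(ts)))

plt.figure()
plt.scatter(ts, xs)
plt.savefig("detune_scatter.png")  # savefig instead of show for headless use
plt.close()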
BFourcin/rcmodel
|
[
"160eb2ad7ea60f328abde157baeef7378e28d815"
] |
[
"src/rcmodel/optimisation.py"
] |
[
"from filelock import FileLock\nimport torch\nimport pandas as pd\nfrom .tools import BuildingTemperatureDataset\nimport os\n\ndef train(model, device, dataloader, optimizer):\n \"\"\"\n Performs one epoch of training.\n Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.\n \"\"\"\n model.reset_iv() # Reset initial value\n model.train()\n model.cooling_policy.eval()\n\n # Stops Autograd endlessly keeping track of the graph. Memory Leak!\n for layer in model.cooling_policy.parameters():\n layer.requires_grad = False\n\n num_cols = len(model.building.rooms) # number of columns to use from data.\n num_batches = len(dataloader)\n train_loss = 0\n loss_fn = torch.nn.MSELoss()\n\n for batch, (time, temp) in enumerate(dataloader):\n time, temp = time.to(device), temp.to(device) # Put on GPU if available\n\n # Get model arguments:\n time = time.squeeze(0)\n temp = temp.squeeze(0)\n\n # Compute prediction and loss\n pred = model(time)\n pred = pred.squeeze(-1) # change from column to row matrix\n\n loss = loss_fn(pred[:, 2:], temp[:, 0:num_cols])\n train_loss += loss.item()\n\n # get last output and use for next initial value\n model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return train_loss / num_batches\n\n\ndef test(model, device, dataloader):\n model.reset_iv() # Reset initial value\n model.eval() # Put model in evaluation mode\n num_batches = len(dataloader)\n num_cols = len(model.building.rooms) # number of columns to take from data.\n test_loss = 0\n loss_fn = torch.nn.MSELoss()\n\n with torch.no_grad():\n for (time, temp) in dataloader:\n time, temp = time.to(device), temp.to(device) # Put on GPU if available\n\n time = time.squeeze(0)\n temp = temp.squeeze(0)\n\n pred = model(time)\n pred = pred.squeeze(-1) # change from column to row matrix\n test_loss += loss_fn(pred[:, 2:], temp[:, 0:num_cols]).item()\n\n # get last output and use for next initial value\n model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD\n\n test_loss /= num_batches\n\n return test_loss\n\n\ndef dataset_creator(path, sample_size, dt):\n path_sorted = sort_data(path, dt)\n with FileLock(f\"{os.path.dirname(os.path.abspath(path_sorted))}.lock\"):\n training_data = BuildingTemperatureDataset(path_sorted, sample_size, train=True)\n train_dataloader = torch.utils.data.DataLoader(training_data, batch_size=1, shuffle=False)\n test_data = BuildingTemperatureDataset(path_sorted, sample_size, test=True)\n test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)\n \n return train_dataloader, test_dataloader\n\n\ndef sort_data(path, dt):\n \"\"\"\n Check if path has sorted data tag (_sorted)\n If not check if data has previously been sorted and exists in the directory.\n Check to see if the value dt is correct\n If not sort data and write filename_sorted.csv\n\n data is sorted by time in ascending order and downsampled to a frequency of dt seconds.\n Missing values are interpolated.\n A time-date string is also inserted.\n \"\"\"\n def sort(path, dt):\n df = pd.read_csv(path)\n\n if path[-11:] == '_sorted.csv':\n path_sorted = path\n else:\n path_sorted = path[:-4] + '_sorted.csv'\n\n # Sort df by time (raw data not always in order)\n df = df.sort_values(by=[\"time\"], ascending=True)\n\n # insert date-time value at start of df\n try:\n df.insert(loc=0, column='date-time', value=pd.to_datetime(df['time'], unit='ms'))\n except ValueError:\n 
raise ValueError('Data appears to have already been sorted. Check if still appropriate and add _sorted.csv tag to avoid this error.')\n\n # downscale data to a frequency of dt (seconds) use the mean value and round to 2dp.\n df = df.set_index('date-time').resample(str(dt) + 's').mean().round(2)\n\n # time column is converted to unix epoch seconds to match the date-time\n df[\"time\"] = (df.index - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta(\"1s\")\n\n # change date-time from UTC to Local time\n df = df.tz_localize('Europe/London')\n\n df = df.interpolate().round(2) # interpolate missing values NaN\n\n df.to_csv(path_sorted, index=True)\n\n def need_to_sort(path, dt):\n\n def get_dt(path):\n df_dt = pd.read_csv(path)['time'][0:2].values\n return df_dt[1] - df_dt[0]\n\n # Does path already have sorted tag?\n if path[-11:] == '_sorted.csv':\n\n # if so, is dt correct?\n if get_dt(path) == dt:\n\n return False # path and file is correct dont sort\n\n else:\n return True # dt is wrong, re-sort\n\n # path does not contain _sorted.csv\n else:\n\n # Does path_sorted exist?\n path_sorted = path[:-4] + '_sorted.csv'\n import os.path\n if os.path.isfile(path_sorted): # check if file already exists\n\n # if file exists check if dt is correct\n if get_dt(path_sorted) == dt:\n return False # correct file already exists don't sort\n else:\n return True # file exists but dt wrong, re-sort\n\n else: # File doesn't exist\n return True\n\n if need_to_sort(path, dt):\n sort(path, dt)\n\n # return the path_sorted\n if path[-11:] == '_sorted.csv':\n path_sorted = path\n else:\n path_sorted = path[:-4] + '_sorted.csv'\n\n return path_sorted\n\n\nclass OptimiseRC:\n \"\"\"\n Parameters\n ----------\n model : object\n RCModel class object.\n csv_path : string\n Path to .csv file containing room temperature data.\n Data will be sorted if not done already and saved to a new file with the tag '_sorted'\n sample_size : int\n Length of indexes to sample from dataset per batch.\n dt : int\n Timestep data will be resampled to.\n lr : float\n Learning rate for optimiser.\n model_id : int\n Unique identifier used when optimising multiple models.\n\n see https://docs.ray.io/en/latest/using-ray-with-pytorch.html\n \"\"\"\n def __init__(self, model, csv_path, sample_size, dt=30, lr=1e-3, opt_id=0):\n self.model = model\n self.model.init_params() # randomise parameters\n self.model_id = opt_id\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.train_dataloader, self.test_dataloader = dataset_creator(csv_path, int(sample_size), int(dt))\n\n self.optimizer = torch.optim.Adam([self.model.params, self.model.cooling], lr=lr)\n\n\n\n def train(self):\n avg_loss = train(self.model, self.device, self.train_dataloader, self.optimizer)\n return avg_loss\n\n def test(self):\n test_loss = test(self.model, self.device, self.test_dataloader)\n return test_loss\n\n def train_loop(self, epochs):\n print(self.model.params)\n for i in range(int(epochs)):\n # print(f\"Epoch {i + 1}\\n-------------------------------\")\n testloss = self.train()\n\n results = [testloss, self.model]\n return results\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.to_datetime",
"torch.nn.MSELoss",
"pandas.Timedelta",
"torch.no_grad",
"torch.optim.Adam",
"pandas.Timestamp",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"pandas.read_csv"
]
] |
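A minimal sketch of the resampling logic inside sort_data above: sort by time, resample to dt seconds on a date-time index, then convert the index back to unix seconds. The sample frame is made up and kept timezone-naive.

import pandas as pd

dt = 30  # target sampling interval in seconds

# Hypothetical raw data: unordered unix-millisecond timestamps and one temperature column.
df = pd.DataFrame({
    "time": [60_000, 0, 30_000, 90_000],
    "temp": [20.4, 20.0, 20.2, 20.6],
})

df = df.sort_values(by=["time"], ascending=True)
df.insert(loc=0, column="date-time", value=pd.to_datetime(df["time"], unit="ms"))

# Downsample to dt-second bins using the mean, interpolate gaps, round to 2 dp.
df = df.set_index("date-time").resample(f"{dt}s").mean().interpolate().round(2)

# Recover a unix-seconds time column from the date-time index.
df["time"] = (df.index - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
print(df)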
madhukaranand/Which-Bollywood-Celebrity-You-look-like
|
[
"22d5aac12f88c5f5616008eb981346480cdd1d28"
] |
[
"src/testing.py"
] |
[
"from keras_vggface.utils import preprocess_input\nfrom keras_vggface.vggface import VGGFace\nimport numpy as np\nimport pickle\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport cv2\nfrom mtcnn import MTCNN\nfrom PIL import Image\n\nfeature_list = np.array(pickle.load(open('artifacts/extracted_features/embedding.pkl','rb')))\nfilenames = pickle.load(open('artifacts/pickle_format_data/img_PICKLE_file.pkl','rb'))\n\nmodel = VGGFace(model='resnet50',include_top=False,input_shape=(224,224,3),pooling='avg')\n\n#detect face\ndetector = MTCNN()\n\n# load img -> face detection\nsample_img = cv2.imread('samples/saif_dup.jpg')\nresults = detector.detect_faces(sample_img)\n\nx,y,width,height = results[0]['box']\n\nface = sample_img[y:y+height,x:x+width]\n\n# extract its features\nimage = Image.fromarray(face)\nimage = image.resize((224,224))\n\nface_array = np.asarray(image)\n\nface_array = face_array.astype('float32')\n\nexpanded_img = np.expand_dims(face_array,axis=0)\npreprocessed_img = preprocess_input(expanded_img)\nresult = model.predict(preprocessed_img).flatten()\n\n# print(result)\n# print(result.shape)\n# print(result.reshape(1,-1))\n\n# find the cosine distance of current image with all the 8664 features\nsimilarity = []\nfor i in range(len(feature_list)):\n similarity.append(cosine_similarity(result.reshape(1,-1),feature_list[i].reshape(1,-1))[0][0])\n\n# print(len(similarity))\n\nindex_pos = sorted(list(enumerate(similarity)),reverse=True,key=lambda x:x[1])[0][0]\n\n#recommend that image\ntemp_img = cv2.imread(filenames[index_pos])\ncv2.imshow('output',temp_img)\ncv2.waitKey(0)\n"
] |
[
[
"numpy.asarray",
"numpy.expand_dims"
]
] |
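A compact sketch of the nearest-embedding lookup above, with random vectors standing in for the stored VGGFace embeddings and the probe face; the shapes are assumptions.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
feature_list = rng.normal(size=(100, 2048))  # stand-in for the stored embeddings
query = rng.normal(size=(2048,))             # stand-in for the probe face embedding

# One vectorised call instead of a Python loop: a (1, 100) matrix of similarities.
similarity = cosine_similarity(np.expand_dims(query, axis=0), feature_list)[0]

index_pos = int(np.argmax(similarity))
print("best match index:", index_pos,
      "similarity:", round(float(similarity[index_pos]), 3))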
itsnamgyu/reid-metric
|
[
"437e02ebad510b482f620a293fd8c7baa4f42ad6"
] |
[
"hitl/feedback.py"
] |
[
"import torch\n\nfrom utils.distmat import compute_distmat\n\n\ndef init_feedback_indices(q, g, device=None):\n return torch.zeros((q, g), dtype=torch.bool, device=device)\n\n\ndef init_feedback_indices_qg(q, g, positive=False, device=None):\n indices = torch.zeros(q, q + g, dtype=torch.bool, device=device)\n if positive:\n indices[torch.arange(q), torch.arange(q)] = True\n return indices\n\n\ndef greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=True):\n \"\"\"\n Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.\n Note that distmat is corrupted if inplace=True.\n\n :param distmat: q x g Tensor (adjusted query to gallery)\n :param q_pids: q\n :param g_pids: g\n :param positive_indices: q x g\n :param negative_indices: q x g\n :return:\n (positive_indices, negative_indices, matches)\n \"\"\"\n q, g = tuple(distmat.shape)\n\n if not inplace:\n distmat = distmat.clone().detach()\n positive_indices = positive_indices.copy()\n negative_indices = negative_indices.copy()\n\n distmat[positive_indices] = float(\"inf\")\n distmat[negative_indices] = float(\"inf\")\n\n indices = distmat.argmin(dim=1)\n pmap = g_pids[indices] == q_pids\n positive_q = torch.arange(0, q)[pmap]\n negative_q = torch.arange(0, q)[pmap == False]\n positive_g = indices[pmap]\n negative_g = indices[pmap == False]\n\n existing = positive_indices[positive_q, positive_g]\n assert (not existing.any())\n positive_indices[positive_q, positive_g] = True\n existing = negative_indices[negative_q, negative_g]\n assert (not existing.any())\n negative_indices[negative_q, negative_g] = True\n\n return positive_indices, negative_indices, pmap\n\n\ndef naive_round(qf, gf, q_pids, g_pids, positive_indices=None, negative_indices=None,\n inplace=True, previous_distmat=None, device=None):\n \"\"\"\n qf: q x m\n gf: g x m\n q_pids: q\n g_pids: g\n positive_indices: q x g\n negative_indices: q x g\n previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init)\n \"\"\"\n q, g = qf.shape[0], gf.shape[0]\n assert (qf.shape[1] == gf.shape[1])\n\n if positive_indices is None: positive_indices = init_feedback_indices(q, g, device=device)\n if negative_indices is None: negative_indices = init_feedback_indices(q, g, device=device)\n\n if previous_distmat is None:\n distmat = compute_distmat(qf, gf)\n else:\n distmat = previous_distmat\n\n res = greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=inplace)\n positive_indices, negative_indices, matches = res\n\n distmat = compute_distmat(qf, gf)\n distmat[positive_indices] = 0\n distmat[negative_indices] = float(\"inf\")\n\n return distmat, positive_indices, negative_indices, matches\n"
] |
[
[
"torch.zeros",
"torch.arange"
]
] |
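A tiny sketch of the index bookkeeping in init_feedback_indices_qg above: a boolean q x (q + g) matrix whose first q columns mark each query as its own positive.

import torch

q, g = 4, 6

indices = torch.zeros(q, q + g, dtype=torch.bool)
indices[torch.arange(q), torch.arange(q)] = True  # mark query i as a positive for itself

print(indices.int())
print("positives per row:", indices.sum(dim=1).tolist())  # [1, 1, 1, 1]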
Mougatine/lectures-labs
|
[
"0f2bdd326174c92d80816663e2ae49abeb7f8533"
] |
[
"labs/10_unsupervised_generative_models/solutions/grl_training.py"
] |
[
"import collections\n\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy, BinaryCrossentropy\nfrom tensorflow.keras.metrics import Mean, Accuracy\n\n\noptimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)\n\ncce = SparseCategoricalCrossentropy()\nbce = BinaryCrossentropy()\n\nmodel.compile(\n optimizer=optimizer,\n loss=[cce, bce],\n metrics=[\"accuracy\"]\n)\n\ndef train_epoch(source_train_generator, target_train_generator):\n global lambda_factor, global_step\n\n # Keras provide helpful classes to monitor various metrics:\n epoch_source_digits = tf.keras.metrics.Mean(name=\"source_digits_loss\")\n epoch_source_domains = tf.keras.metrics.Mean(name=\"source_domain_loss\")\n epoch_target_domains = tf.keras.metrics.Mean(name=\"target_domain_loss\")\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\"source_digits_accuracy\")\n\n # Fetch all trainable variables but those used uniquely for the digits classification:\n variables_but_classifier = list(filter(lambda x: \"digits\" not in x.name, model.trainable_variables))\n\n loss_record = collections.defaultdict(list)\n\n for i, data in enumerate(zip(source_train_generator, target_train_generator)):\n source_data, target_data = data\n # Training digits classifier & domain classifier on source:\n x_source, y_source, d_source = source_data\n\n with tf.GradientTape() as tape:\n digits_prob, domains_probs = model(x_source)\n digits_loss = cce(y_source, digits_prob)\n domains_loss = bce(d_source, domains_probs)\n source_loss = digits_loss + 0.2 * domains_loss\n\n gradients = tape.gradient(source_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n epoch_source_digits(digits_loss)\n epoch_source_domains(domains_loss)\n epoch_accuracy(y_source, digits_prob)\n\n # Training domain classifier on target:\n x_target, d_target = target_data\n with tf.GradientTape() as tape:\n _, domains_probs = model(x_target)\n target_loss = 0.2 * bce(d_target, domains_probs)\n\n gradients = tape.gradient(target_loss, variables_but_classifier)\n optimizer.apply_gradients(zip(gradients, variables_but_classifier))\n\n epoch_target_domains(target_loss)\n\n print(\"Source digits loss={}, Source Accuracy={}, Source domain loss={}, Target domain loss={}\".format(\n epoch_source_digits.result(), epoch_accuracy.result(),\n epoch_source_domains.result(), epoch_target_domains.result()))\n\n\nfor epoch in range(epochs):\n print(\"Epoch: {}\".format(epoch), end=\" \")\n loss_record = train_epoch(source_train_generator, target_train_generator)\n"
] |
[
[
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.losses.BinaryCrossentropy"
]
] |
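A self-contained GradientTape training step in the style of train_epoch above, with the imports spelled out (the solution snippet assumes tf, SGD and model are already in scope); the toy model, shapes and data below are placeholders, not the notebook's.

import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import SGD

# Placeholder classifier standing in for the notebook's model.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="relu", input_shape=(16,)),
    tf.keras.layers.Dense(10),
])
optimizer = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
cce = SparseCategoricalCrossentropy(from_logits=True)

x = np.random.rand(8, 16).astype("float32")
y = np.random.randint(0, 10, size=(8,))

with tf.GradientTape() as tape:
    logits = model(x, training=True)
    loss = cce(y, logits)

gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
print("loss:", float(loss))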
bobvdvelde/polyaxon
|
[
"90a8cc37143a9ba1dd7eaf040377100d7a33f0d1"
] |
[
"src/core/tests/test_tracking/test_run_tracking.py"
] |
[
"#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport pytest\nimport tempfile\nimport uuid\n\nfrom unittest.mock import patch\n\nimport altair as alt\nimport matplotlib.pyplot as plt\n\nfrom bokeh.plotting import figure\nfrom plotly import figure_factory\n\nfrom polyaxon import settings\nfrom polyaxon.constants.globals import DEFAULT, PLATFORM_DIST_CE\nfrom polyaxon.containers.contexts import (\n CONTEXT_ARTIFACTS_FORMAT,\n CONTEXT_MOUNT_ARTIFACTS_FORMAT,\n CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT,\n CONTEXT_OFFLINE_FORMAT,\n CONTEXTS_OUTPUTS_SUBPATH_FORMAT,\n)\nfrom polyaxon.env_vars import getters\nfrom polyaxon.env_vars.getters import get_run_info\nfrom polyaxon.env_vars.keys import (\n POLYAXON_KEYS_COLLECT_ARTIFACTS,\n POLYAXON_KEYS_COLLECT_RESOURCES,\n POLYAXON_KEYS_LOG_LEVEL,\n POLYAXON_KEYS_RUN_INSTANCE,\n)\nfrom polyaxon.exceptions import PolyaxonClientException\nfrom polyaxon.polyboard.artifacts import V1ArtifactKind\nfrom polyaxon.polyboard.events import V1Events, get_asset_path, get_event_path\nfrom polyaxon.polyboard.processors.writer import EventFileWriter, ResourceFileWriter\nfrom polyaxon.tracking.run import Run\nfrom polyaxon.utils.path_utils import create_path\nfrom tests.utils import TestEnvVarsCase, tensor_np\n\n\n@pytest.mark.tracking_mark\nclass TestRunTracking(TestEnvVarsCase):\n def setUp(self):\n super().setUp()\n settings.CLIENT_CONFIG.is_managed = True\n settings.CLIENT_CONFIG.is_offline = True\n\n def test_get_collect_artifacts_return_false_out_cluster(self):\n settings.CLIENT_CONFIG.is_managed = False\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"false\"\n assert getters.get_collect_artifacts() is False\n\n def test_empty_collect_artifacts_path(self):\n settings.CLIENT_CONFIG.is_managed = True\n assert getters.get_collect_artifacts() is False\n\n def test_valid_artifacts_path(self):\n settings.CLIENT_CONFIG.is_managed = True\n self.check_valid_value(\n POLYAXON_KEYS_COLLECT_ARTIFACTS, getters.get_collect_artifacts, \"true\", True\n )\n\n def test_get_collect_resources_return_false_out_cluster(self):\n settings.CLIENT_CONFIG.is_managed = False\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"false\"\n assert getters.get_collect_resources() is False\n\n def test_empty_collect_resources_path(self):\n settings.CLIENT_CONFIG.is_managed = True\n assert getters.get_collect_resources() is False\n\n def test_valid_resources_path(self):\n settings.CLIENT_CONFIG.is_managed = True\n self.check_valid_value(\n POLYAXON_KEYS_COLLECT_RESOURCES, getters.get_collect_resources, \"true\", True\n )\n\n def test_get_log_level_out_cluster(self):\n settings.CLIENT_CONFIG.is_managed = False\n self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)\n\n def test_empty_log_level(self):\n settings.CLIENT_CONFIG.is_managed = True\n self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)\n\n def test_run_info_checks_is_managed(self):\n 
settings.CLIENT_CONFIG.is_managed = False\n with self.assertRaises(PolyaxonClientException):\n get_run_info()\n\n def test_empty_run_info(self):\n self.check_raise_for_invalid_value(\n POLYAXON_KEYS_RUN_INSTANCE, get_run_info, None, PolyaxonClientException\n )\n\n def test_non_valid_run_info(self):\n self.check_raise_for_invalid_value(\n POLYAXON_KEYS_RUN_INSTANCE,\n get_run_info,\n \"something random\",\n PolyaxonClientException,\n )\n\n self.check_raise_for_invalid_value(\n POLYAXON_KEYS_RUN_INSTANCE,\n get_run_info,\n \"foo.bar\",\n PolyaxonClientException,\n )\n\n def test_dict_run_info(self):\n uid = uuid.uuid4().hex\n run_info = \"user.project_bar.runs.{}\".format(uid)\n self.check_valid_value(\n POLYAXON_KEYS_RUN_INSTANCE,\n get_run_info,\n run_info,\n (\"user\", \"project_bar\", uid),\n )\n\n @patch(\"polyaxon.managers.base.os.path.expanduser\")\n def test_run_init(self, expanduser):\n expanduser.return_value = tempfile.mkdtemp()\n settings.CLIENT_CONFIG.is_managed = False\n settings.CLIENT_CONFIG.is_offline = False\n with self.assertRaises(PolyaxonClientException):\n Run()\n\n # Uses default as owner in non CE\n with self.assertRaises(PolyaxonClientException):\n Run(project=\"test\")\n\n # Uses default as owner in CE\n settings.CLIENT_CONFIG.is_offline = True\n settings.CLI_CONFIG.installation = {\"dist\": PLATFORM_DIST_CE}\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(project=\"test\", track_code=False, track_env=False)\n assert exit_mock.call_count == 1\n assert run.owner == DEFAULT\n\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(\n owner=\"owner-test\", project=\"test\", track_code=False, track_env=False\n )\n assert exit_mock.call_count == 1\n assert run.owner == \"owner-test\"\n assert run.project == \"test\"\n\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(project=\"owner-test.test\")\n assert exit_mock.call_count == 1\n assert run.owner == \"owner-test\"\n assert run.project == \"test\"\n\n settings.CLIENT_CONFIG.is_managed = True\n settings.CLIENT_CONFIG.is_offline = False\n with self.assertRaises(PolyaxonClientException):\n Run()\n\n settings.CLI_CONFIG.installation = None\n # Uses default as owner in non CE\n with self.assertRaises(PolyaxonClientException):\n Run(project=\"test\")\n\n # Uses default as owner in CE\n settings.CLIENT_CONFIG.is_offline = True\n settings.CLI_CONFIG.installation = {\"dist\": PLATFORM_DIST_CE}\n run = Run(project=\"test\")\n assert run.owner == DEFAULT\n\n # FQN non CE\n settings.CLI_CONFIG.installation = None\n os.environ[POLYAXON_KEYS_RUN_INSTANCE] = \"user.project_bar.runs.uid\"\n run = Run()\n assert run.owner == \"user\"\n assert run.project == \"project_bar\"\n assert run.run_uuid == \"uid\"\n\n # FQN CE\n settings.CLI_CONFIG.installation = {\"dist\": PLATFORM_DIST_CE}\n os.environ[POLYAXON_KEYS_RUN_INSTANCE] = \"user.project_bar.runs.uid\"\n run = Run()\n assert run.owner == \"user\"\n assert run.project == \"project_bar\"\n assert run.run_uuid == \"uid\"\n\n def test_event_logger_from_non_managed_run(self):\n settings.CLIENT_CONFIG.is_managed = False\n settings.CLIENT_CONFIG.is_offline = False\n\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(\n project=\"owner-test.test\",\n track_code=False,\n track_env=False,\n collect_artifacts=False,\n auto_create=False,\n )\n assert exit_mock.call_count == 1\n artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)\n assert 
run.get_artifacts_path() == artifacts_context\n assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(\n artifacts_context\n )\n assert run._event_logger is None\n\n # Add run id\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(\n project=\"owner-test.test\",\n run_uuid=\"uuid\",\n track_code=False,\n track_env=False,\n collect_artifacts=False,\n )\n assert exit_mock.call_count == 1\n artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)\n assert run.get_artifacts_path() == artifacts_context\n assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(\n artifacts_context\n )\n assert run._event_logger is None\n\n run.set_artifacts_path()\n assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(\"uuid\")\n assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(\"uuid\")\n\n with patch(\"polyaxon.tracking.run.EventFileWriter\") as mock_call:\n run.set_run_event_logger()\n assert mock_call.call_count == 1\n\n with patch(\"polyaxon.tracking.run.ResourceFileWriter\") as mock_call:\n run.set_run_resource_logger()\n assert mock_call.call_count == 1\n\n # Set collect flag\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"true\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"true\"\n settings.CLIENT_CONFIG.is_managed = True\n with patch(\"polyaxon.tracking.run.EventFileWriter\") as event_call:\n with patch(\"polyaxon.tracking.run.ResourceFileWriter\") as resource_call:\n with patch(\"polyaxon.tracking.run.Run.refresh_data\") as refresh_call:\n with patch(\n \"polyaxon.tracking.run.Run._set_exit_handler\"\n ) as exit_call:\n run = Run(project=\"owner-test.test\", run_uuid=\"uuid\")\n\n assert refresh_call.call_count == 1\n assert event_call.call_count == 1\n assert resource_call.call_count == 1\n assert exit_call.call_count == 1\n assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(\"uuid\")\n assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(\"uuid\")\n\n def test_event_logger_from_a_managed_run(self):\n # Set managed flag\n settings.CLIENT_CONFIG.is_managed = True\n settings.CLIENT_CONFIG.is_offline = False\n os.environ[POLYAXON_KEYS_RUN_INSTANCE] = \"user.project_bar.runs.uid\"\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"false\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"false\"\n\n with patch(\"polyaxon.tracking.run.Run.refresh_data\") as refresh_call:\n run = Run()\n assert refresh_call.call_count == 1\n assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(\"uid\")\n assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(\"uid\")\n assert run._event_logger is None\n\n # Set collect flag\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"true\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"true\"\n\n # Add run id\n with patch(\"polyaxon.tracking.run.Run.set_run_event_logger\") as event_call:\n with patch(\n \"polyaxon.tracking.run.Run.set_run_resource_logger\"\n ) as resource_call:\n with patch(\"polyaxon.tracking.run.Run.refresh_data\") as refresh_call:\n with patch(\n \"polyaxon.tracking.run.Run._set_exit_handler\"\n ) as exit_call:\n Run(project=\"test.test\", run_uuid=\"uuid\")\n assert event_call.call_count == 1\n assert resource_call.call_count == 1\n assert refresh_call.call_count == 1\n assert exit_call.call_count == 1\n\n # Set run info\n os.environ[POLYAXON_KEYS_RUN_INSTANCE] = \"user.project_bar.runs.uid\"\n # Add run id\n with 
patch(\"polyaxon.tracking.run.Run.set_run_event_logger\") as event_call:\n with patch(\n \"polyaxon.tracking.run.Run.set_run_resource_logger\"\n ) as resource_call:\n with patch(\"polyaxon.tracking.run.Run.refresh_data\") as refresh_call:\n Run()\n assert event_call.call_count == 1\n assert resource_call.call_count == 1\n assert refresh_call.call_count == 1\n\n def test_event_logger_from_an_offline_run(self):\n # Set managed flag\n settings.CLIENT_CONFIG.is_managed = False\n settings.CLIENT_CONFIG.is_offline = True\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"false\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"false\"\n\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n run = Run(project=\"test.test\", run_uuid=\"uid\")\n assert exit_mock.call_count == 1\n artifacts_path = CONTEXT_OFFLINE_FORMAT.format(\"uid\")\n assert run.get_artifacts_path() == artifacts_path\n assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(\n artifacts_path\n )\n assert run._event_logger is None\n\n # Set collect flag\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"true\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"true\"\n\n # Add run id\n with patch(\"polyaxon.tracking.run.Run.set_run_event_logger\") as event_call:\n with patch(\n \"polyaxon.tracking.run.Run.set_run_resource_logger\"\n ) as resource_call:\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n Run(project=\"test.test\", run_uuid=\"uid\")\n assert exit_mock.call_count == 1\n assert event_call.call_count == 1\n assert resource_call.call_count == 1\n\n\n@pytest.mark.tracking_mark\nclass TestRunLogging(TestEnvVarsCase):\n def setUp(self):\n super().setUp()\n self.run_path = tempfile.mkdtemp()\n self.run_outputs_path = tempfile.mkdtemp()\n settings.CLIENT_CONFIG.is_managed = False\n os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = \"false\"\n os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = \"false\"\n with patch(\"polyaxon.tracking.run.Run._set_exit_handler\") as exit_mock:\n self.run = Run(\n project=\"owner.project\",\n track_env=False,\n track_code=False,\n auto_create=False,\n )\n assert exit_mock.call_count == 1\n self.event_logger = EventFileWriter(run_path=self.run_path)\n self.resource_logger = ResourceFileWriter(run_path=self.run_path)\n self.run._artifacts_path = self.run_path\n self.run._outputs_path = self.run_outputs_path\n self.run._event_logger = self.event_logger\n self.run._resource_logger = self.resource_logger\n assert os.path.exists(get_event_path(self.run_path)) is True\n assert os.path.exists(get_asset_path(self.run_path)) is True\n\n @staticmethod\n def touch(path):\n with open(path, \"w\") as f:\n f.write(\"test\")\n\n def test_log_empty_metric(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_metrics\") as log_metrics:\n self.run.log_metrics()\n assert log_metrics.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n\n def test_log_single_metric(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is 
False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_metrics\") as log_metrics:\n self.run.log_metrics(step=1, metric1=1.1)\n assert log_metrics.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric1\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric1\", data=events_file)\n assert len(results.df.values) == 1\n\n def test_log_multiple_metrics(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_metrics\") as log_metrics:\n self.run.log_metrics(step=1, metric1=1.1, metric2=21.1)\n assert log_metrics.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric1\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric1\", data=events_file)\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric2\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric2\", data=events_file)\n assert len(results.df.values) == 1\n\n with patch(\"polyaxon.tracking.run.Run._log_has_metrics\") as log_metrics:\n self.run.log_metrics(step=2, metric1=1.1, metric2=21.1, metric3=12.1)\n assert log_metrics.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric1\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric1\", data=events_file)\n assert len(results.df.values) == 2\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric2\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric2\", data=events_file)\n assert len(results.df.values) == 2\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.METRIC, name=\"metric3\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"metric\", name=\"metric3\", data=events_file)\n assert len(results.df.values) == 1\n\n def test_log_image_from_path(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n image_file = tempfile.mkdtemp() + \"/file.png\"\n self.touch(image_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_image:\n self.run.log_image(name=\"my_image\", data=image_file)\n assert log_image.call_count == 1\n self.event_logger.flush()\n assert (\n 
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"my_image\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\", ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_image_from_path_with_step(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n image_file = tempfile.mkdtemp() + \"/file.png\"\n self.touch(image_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_image:\n self.run.log_image(name=\"my_image\", data=image_file, step=1)\n assert log_image.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"my_image\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\", step=1, ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_data_image(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_image:\n self.run.log_image(\n name=\"my_image\", data=tensor_np(shape=(1, 8, 8)), dataformats=\"CHW\"\n )\n assert log_image.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"my_image\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\", ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_image_with_boxes(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n image_file = tempfile.mkdtemp() + \"/file.png\"\n self.touch(image_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_image_with_boxes:\n self.run.log_image_with_boxes(\n name=\"my_image\",\n tensor_image=tensor_np(shape=(3, 32, 32)),\n tensor_boxes=np.array([[10, 10, 40, 40]]),\n )\n assert log_image_with_boxes.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, 
kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"my_image\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"my_image\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_mpl_image(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n\n figure, axes = plt.figure(), plt.gca()\n circle1 = plt.Circle((0.2, 0.5), 0.2, color=\"r\")\n circle2 = plt.Circle((0.8, 0.5), 0.2, color=\"g\")\n axes.add_patch(circle1)\n axes.add_patch(circle2)\n plt.axis(\"scaled\")\n plt.tight_layout()\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_mpl_image:\n self.run.log_mpl_image(name=\"figure\", data=figure, step=1, close=False)\n assert log_mpl_image.call_count == 1\n assert plt.fignum_exists(figure.number) is True\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\", step=1, ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_dashboard:\n self.run.log_mpl_image(name=\"figure\", data=figure, step=2)\n assert log_dashboard.call_count == 1\n assert plt.fignum_exists(figure.number) is False\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 2\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\", step=1, ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n @pytest.mark.filterwarnings(\"ignore::FutureWarning\")\n def test_log_mpl_images(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is False\n )\n\n figures = []\n for i in range(5):\n figure = plt.figure()\n plt.plot([i * 1, i * 2, i * 3], label=\"Plot \" + str(i))\n plt.xlabel(\"X\")\n plt.xlabel(\"Y\")\n plt.legend()\n plt.tight_layout()\n figures.append(figure)\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_mpl_image:\n self.run.log_mpl_image(name=\"figure\", data=figures, step=1, close=False)\n assert log_mpl_image.call_count == 1\n assert 
all([plt.fignum_exists(figure.number) is True for figure in figures])\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 1\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_mpl_image:\n self.run.log_mpl_image(name=\"figure\", data=figures, step=2)\n assert log_mpl_image.call_count == 1\n assert all([plt.fignum_exists(figure.number) is False for figure in figures])\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 2\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.IMAGE, name=\"figure\", step=1, ext=\"png\"\n )\n assert os.path.exists(asset_file) is True\n\n @pytest.mark.filterwarnings(\"ignore::RuntimeWarning\")\n def test_log_mpl_plotly(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n\n figure, axes = plt.figure(), plt.gca()\n circle1 = plt.Circle((0.2, 0.5), 0.2, color=\"r\")\n circle2 = plt.Circle((0.8, 0.5), 0.2, color=\"g\")\n axes.add_patch(circle1)\n axes.add_patch(circle2)\n plt.axis(\"scaled\")\n plt.tight_layout()\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_mpl_plotly_chart:\n self.run.log_mpl_plotly_chart(name=\"figure\", figure=figure, step=1)\n assert log_mpl_plotly_chart.call_count == 1\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CHART, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 1\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_mpl_plotly_chart:\n self.run.log_mpl_plotly_chart(name=\"figure\", figure=figure, step=2)\n assert log_mpl_plotly_chart.call_count == 1\n assert plt.fignum_exists(figure.number) is False\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CHART, name=\"figure\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"image\", name=\"figure\", data=events_file)\n assert len(results.df.values) == 2\n\n def test_log_video_from_path(self):\n assert (\n os.path.exists(get_event_path(self.run_path, 
kind=V1ArtifactKind.VIDEO))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is False\n )\n video_file = tempfile.mkdtemp() + \"/video.gif\"\n self.touch(video_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_video:\n self.run.log_video(name=\"my_video\", data=video_file)\n assert log_video.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.VIDEO, name=\"my_video\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"video\", name=\"my_video\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.VIDEO, name=\"my_video\", ext=\"gif\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_data_video(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_dashboard:\n self.run.log_video(name=\"my_video\", data=tensor_np(shape=(4, 3, 1, 8, 8)))\n assert log_dashboard.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.VIDEO, name=\"my_video\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"video\", name=\"my_video\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.VIDEO, name=\"my_video\", ext=\"gif\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_audio_from_path(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is False\n )\n audio_file = tempfile.mkdtemp() + \"/audio.wav\"\n self.touch(audio_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_audio:\n self.run.log_audio(name=\"my_audio\", data=audio_file)\n assert log_audio.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.AUDIO, name=\"my_audio\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"audio\", name=\"my_audio\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.AUDIO, name=\"my_audio\", ext=\"wav\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_data_audio(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_audio:\n 
self.run.log_audio(name=\"my_audio\", data=tensor_np(shape=(42,)))\n assert log_audio.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.AUDIO, name=\"my_audio\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"audio\", name=\"my_audio\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.AUDIO, name=\"my_audio\", ext=\"wav\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_text(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_text:\n self.run.log_text(name=\"my_text\", text=\"some text\", step=1)\n assert log_text.call_count == 1\n\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.TEXT, name=\"my_text\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"text\", name=\"my_text\", data=events_file)\n assert len(results.df.values) == 1\n\n def test_log_html(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_html:\n self.run.log_html(name=\"my_div\", html=\"<div>test<div/>\", step=1)\n assert log_html.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.HTML, name=\"my_div\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"html\", name=\"my_div\", data=events_file)\n assert len(results.df.values) == 1\n\n def test_log_histogram(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_histogram:\n self.run.log_histogram(\n name=\"histo\", values=tensor_np(shape=(1024,)), bins=\"auto\", step=1\n )\n self.run.log_histogram(\n name=\"histo\", values=tensor_np(shape=(1024,)), bins=\"fd\", step=1\n )\n self.run.log_histogram(\n name=\"histo\", values=tensor_np(shape=(1024,)), bins=\"doane\", step=1\n )\n assert log_histogram.call_count == 3\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.HISTOGRAM, name=\"histo\"\n )\n assert 
os.path.exists(events_file) is True\n results = V1Events.read(kind=\"histogram\", name=\"histo\", data=events_file)\n assert len(results.df.values) == 3\n\n def test_log_np_histogram(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n values, counts = np.histogram(np.random.randint(255, size=(1000,)))\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_np_histogram:\n self.run.log_np_histogram(\n name=\"histo\", values=values, counts=counts, step=1\n )\n assert log_np_histogram.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.HISTOGRAM, name=\"histo\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"histogram\", name=\"histo\", data=events_file)\n assert len(results.df.values) == 1\n\n def test_log_model_file(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False\n model_file = tempfile.mkdtemp() + \"/model.pkl\"\n self.touch(model_file)\n with patch(\"polyaxon.tracking.run.Run.log_model_ref\") as log_model:\n self.run.log_model(\n name=\"my_model\", path=model_file, framework=\"scikit\", versioned=False\n )\n assert log_model.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False\n model_file = self.run.get_outputs_path(\"model.pkl\")\n assert os.path.exists(model_file) is True\n\n def test_log_model_dir(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False\n model_dir = tempfile.mkdtemp() + \"/model\"\n create_path(model_dir)\n model_file = model_dir + \"/model.pkl\"\n self.touch(model_file)\n weights_file = model_dir + \"/weights\"\n self.touch(weights_file)\n configs_file = model_dir + \"/configs\"\n self.touch(configs_file)\n with patch(\"polyaxon.tracking.run.Run.log_model_ref\") as log_model:\n self.run.log_model(\n name=\"my_model\", path=model_dir, framework=\"tensorflow\", versioned=False\n )\n assert log_model.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is True\n model_file = self.run.get_outputs_path(\n \"{}/{}\".format(V1ArtifactKind.MODEL, \"model.pkl\")\n )\n assert os.path.exists(model_file) is True\n\n def test_log_versioned_model_file(self):\n assert (\n 
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n model_file = tempfile.mkdtemp() + \"/model.pkl\"\n self.touch(model_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_model\") as log_model:\n self.run.log_model(\n name=\"my_model\", path=model_file, framework=\"scikit\", step=1\n )\n assert log_model.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.MODEL, name=\"my_model\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"model\", name=\"my_model\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.MODEL, name=\"my_model_1\", ext=\"pkl\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_versioned_model_dir(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is False\n )\n model_dir = tempfile.mkdtemp() + \"/model\"\n create_path(model_dir)\n model_file = model_dir + \"/model.pkl\"\n self.touch(model_file)\n weights_file = model_dir + \"/weights\"\n self.touch(weights_file)\n configs_file = model_dir + \"/configs\"\n self.touch(configs_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_model\") as log_model:\n self.run.log_model(\n name=\"my_model\", path=model_dir, framework=\"tensorflow\", step=1\n )\n assert log_model.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.MODEL, name=\"my_model\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"model\", name=\"my_model\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.MODEL, name=\"my_model_1\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_dataframe_ref(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n model_file = tempfile.mkdtemp() + \"/df.pkl\"\n self.touch(model_file)\n with patch(\"polyaxon.tracking.run.Run.log_artifact_ref\") as log_artifact_ref:\n self.run.log_artifact(\n name=\"dataframe\",\n path=model_file,\n kind=V1ArtifactKind.DATAFRAME,\n versioned=False,\n )\n assert log_artifact_ref.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n asset_file = self.run.get_outputs_path(rel_path=\"df.pkl\")\n assert os.path.exists(asset_file) is True\n\n def test_log_dataframe(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n assert (\n 
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n df = pd.DataFrame(data=[])\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_dataframe:\n self.run.log_dataframe(df=df, name=\"dataframe\", content_type=\"csv\")\n assert log_dataframe.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.DATAFRAME, name=\"dataframe\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=\"dataframe\", name=\"dataframe\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.DATAFRAME, name=\"dataframe\", ext=\"csv\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_artifact(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n tsv_file = tempfile.mkdtemp() + \"/file.tsv\"\n self.touch(tsv_file)\n with patch(\"polyaxon.tracking.run.Run.log_artifact_ref\") as log_artifact:\n self.run.log_artifact(\n name=\"file\",\n path=tsv_file,\n kind=V1ArtifactKind.TSV,\n versioned=False,\n )\n assert log_artifact.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert os.path.exists(self.run.get_outputs_path(\"file.tsv\")) is True\n\n def test_versioned_log_artifact(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n tsv_file = tempfile.mkdtemp() + \"/file.tsv\"\n self.touch(tsv_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_artifact:\n self.run.log_artifact(\n name=\"file\", path=tsv_file, kind=V1ArtifactKind.TSV, step=1\n )\n assert log_artifact.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=V1ArtifactKind.TSV, name=\"file\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file_1\", ext=\"tsv\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_artifact_without_name(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n tsv_file = tempfile.mkdtemp() + \"/file.tsv\"\n self.touch(tsv_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_artifact:\n self.run.log_artifact(path=tsv_file, kind=V1ArtifactKind.TSV, step=1)\n assert log_artifact.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, 
kind=V1ArtifactKind.TSV))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=V1ArtifactKind.TSV, name=\"file\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file_1\", ext=\"tsv\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_artifact_without_name_and_filename_with_several_dots(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))\n is False\n )\n tar_file = tempfile.mkdtemp() + \"/file.tar.gz\"\n self.touch(tar_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_artifact:\n self.run.log_artifact(path=tar_file, kind=V1ArtifactKind.FILE, step=1)\n assert log_artifact.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))\n is True\n )\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.FILE, name=\"file\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=V1ArtifactKind.FILE, name=\"file\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.FILE, name=\"file_1\", ext=\"tar.gz\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_versioned_artifacts(self):\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is False\n )\n tsv_file = tempfile.mkdtemp() + \"/file.tsv\"\n self.touch(tsv_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_artifact:\n self.run.log_artifact(\n name=\"file\", path=tsv_file, kind=V1ArtifactKind.TSV, step=1\n )\n assert log_artifact.call_count == 1\n pd_file = tempfile.mkdtemp() + \"/dataframe\"\n self.touch(pd_file)\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_artifact:\n self.run.log_artifact(\n name=\"file2\", path=pd_file, kind=V1ArtifactKind.DATAFRAME, step=1\n )\n assert log_artifact.call_count == 1\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))\n is True\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is True\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))\n is True\n )\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(kind=V1ArtifactKind.TSV, name=\"file\", data=events_file)\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.DATAFRAME, name=\"file2\"\n )\n assert 
os.path.exists(events_file) is True\n results = V1Events.read(kind=V1ArtifactKind.TSV, name=\"file\", data=events_file)\n assert len(results.df.values) == 1\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.TSV, name=\"file_1\", ext=\"tsv\"\n )\n assert os.path.exists(asset_file) is True\n\n asset_file = get_asset_path(\n self.run_path, kind=V1ArtifactKind.DATAFRAME, name=\"file2_1\"\n )\n assert os.path.exists(asset_file) is True\n\n def test_log_charts(self):\n x = [1, 2, 3, 4, 5]\n y = [6, 7, 2, 4, 5]\n bokeh_test = figure(\n title=\"simple line example\", x_axis_label=\"x\", y_axis_label=\"y\"\n )\n bokeh_test.line(x, y, line_width=2)\n\n x1 = np.random.randn(200) - 2\n x2 = np.random.randn(200)\n x3 = np.random.randn(200) + 2\n hist_data = [x1, x2, x3]\n group_labels = [\"Group 1\", \"Group 2\", \"Group 3\"]\n plotly_test = figure_factory.create_distplot(\n hist_data, group_labels, bin_size=[0.1, 0.25, 0.5]\n )\n\n df1 = pd.DataFrame([[\"A\", \"B\", \"C\", \"D\"], [28, 55, 43, 91]], index=[\"a\", \"b\"]).T\n alt_test = alt.Chart(df1).mark_bar().encode(x=\"a\", y=\"b\")\n\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_charts:\n self.run.log_bokeh_chart(name=\"bokeh_test\", figure=bokeh_test, step=1)\n self.run.log_plotly_chart(name=\"plotly_test\", figure=plotly_test, step=1)\n self.run.log_altair_chart(name=\"alt_test\", figure=alt_test, step=1)\n assert log_charts.call_count == 3\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))\n is True\n )\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CHART, name=\"bokeh_test\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(\n kind=V1ArtifactKind.CHART, name=\"bokeh_test\", data=events_file\n )\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CHART, name=\"plotly_test\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(\n kind=V1ArtifactKind.CHART, name=\"plotly_test\", data=events_file\n )\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CHART, name=\"alt_test\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(\n kind=V1ArtifactKind.CHART, name=\"alt_test\", data=events_file\n )\n assert len(results.df.values) == 1\n\n def test_log_curves(self):\n x = [1, 2, 3, 4, 5]\n y = [6, 7, 2, 4, 5]\n\n with patch(\"polyaxon.tracking.run.Run._log_has_events\") as log_curves:\n self.run.log_roc_auc_curve(name=\"roc_test\", fpr=x, tpr=y, auc=0.6, step=1)\n self.run.log_pr_curve(\n name=\"pr_test\", precision=x, recall=y, average_precision=0.6, step=1\n )\n self.run.log_curve(name=\"curve_test\", x=x, y=y, annotation=0.6, step=1)\n assert log_curves.call_count == 3\n self.event_logger.flush()\n assert (\n os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CURVE))\n is False\n )\n assert (\n os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CURVE))\n is True\n )\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CURVE, name=\"roc_test\"\n )\n assert os.path.exists(events_file) is True\n results = 
V1Events.read(\n kind=V1ArtifactKind.CURVE, name=\"roc_test\", data=events_file\n )\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CURVE, name=\"pr_test\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(\n kind=V1ArtifactKind.CHART, name=\"pr_test\", data=events_file\n )\n assert len(results.df.values) == 1\n\n events_file = get_event_path(\n self.run_path, kind=V1ArtifactKind.CURVE, name=\"curve_test\"\n )\n assert os.path.exists(events_file) is True\n results = V1Events.read(\n kind=V1ArtifactKind.CHART, name=\"curve_test\", data=events_file\n )\n assert len(results.df.values) == 1\n"
] |
[
[
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.fignum_exists",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis"
]
] |
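The test methods in the record above all repeat one pattern: log an event through the `Run` API, flush the event logger fixture, then resolve the per-name events file with `get_event_path` and read it back with `V1Events.read`. The sketch below condenses that flow into a single helper under stated assumptions: `run`, `event_logger`, `events_file`, and the helper name `logged_metric_rows` are hypothetical stand-ins for the fixtures and paths the test class builds, and the `V1Events` import path is an assumption since the record does not show the module's import block.

```python
# Hedged sketch of the log -> flush -> read-back pattern the tests above repeat.
# `run` is an offline-configured polyaxon Run, `event_logger` the logger the test
# class flushes, and `events_file` the path the tests obtain from get_event_path();
# all three are stand-ins for test fixtures, not objects created here.
from polyaxon.polyboard.events import V1Events  # assumed import path


def logged_metric_rows(run, event_logger, events_file, name, value, step):
    # Mirrors `self.run.log_metrics(step=1, metric1=1.1)` in the tests above.
    run.log_metrics(step=step, **{name: value})
    event_logger.flush()

    # Each metric name gets its own events file; every logged step adds one row.
    results = V1Events.read(kind="metric", name=name, data=events_file)
    return len(results.df.values)
```

The returned row count is exactly what the tests assert on: 1 after the first `log_metrics` call for a name, 2 after logging the same name again at a later step.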
Jerry-li-uw/privacy
|
[
"7019bc04c9068772f7412650fac4c56851cdcc23"
] |
[
"tutorials/dp_optimizer_adp.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Differentially private version of Keras optimizer v2.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport sys\n\nfrom tensorflow_privacy.privacy.dp_query import gaussian_query\n\n\ndef make_keras_optimizer_class(cls):\n \"\"\"Constructs a DP Keras optimizer class from an existing one.\"\"\"\n\n class DPOptimizerClass(cls):\n \"\"\"Differentially private subclass of given class cls.\n\n The class tf.keras.optimizers.Optimizer has two methods to compute\n gradients, `_compute_gradients` and `get_gradients`. The first works\n with eager execution, while the second runs in graph mode and is used\n by canned estimators.\n\n Internally, DPOptimizerClass stores hyperparameters both individually\n and encapsulated in a `GaussianSumQuery` object for these two use cases.\n However, this should be invisible to users of this class.\n \"\"\"\n\n def __init__(\n self,\n l2_norm_clip,\n noise_multiplier,\n changing_clipping=False,\n num_microbatches=None,\n gradient_norm=None,\n *args, # pylint: disable=keyword-arg-before-vararg, g-doc-args\n **kwargs):\n \"\"\"Initialize the DPOptimizerClass.\n\n Args:\n l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)\n noise_multiplier: Ratio of the standard deviation to the clipping norm\n num_microbatches: The number of microbatches into which each minibatch\n is split.\n \"\"\"\n super(DPOptimizerClass, self).__init__(*args, **kwargs)\n self._l2_norm_clip = l2_norm_clip\n self._norm_clip = tf.Variable(l2_norm_clip)\n self._noise_multiplier = noise_multiplier\n self._num_microbatches = num_microbatches\n self._dp_sum_query = gaussian_query.GaussianSumQuery(\n l2_norm_clip, l2_norm_clip * noise_multiplier)\n self._global_state = None\n self._was_dp_gradients_called = False\n self._changing_clipping = changing_clipping\n self.gradient_norm = gradient_norm\n\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n # if self._changing_clipping:\n # tf.print(\"cur C:\", self._norm_clip.value(), output_stream=sys.stdout)\n\n \"\"\"DP version of superclass method.\"\"\"\n\n self._was_dp_gradients_called = True\n # Compute loss.\n if not callable(loss) and tape is None:\n raise ValueError('`tape` is required when a `Tensor` loss is passed.')\n tape = tape if tape is not None else tf.GradientTape()\n\n if callable(loss):\n with tape:\n if not callable(var_list):\n tape.watch(var_list)\n\n if callable(loss):\n loss = loss()\n microbatch_losses = tf.reduce_mean(\n tf.reshape(loss, [self._num_microbatches, -1]), axis=1)\n\n if callable(var_list):\n var_list = var_list()\n else:\n with tape:\n microbatch_losses = tf.reduce_mean(\n tf.reshape(loss, [self._num_microbatches, -1]), axis=1)\n\n var_list = 
tf.nest.flatten(var_list)\n\n # Compute the per-microbatch losses using helpful jacobian method.\n with tf.keras.backend.name_scope(self._name + '/gradients'):\n jacobian = tape.jacobian(microbatch_losses, var_list)\n\n if self._changing_clipping:\n\n if False:\n self._norm_clip.assign_add(0.002)\n tf.print(\"cur C:\", self._norm_clip.value(), output_stream=sys.stdout)\n\n else:\n gr_norm = jacobian.copy()\n for i in range(len(gr_norm)):\n gr_norm[i] = tf.norm(gr_norm[i]) \n gr_mean = tf.math.reduce_mean(gr_norm)\n # tf.print(\"cur norm:\", gr_mean, output_stream=sys.stdout)\n self._norm_clip.assign(gr_mean)\n C = self._norm_clip.value()\n tf.print(\"cur C:\", C, output_stream=sys.stdout)\n # d_clip = gr_norm\n # if self._changing_clipping and (C < d_clip):\n # d_clip += 0.1*(d_clip - C)\n # self._norm_clip.assign(d_clip)\n # tf.print(\"cur C:\", self._norm_clip.value(), output_stream=sys.stdout)\n\n # Clip gradients to given l2_norm_clip.\n def clip_gradients(g):\n return tf.clip_by_global_norm(g, self._norm_clip.value())[0]\n\n clipped_gradients = tf.map_fn(clip_gradients, jacobian)\n\n def reduce_noise_normalize_batch(g):\n # Sum gradients over all microbatches.\n summed_gradient = tf.reduce_sum(g, axis=0)\n\n # Add noise to summed gradients.\n noise_stddev = self._l2_norm_clip * self._noise_multiplier\n noise = tf.random.normal(\n tf.shape(input=summed_gradient), stddev=noise_stddev)\n noised_gradient = tf.add(summed_gradient, noise)\n\n # Normalize by number of microbatches and return.\n return tf.truediv(noised_gradient, self._num_microbatches)\n\n final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch,\n clipped_gradients)\n\n return list(zip(final_gradients, var_list))\n\n def get_gradients(self, loss, params):\n if self._changing_clipping:\n self._l2_norm_clip *= 0.99\n tf.print(\"cur C:\", self._l2_norm_clip, output_stream=sys.stdout)\n\n \"\"\"DP version of superclass method.\"\"\"\n\n self._was_dp_gradients_called = True\n if self._global_state is None:\n self._global_state = self._dp_sum_query.initial_global_state()\n\n # This code mostly follows the logic in the original DPOptimizerClass\n # in dp_optimizer.py, except that this returns only the gradients,\n # not the gradients and variables.\n microbatch_losses = tf.reshape(loss, [self._num_microbatches, -1])\n sample_params = (\n self._dp_sum_query.derive_sample_params(self._global_state))\n\n def process_microbatch(i, sample_state):\n \"\"\"Process one microbatch (record) with privacy helper.\"\"\"\n mean_loss = tf.reduce_mean(\n input_tensor=tf.gather(microbatch_losses, [i]))\n grads = tf.gradients(mean_loss, params)\n sample_state = self._dp_sum_query.accumulate_record(\n sample_params, sample_state, grads)\n return sample_state\n\n sample_state = self._dp_sum_query.initial_sample_state(params)\n for idx in range(self._num_microbatches):\n sample_state = process_microbatch(idx, sample_state)\n grad_sums, self._global_state = (\n self._dp_sum_query.get_noised_result(sample_state,\n self._global_state))\n \n\n\n def normalize(v):\n try:\n return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32))\n except TypeError:\n return None\n\n final_grads = tf.nest.map_structure(normalize, grad_sums)\n\n return final_grads\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n assert self._was_dp_gradients_called, (\n 'Neither _compute_gradients() or get_gradients() on the '\n 'differentially private optimizer was called. This means the '\n 'training is not differentially private. 
It may be the case that '\n 'you need to upgrade to TF 2.4 or higher to use this particular '\n 'optimizer.')\n return super(DPOptimizerClass,\n self).apply_gradients(grads_and_vars, global_step, name)\n\n return DPOptimizerClass\n\n\nDPKerasAdagradOptimizer = make_keras_optimizer_class(\n tf.keras.optimizers.Adagrad)\nDPKerasAdamOptimizer = make_keras_optimizer_class(tf.keras.optimizers.Adam)\nDPKerasSGDOptimizer = make_keras_optimizer_class(tf.keras.optimizers.SGD)\n"
] |
[
[
"tensorflow.shape",
"tensorflow.GradientTape",
"tensorflow.Variable",
"tensorflow.keras.backend.name_scope",
"tensorflow.reshape",
"tensorflow.nest.map_structure",
"tensorflow.nest.flatten",
"tensorflow.map_fn",
"tensorflow.print",
"tensorflow.gradients",
"tensorflow.truediv",
"tensorflow.reduce_sum",
"tensorflow.math.reduce_mean",
"tensorflow.gather",
"tensorflow.norm",
"tensorflow.add",
"tensorflow.cast"
]
] |
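The `make_keras_optimizer_class` wrapper in the record above clips per-microbatch gradients to the (optionally adaptive) norm held in `self._norm_clip` and adds Gaussian noise with standard deviation `l2_norm_clip * noise_multiplier` before averaging over microbatches. Below is a hedged usage sketch: the model, hyperparameter values, and the import path (inferred from the file path `tutorials/dp_optimizer_adp.py`) are assumptions for illustration, not taken from the repository.

```python
# Illustrative usage of the DPKerasSGDOptimizer defined above; the model and
# hyperparameter values are assumptions, not taken from the repository.
import tensorflow as tf
from tutorials.dp_optimizer_adp import DPKerasSGDOptimizer  # assumed import path

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10),
])

optimizer = DPKerasSGDOptimizer(
    l2_norm_clip=1.0,         # initial clipping norm C
    noise_multiplier=1.1,     # noise stddev = l2_norm_clip * noise_multiplier
    changing_clipping=True,   # enables the adaptive-clipping branch shown above
    num_microbatches=32,      # batch size must be divisible by this
    learning_rate=0.1,        # forwarded to tf.keras.optimizers.SGD via **kwargs
)

# The optimizer reshapes the loss to [num_microbatches, -1], so the loss must be
# left unreduced (one value per example).
loss = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

model.compile(optimizer=optimizer, loss=loss)
# model.fit(x_train, y_train, batch_size=256, epochs=1)  # 256 % 32 == 0
```

With `changing_clipping=True`, the clipping norm reported by the `tf.print("cur C:", ...)` calls tracks the mean per-variable gradient norm instead of staying fixed at `l2_norm_clip`.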
JustinSGray/blue
|
[
"49f4edd00eda43f7ce5e10a5121839f2e897d429"
] |
[
"openmdao/components/tests/test_meta_model_structured_comp.py"
] |
[
"\"\"\"\nUnit tests for the structured metamodel component.\n\"\"\"\nimport unittest\nimport inspect\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning, assert_check_partials\nfrom openmdao.utils.general_utils import set_pyoptsparse_opt\n\nscipy_gte_019 = True\ntry:\n from scipy.interpolate._bsplines import make_interp_spline\nexcept ImportError:\n scipy_gte_019 = False\n\n# check that pyoptsparse is installed\n# if it is, try to use SNOPT but fall back to SLSQP\nOPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT')\nif OPTIMIZER:\n from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver\n\nx = np.array([-0.97727788, -0.15135721, -0.10321885, 0.40015721, 0.4105985,\n 0.95008842, 0.97873798, 1.76405235, 1.86755799, 2.2408932 ])\n\ny = np.array([ 0.12167502, 0.14404357, 0.44386323, 0.76103773, 1.45427351])\n\nz = np.array([-2.55298982, -1.45436567, -0.85409574, -0.74216502, -0.20515826, 0.04575852,\n 0.3130677, 0.33367433, 0.6536186, 0.8644362, 1.49407907, 2.26975462])\n\nf = np.array([\n [[-0.18718385, 1.53277921, 1.46935877, 0.15494743, 0.37816252, -0.88778575,\n -1.98079647, -0.34791215, 0.15634897, 1.23029068, 1.20237985, -0.38732682],\n [-0.30230275, -1.04855297, -1.42001794, -1.70627019, 1.9507754, -0.50965218,\n -0.4380743, -1.25279536, 0.77749036, -1.61389785, -0.21274028, -0.89546656],\n [ 0.3869025, -0.51080514, -1.18063218, -0.02818223, 0.42833187, 0.06651722,\n 0.3024719, -0.63432209, -0.36274117, -0.67246045, -0.35955316, -0.81314628],\n [-1.7262826, 0.17742614, -0.40178094, -1.63019835, 0.46278226, -0.90729836,\n 0.0519454, 0.72909056, 0.12898291, 1.13940068, -1.23482582, 0.40234164],\n [-0.68481009, -0.87079715, -0.57884966, -0.31155253, 0.05616534, -1.16514984,\n 0.90082649, 0.46566244, -1.53624369, 1.48825219, 1.89588918, 1.17877957]],\n\n [[-0.17992484, -1.07075262, 1.05445173, -0.40317695, 1.22244507, 0.20827498,\n 0.97663904, 0.3563664, 0.70657317, 0.01050002, 1.78587049, 0.12691209],\n [ 0.40198936, 1.8831507, -1.34775906, -1.270485, 0.96939671, -1.17312341,\n 1.94362119, -0.41361898, -0.74745481, 1.92294203, 1.48051479, 1.86755896],\n [ 0.90604466, -0.86122569, 1.91006495, -0.26800337, 0.8024564, 0.94725197,\n -0.15501009, 0.61407937, 0.92220667, 0.37642553, -1.09940079, 0.29823817],\n [ 1.3263859, -0.69456786, -0.14963454, -0.43515355, 1.84926373, 0.67229476,\n 0.40746184, -0.76991607, 0.53924919, -0.67433266, 0.03183056, -0.63584608],\n [ 0.67643329, 0.57659082, -0.20829876, 0.39600671, -1.09306151, -1.49125759,\n 0.4393917, 0.1666735, 0.63503144, 2.38314477, 0.94447949, -0.91282223]],\n\n [[ 1.11701629, -1.31590741, -0.4615846, -0.06824161, 1.71334272, -0.74475482,\n -0.82643854, -0.09845252, -0.66347829, 1.12663592, -1.07993151, -1.14746865],\n [-0.43782004, -0.49803245, 1.92953205, 0.94942081, 0.08755124, -1.22543552,\n 0.84436298, -1.00021535, -1.5447711, 1.18802979, 0.31694261, 0.92085882],\n [ 0.31872765, 0.85683061, -0.65102559, -1.03424284, 0.68159452, -0.80340966,\n -0.68954978, -0.4555325, 0.01747916, -0.35399391, -1.37495129, -0.6436184 ],\n [-2.22340315, 0.62523145, -1.60205766, -1.10438334, 0.05216508, -0.739563,\n 1.5430146, -1.29285691, 0.26705087, -0.03928282, -1.1680935, 0.52327666],\n [-0.17154633, 0.77179055, 0.82350415, 2.16323595, 1.33652795, -0.36918184,\n -0.23937918, 1.0996596, 0.65526373, 0.64013153, -1.61695604, -0.02432612]],\n\n [[-0.73803091, 0.2799246, -0.09815039, 0.91017891, 0.31721822, 0.78632796,\n -0.4664191, 
-0.94444626, -0.41004969, -0.01702041, 0.37915174, 2.25930895],\n [-0.04225715, -0.955945 , -0.34598178, -0.46359597, 0.48148147, -1.54079701,\n 0.06326199, 0.15650654, 0.23218104, -0.59731607, -0.23792173, -1.42406091],\n [-0.49331988, -0.54286148, 0.41605005, -1.15618243, 0.7811981, 1.49448454,\n -2.06998503, 0.42625873, 0.67690804, -0.63743703, -0.39727181, -0.13288058],\n [-0.29779088, -0.30901297, -1.67600381, 1.15233156, 1.07961859, -0.81336426,\n -1.46642433, 0.52106488, -0.57578797, 0.14195316, -0.31932842, 0.69153875],\n [ 0.69474914, -0.72559738, -1.38336396, -1.5829384, 0.61037938, -1.18885926,\n -0.50681635, -0.59631404, -0.0525673, -1.93627981, 0.1887786, 0.52389102]],\n\n [[ 0.08842209, -0.31088617, 0.09740017, 0.39904635, -2.77259276, 1.95591231,\n 0.39009332, -0.65240858, -0.39095338, 0.49374178, -0.11610394, -2.03068447],\n [ 2.06449286, -0.11054066, 1.02017271, -0.69204985, 1.53637705, 0.28634369,\n 0.60884383, -1.04525337, 1.21114529, 0.68981816, 1.30184623, -0.62808756],\n [-0.48102712, 2.3039167, -1.06001582, -0.1359497, 1.13689136, 0.09772497,\n 0.58295368, -0.39944903, 0.37005589, -1.30652685, 1.65813068, -0.11816405],\n [-0.6801782, 0.66638308, -0.46071979, -1.33425847, -1.34671751, 0.69377315,\n -0.15957344, -0.13370156, 1.07774381, -1.12682581, -0.73067775, -0.38487981],\n [ 0.09435159, -0.04217145, -0.28688719, -0.0616264, -0.10730528, -0.71960439,\n -0.81299299, 0.27451636, -0.89091508, -1.15735526, -0.31229225, -0.15766702]],\n\n [[ 2.2567235, -0.70470028, 0.94326072, 0.74718833, -1.18894496, 0.77325298,\n -1.18388064, -2.65917224, 0.60631952, -1.75589058, 0.45093446, -0.6840109 ],\n [ 1.6595508, 1.0685094, -0.4533858, -0.68783761, -1.2140774, -0.44092263,\n -0.2803555, -0.36469354, 0.15670386, 0.5785215, 0.34965446, -0.76414392],\n [-1.43779147, 1.36453185, -0.68944918, -0.6522936, -0.52118931, -1.84306955,\n -0.477974 , -0.47965581, 0.6203583, 0.69845715, 0.00377089, 0.93184837],\n [ 0.33996498, -0.01568211, 0.16092817, -0.19065349, -0.39484951, -0.26773354,\n -1.12801133, 0.28044171, -0.99312361, 0.84163126, -0.24945858, 0.04949498],\n [ 0.49383678, 0.64331447, -1.57062341, -0.20690368, 0.88017891, -1.69810582,\n 0.38728048, -2.25556423, -1.02250684, 0.03863055, -1.6567151, -0.98551074]],\n\n [[-1.47183501, 1.64813493, 0.16422776, 0.56729028, -0.2226751, -0.35343175,\n -1.61647419, -0.29183736, -0.76149221, 0.85792392, 1.14110187, 1.46657872],\n [ 0.85255194, -0.59865394, -1.11589699, 0.76666318, 0.35629282, -1.76853845,\n 0.35548179, 0.81451982, 0.05892559, -0.18505367, -0.80764849, -1.4465347 ],\n [ 0.80029795, -0.30911444, -0.23346666, 1.73272119, 0.68450111, 0.370825 ,\n 0.14206181, 1.51999486, 1.71958931, 0.92950511, 0.58222459, -2.09460307],\n [ 0.12372191, -0.13010695, 0.09395323, 0.94304609, -2.73967717, -0.56931205,\n 0.26990435, -0.46684555, -1.41690611, 0.86896349, 0.27687191, -0.97110457],\n [ 0.3148172, 0.82158571, 0.00529265, 0.8005648, 0.07826018, -0.39522898,\n -1.15942052, -0.08593077, 0.19429294, 0.87583276, -0.11510747, 0.45741561]],\n\n [[-0.96461201, -0.78262916, -0.1103893, -1.05462846, 0.82024784, 0.46313033,\n 0.27909576, 0.33890413, 2.02104356, -0.46886419, -2.20144129, 0.1993002 ],\n [-0.05060354, -0.51751904, -0.97882986, -0.43918952, 0.18133843, -0.5028167,\n 2.41245368, -0.96050438, -0.79311736, -2.28862004, 0.25148442, -2.01640663],\n [-0.53945463, -0.27567053, -0.70972797, 1.73887268, 0.99439439, 1.31913688,\n -0.88241882, 1.12859406, 0.49600095, 0.77140595, 1.02943883, -0.90876325],\n [-0.42431762, 0.86259601, -2.65561909, 
1.51332808, 0.55313206, -0.04570396,\n 0.22050766, -1.02993528, -0.34994336, 1.10028434, 1.29802197, 2.69622405],\n [-0.07392467, -0.65855297, -0.51423397, -1.01804188, -0.07785476, 0.38273243,\n -0.03424228, 1.09634685, -0.2342158, -0.34745065, -0.58126848, -1.63263453]],\n\n [[-1.56776772, -1.17915793, 1.30142807, 0.89526027, 1.37496407, -1.33221165,\n -1.96862469, -0.66005632, 0.17581895, 0.49869027, 1.04797216, 0.28427967],\n [ 1.74266878, -0.22260568, -0.91307922, -1.68121822, -0.88897136, 0.24211796,\n -0.88872026, 0.93674246, 1.41232771, -2.36958691, 0.8640523, -2.23960406],\n [ 0.40149906, 1.22487056, 0.06485611, -1.27968917, -0.5854312, -0.26164545,\n -0.18224478, -0.20289684, -0.10988278, 0.21348005, -1.20857365, -0.24201983],\n [ 1.51826117, -0.38464542, -0.44383609, 1.0781973, -2.55918467, 1.1813786,\n -0.63190376, 0.16392857, 0.09632136, 0.94246812, -0.26759475, -0.67802578],\n [ 1.29784579, -2.36417382, 0.02033418, -1.34792542, -0.76157339, 2.01125668,\n -0.04459543, 0.1950697, -1.78156286, -0.72904466, 0.1965574, 0.35475769]],\n\n [[ 0.61688655, 0.0086279, 0.52700421, 0.45378191, -1.82974041, 0.03700572,\n 0.76790241, 0.58987982, -0.36385881, -0.80562651, -1.11831192, -0.13105401],\n [ 1.13307988, -1.9518041, -0.65989173, -1.13980246, 0.78495752, -0.55430963,\n -0.47063766, -0.21694957, 0.44539325, -0.392389, -3.04614305, 0.54331189],\n [ 0.43904296, -0.21954103, -1.08403662, 0.35178011, 0.37923553, -0.47003288,\n -0.21673147, -0.9301565, -0.17858909, -1.55042935, 0.41731882, -0.94436849],\n [ 0.23810315, -1.40596292, -0.59005765, -0.11048941, -1.66069981, 0.11514787,\n -0.37914756, -1.7423562, -1.30324275, 0.60512008, 0.89555599, -0.13190864],\n [ 0.40476181, 0.22384356, 0.32962298, 1.28598401, -1.5069984, 0.67646073,\n -0.38200896, -0.22425893, -0.30224973, -0.37514712, -1.22619619, 0.1833392 ]\n]])\n\ng = np.array([\n [[ 1.67094303e+00, -5.61330204e-02, -1.38504274e-03,\n -6.87299037e-01, -1.17474546e-01, 4.66166426e-01,\n -3.70242441e-01, -4.53804041e-01, 4.03264540e-01,\n -9.18004770e-01, 2.52496627e-01, 8.20321797e-01],\n [ 1.35994854e+00, -9.03820073e-02, 1.36759724e+00,\n 1.03440989e+00, -9.96212640e-01, -1.21793851e+00,\n -3.04963638e-01, 1.02893549e+00, -7.22870076e-02,\n -6.00657558e-01, 1.55224318e+00, 2.86904488e-01],\n [-2.32059428e+00, 3.17160626e-01, 5.20040615e-01,\n 2.25608654e-01, 4.49712100e-01, -6.72756089e-02,\n -1.31839587e+00, -3.70704003e-01, -9.45615796e-01,\n -9.32740911e-01, -1.26306835e+00, 4.52489093e-01],\n [ 9.78961454e-02, -4.48165363e-01, -6.49337928e-01,\n -2.34231050e-02, 1.07919473e+00, -2.00421572e+00,\n 3.76876521e-01, -5.45711974e-01, -1.88458584e+00,\n -1.94570308e+00, -9.12783494e-01, 2.19509556e-01],\n [ 3.93062934e-01, -9.38981573e-01, 1.01702099e+00,\n 1.42298350e+00, 3.96086585e-01, -5.91402668e-01,\n 1.12441918e+00, 7.55395696e-01, 8.67407411e-01,\n -6.56463675e-01, -2.83455451e+00, 2.11679102e+00]],\n\n [[-1.61087840e+00, -3.57680719e-02, 2.38074535e+00,\n 3.30576756e-01, 9.49246474e-01, -1.50239657e+00,\n -1.77766695e+00, -5.32702792e-01, 1.09074973e+00,\n -3.46249448e-01, -7.94636321e-01, 1.97967290e-01],\n [ 1.08193522e+00, -1.44494020e+00, -1.21054299e+00,\n -7.88669255e-01, 1.09463837e+00, 2.34821526e-01,\n 2.13215341e+00, 9.36445726e-01, -3.50951769e-02,\n 1.26507784e+00, 2.11497013e-01, -7.04921353e-01],\n [ 6.79974844e-01, -6.96326654e-01, -2.90397101e-01,\n 1.32778270e+00, -1.01281486e-01, -8.03141387e-01,\n -4.64337691e-01, 1.02179059e+00, -5.52540673e-01,\n -3.86870847e-01, -5.10292740e-01, 1.83925494e-01],\n 
[-3.85489760e-01, -1.60183605e+00, -8.87180942e-01,\n -9.32789042e-01, 1.24331938e+00, 8.12674042e-01,\n 5.87259379e-01, -5.05358317e-01, -8.15791542e-01,\n -5.07517602e-01, -1.05188010e+00, 2.49720039e+00],\n [-2.24532165e+00, 5.64008535e-01, -1.28455230e+00,\n -1.04343491e-01, -9.88001942e-01, -1.17762896e+00,\n -1.14019630e+00, 1.75498615e+00, -1.32988422e-01,\n -7.65702194e-01, 5.55786964e-01, 1.03493146e-02]],\n\n [[ 7.20033759e-01, -1.82425666e+00, 3.03603904e-01,\n 7.72694837e-01, -1.66159829e+00, 4.48195284e-01,\n 1.69618157e+00, -1.48577034e-02, 8.21405937e-01,\n 6.70570450e-01, -7.07505698e-01, 3.97667346e-02],\n [-1.56699471e+00, -4.51303037e-01, 2.65687975e-01,\n 7.23100494e-01, 2.46121252e-02, 7.19983730e-01,\n -1.10290621e+00, -1.01697275e-01, 1.92793845e-02,\n 1.84959125e+00, -2.14166656e-01, -4.99016638e-01],\n [ 2.13512238e-02, -9.19113445e-01, 1.92753849e-01,\n -3.65055217e-01, -1.79132755e+00, -5.85865511e-02,\n -3.17543094e-01, -1.63242330e+00, -6.71341546e-02,\n 1.48935596e+00, 5.21303748e-01, 6.11927193e-01],\n [-1.34149673e+00, 4.76898369e-01, 1.48449581e-01,\n 5.29045238e-01, 4.22628622e-01, -1.35978073e+00,\n -4.14008116e-02, -7.57870860e-01, -5.00840943e-02,\n -8.97400927e-01, 1.31247037e+00, -8.58972388e-01],\n [-8.98942156e-01, 7.45864065e-02, -1.07709907e+00,\n -4.24663302e-01, -8.29964598e-01, 1.41117206e+00,\n 7.85803827e-01, -5.74695185e-02, -3.91217052e-01,\n 9.40917615e-01, 4.05204080e-01, 4.98052405e-01]],\n\n [[-2.61922373e-02, -1.68823003e+00, -1.12465983e-01,\n -5.32489919e-01, 6.45055273e-01, 1.01184243e+00,\n -6.57951045e-01, 4.68385234e-01, 1.73587900e+00,\n -6.67712721e-01, 1.68192174e+00, -8.52585847e-01],\n [ 2.29597556e-02, -1.11456118e-02, 1.14988999e-02,\n -8.37678042e-01, -5.91183104e-01, -6.67720286e-01,\n 3.26962595e-01, 3.30035115e-01, 2.22594433e+00,\n 1.37098901e+00, -5.09843242e-01, 3.24869616e-01],\n [ 9.97117981e-01, 3.06018243e-02, -6.96415784e-02,\n 5.15749428e-02, 8.67276629e-01, -8.48320523e-01,\n -3.25669469e-01, 4.70433145e-01, 3.11447072e-01,\n 2.39582760e-01, -3.69801166e-01, 9.72535789e-01],\n [ 2.13386825e+00, 4.06415494e-01, -1.93176702e-01,\n 7.55740289e-01, -5.39132637e-01, -7.49690345e-01,\n 3.28087476e-02, -2.58279663e+00, -1.15395036e+00,\n -3.47961856e-01, -1.35338886e+00, -1.03264310e+00],\n [-4.36748337e-01, -1.64296529e+00, -4.06071796e-01,\n -5.35270165e-01, 2.54052084e-02, 1.15418403e+00,\n 1.72504416e-01, 2.10620213e-02, 9.94544570e-02,\n 2.27392775e-01, -1.01673865e+00, -1.14775325e-01]],\n\n [[ 3.08751242e-01, -1.37075998e+00, 8.65652923e-01,\n 1.08137603e+00, -6.31375988e-01, -2.41337791e-01,\n -8.78190343e-01, 6.99380484e-01, -1.06122229e+00,\n -2.22477010e-01, -8.58919908e-01, 5.09542770e-02],\n [-1.79422927e+00, 1.32646164e+00, -9.64606424e-01,\n 5.98946831e-02, -2.12523045e-01, -7.62114512e-01,\n -8.87780137e-01, 9.36398544e-01, -5.25640593e-01,\n 2.71170185e-01, -8.01496885e-01, -6.47181432e-01],\n [ 4.72247150e-01, 9.30408496e-01, -1.75316402e-01,\n -1.42191987e+00, 1.99795608e+00, -8.56549308e-01,\n -1.54158740e+00, 2.59442459e+00, -4.04032294e-01,\n -1.46173269e+00, -6.83439767e-01, 3.67544896e-01],\n [ 1.90311558e-01, -8.51729197e-01, 1.82272360e+00,\n -5.21579678e-01, -1.18468659e+00, 9.60693398e-01,\n 1.32906285e+00, -8.17493098e-01, -1.40134729e+00,\n 1.03043827e+00, -2.04732361e+00, -1.22662166e+00],\n [ 9.67446150e-01, -5.53525480e-02, -2.63937349e-01,\n 3.52816606e-01, -1.52774424e-01, -1.29868672e+00,\n 1.27607535e+00, 1.32501405e+00, 2.05332564e-01,\n 4.51340154e-02, 2.33962481e+00, 
-2.76432845e-01]],\n\n [[-2.59576982e-01, 3.64481249e-01, 1.47132196e+00,\n 1.59277075e+00, -2.58572632e-01, 3.08331246e-01,\n -1.37808347e+00, -3.11976108e-01, -8.40290395e-01,\n -1.00683175e+00, 1.68157672e+00, -7.92286662e-01],\n [-5.31605908e-01, 3.65848788e-01, 1.29782527e+00,\n 4.81115126e-01, 2.75935511e+00, -7.46679783e-02,\n 2.58716440e-01, 2.75600674e-01, 1.43504939e+00,\n 5.07238951e-01, -1.16229700e-01, -9.47488595e-01],\n [ 2.44443456e-01, 1.40134483e+00, -4.10381794e-01,\n 5.28943618e-01, 2.46147789e-01, 8.63519658e-01,\n -8.04753741e-01, 2.34664703e+00, -1.27916111e+00,\n -3.65551090e-01, 9.38092541e-01, 2.96733172e-01],\n [ 8.29986159e-01, -4.96102334e-01, -7.48049827e-02,\n 1.22319836e-02, 1.56925961e+00, 6.90429024e-01,\n 7.96672108e-01, -6.57926093e-01, 9.68882639e-01,\n 2.25581664e-01, 1.38914532e+00, 2.01406015e+00],\n [-3.06765776e-01, -4.06303130e-01, -8.64044991e-01,\n -1.43579512e-01, -3.82025449e-01, 3.59504400e-01,\n -1.44566817e-01, -3.61599281e-01, 1.06458514e+00,\n -9.37880231e-01, 4.33107953e-01, -4.05941727e-01]],\n\n [[ 7.24368505e-01, 1.38526155e+00, -3.03098253e-01,\n 4.41032907e-01, 1.78792866e-01, -7.99422400e-01,\n 2.40787510e-01, 2.89120505e-01, 4.12870820e-01,\n -1.98398897e-01, 9.41923003e-02, -1.14761094e+00],\n [-3.58114075e-01, 5.55962680e-01, 8.92473887e-01,\n -4.22314824e-01, 1.04714029e-01, 2.28053325e-01,\n 2.01479947e-01, 5.40773585e-01, -1.81807763e+00,\n -4.93240701e-02, 2.39033601e-01, -1.00033035e+00],\n [ 1.67398571e+00, 1.61559267e-01, 1.56340475e+00,\n -7.90523022e-01, -9.07300122e-01, 2.24252221e-01,\n -1.67868836e+00, 2.14965591e-01, 9.72192320e-02,\n 1.01566528e+00, 7.01041341e-01, -4.17477350e-01],\n [-1.09749665e+00, 1.71230522e+00, -7.92115021e-01,\n -1.04552456e+00, -1.08485606e+00, 1.11730532e+00,\n -5.18900204e-01, -7.53704466e-01, 1.37689826e-01,\n -2.06944711e-01, -6.78095461e-01, 7.53991467e-01],\n [ 1.06531549e+00, 9.85317509e-01, 7.66919670e-01,\n 4.02625531e-01, -1.77588800e+00, 1.66925081e+00,\n 3.01989210e-01, 6.08156428e-01, 1.11496232e+00,\n 1.43335250e+00, 4.18398011e-01, 4.35546159e-01]],\n\n [[-5.99224277e-01, 3.30897511e-02, -8.54161261e-01,\n -7.19940532e-01, -8.93574402e-01, -1.56023891e-01,\n 1.04909319e+00, 3.17097477e+00, 1.89499638e-01,\n -1.34841309e+00, 1.26498333e+00, -3.00783876e-01],\n [-6.60608594e-01, 2.09849478e-01, -1.24062460e+00,\n 2.22463164e-01, -8.83755232e-02, 9.83779068e-02,\n 3.81416254e-01, 6.74922572e-02, 1.63380841e-02,\n 2.84314519e-01, 4.15400626e-01, -1.03148246e+00],\n [-1.42999126e+00, -6.16380522e-02, -1.43273549e+00,\n 8.75314709e-02, 9.38746876e-01, 6.07111672e-01,\n -1.04817041e+00, -8.60262452e-01, 3.28301295e-01,\n -4.01297805e-01, -3.16655295e-01, 5.96906481e-01],\n [-9.87286693e-01, -4.01234710e-01, -8.00082476e-01,\n -1.04312950e+00, -8.57078189e-01, 6.77462169e-01,\n 5.18203895e-02, -8.79160629e-01, -2.31101608e-01,\n -1.63880731e+00, -7.33312808e-01, 2.14957453e+00],\n [-9.02438497e-02, 7.31658927e-01, -6.54883751e-02,\n 3.48169235e-01, 6.63258090e-01, -1.10461660e+00,\n -3.09362573e-02, 1.57886519e+00, -7.95500550e-01,\n -5.66439854e-01, -3.07691277e-01, 2.69024073e-01]],\n\n [[ 5.24917864e-01, 1.26741165e+00, 4.99498233e-01,\n -6.20531258e-02, 1.25916713e+00, 7.04111022e-01,\n -1.49567952e+00, 2.52636824e+00, 1.76992139e+00,\n -1.68214223e-01, 3.77910102e-01, 1.32435875e+00],\n [-1.72200793e-01, 7.30351790e-01, 1.10457847e+00,\n -1.01482591e+00, -6.02331854e-01, 9.21408398e-01,\n 4.60814477e-01, 9.23796560e-01, -1.32568015e-01,\n -2.89005211e-01, -1.99863948e+00, 
-1.14600043e+00],\n [ 4.70660947e-02, 8.24557220e-01, 5.31178367e-01,\n -1.28241974e-01, -2.71771566e-01, 2.17179633e-01,\n 7.82111811e-02, 1.40454551e+00, 1.46440770e-01,\n -1.48124596e+00, -1.27255814e+00, 1.51875934e+00],\n [-1.17116046e+00, 7.64497453e-01, -2.68372735e-01,\n -1.69758294e-01, -1.34132783e-01, 1.22138496e+00,\n -1.92841829e-01, -3.33192828e-02, -1.53080350e+00,\n 2.06690512e-01, 5.31042507e-01, 2.39145581e-01],\n [ 1.39789626e+00, 5.51713548e-02, 2.98977456e-01,\n 1.64850401e+00, -1.55001419e+00, -4.55825348e-01,\n 1.42615875e+00, 9.36129148e-01, 6.78380099e-01,\n 8.32650739e-01, 3.27066209e-01, 1.63159743e+00]],\n\n [[ 3.77759170e-01, 2.39867106e-01, 1.58958674e-01,\n 1.92863956e-01, -1.15701728e+00, 7.70673054e-01,\n -1.30439734e-01, 1.82191510e+00, -7.56504706e-02,\n 4.20918284e-01, 2.46602186e-01, -6.25557035e-01],\n [ 9.92136829e-01, 1.90506364e+00, -1.47772197e-02,\n -3.00478786e-01, -3.55028731e-01, -1.89236189e+00,\n -1.77813144e-01, 2.50998116e-01, 1.05475793e+00,\n 9.60047741e-01, -4.16499082e-01, -2.76822995e-01],\n [ 1.12390531e+00, -1.73463897e-01, -5.10029540e-01,\n 1.39251845e+00, 1.03758567e+00, 1.87917918e-02,\n -5.93777448e-01, -2.01188032e+00, 5.89703606e-01,\n -8.96369723e-01, -1.96273201e+00, 1.58482053e+00],\n [ 6.47967791e-01, -1.13900819e+00, -1.21440138e+00,\n 8.70961782e-01, -8.77970617e-01, 1.29614987e+00,\n 6.16459313e-01, 5.36596521e-01, 4.04695456e-01,\n 1.91450872e-01, 8.80511199e-01, -4.54080363e-01],\n [ 8.59519734e-02, 7.51946588e-01, 5.62989719e-01,\n -1.19498681e+00, -5.00409667e-01, 2.52803505e-01,\n -4.08014709e-01, 1.77465856e+00, -3.93153195e-01,\n -1.62218448e-01, 7.69430178e-01, 3.30532743e-01]]\n])\n\n\nclass SampleMap(object):\n param_data = []\n np.random.seed(0)\n param_data.append({'name': 'x',\n 'units': None,\n 'default': 0,\n 'values': x})\n param_data.append({'name': 'y',\n 'units': None,\n 'default': 0,\n 'values': y})\n param_data.append({'name': 'z',\n 'units': None,\n 'default': 0,\n 'values': z})\n\n output_data = []\n output_data.append({'name': 'f',\n 'units': None,\n 'default': 0,\n 'values': f})\n\n output_data.append({'name': 'g',\n 'units': None,\n 'default': 0,\n 'values': g})\n\n\n@unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\nclass TestMetaModelStructuredScipy(unittest.TestCase):\n \"\"\"\n Tests the regular grid map component. specifically the analytic derivatives\n vs. 
finite difference estimates.\n \"\"\"\n\n def setUp(self):\n\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, z = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', x['default'], units=x['units'])\n ivc.add_output('y', y['default'], units=y['units'])\n ivc.add_output('z', z['default'], units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=True)\n\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n self.prob = om.Problem(model)\n self.prob.setup()\n self.prob['x'] = 1.0\n self.prob['y'] = 0.75\n self.prob['z'] = -1.7\n\n def test_deriv1(self):\n # run at default pt\n self.run_and_check_derivs(self.prob)\n\n # test output values\n f, g = self.prob['comp.f'], self.prob['comp.g']\n\n tol = 1e-6\n assert_near_equal(f, -0.05624571, tol)\n assert_near_equal(g, 1.02068754, tol)\n\n def test_deriv1_swap(self):\n # Bugfix test that we can add outputs before inputs.\n\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, z = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', x['default'], units=x['units'])\n ivc.add_output('y', y['default'], units=y['units'])\n ivc.add_output('z', z['default'], units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=True)\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n prob['x'] = 1.0\n prob['y'] = 0.75\n prob['z'] = -1.7\n\n # run at default pt\n self.run_and_check_derivs(prob)\n\n def test_deriv2(self):\n self.prob['x'] = 10.0\n self.prob['y'] = 0.81\n self.prob['z'] = 1.1\n self.run_and_check_derivs(self.prob)\n\n def test_deriv3(self):\n self.prob['x'] = 90.0\n self.prob['y'] = 1.2\n self.prob['z'] = 2.1\n self.run_and_check_derivs(self.prob)\n\n def test_deriv4(self):\n # Tests extrapolation.\n self.prob['x'] = 65.0\n self.prob['y'] = 0.951\n self.prob['z'] = 2.5\n self.run_and_check_derivs(self.prob)\n\n def test_raise_out_of_bounds_error(self):\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, z = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', x['default'], units=x['units'])\n ivc.add_output('y', y['default'], units=y['units'])\n ivc.add_output('z', z['default'], units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n # Need to make sure extrapolate is False for bounds to be checked\n comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=False)\n\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n self.prob = om.Problem(model)\n self.prob.setup()\n\n self.prob['x'] = 1.0\n self.prob['y'] = 0.75\n self.prob['z'] = 9.0 # intentionally set to be out of bounds\n\n # The interpolating output name is given as a 
regexp because the exception could\n # happen with f or g first. The order those are evaluated comes from the keys of\n # dict so no guarantee on the order except for Python 3.6 !\n msg = \"'comp' <class MetaModelStructuredComp>: Error interpolating output '[f|g]' because input 'comp.z' was \" \\\n \"out of bounds \\('.*', '.*'\\) with value '9.0'\"\n with self.assertRaisesRegex(om.AnalysisError, msg):\n self.run_and_check_derivs(self.prob)\n\n def test_training_gradient(self):\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n outs = mapdata.output_data\n\n ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))\n ivc.add_output('y', np.array([0.14, 0.313, 1.41]))\n ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))\n\n ivc.add_output('f_train', outs[0]['values'])\n ivc.add_output('g_train', outs[1]['values'])\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='scipy_cubic',\n vec_size=3)\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('ivc', ivc, promotes=[\"*\"])\n model.add_subsystem('comp',\n comp,\n promotes=[\"*\"])\n\n\n prob = om.Problem(model)\n prob.setup()\n prob.run_model()\n\n val0 = np.array([ 50.26787317, 49.76106232, 19.66117913])\n val1 = np.array([-32.62094041, -31.67449135, -27.46959668])\n\n tol = 1e-5\n assert_near_equal(prob['f'], val0, tol)\n assert_near_equal(prob['g'], val1, tol)\n self.run_and_check_derivs(prob)\n\n def test_training_gradient_setup_called_twice(self):\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n outs = mapdata.output_data\n\n ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))\n ivc.add_output('y', np.array([0.14, 0.313, 1.41]))\n ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))\n\n ivc.add_output('f_train', outs[0]['values'])\n ivc.add_output('g_train', outs[1]['values'])\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='scipy_cubic',\n vec_size=3)\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('ivc', ivc, promotes=[\"*\"])\n model.add_subsystem('comp',\n comp,\n promotes=[\"*\"])\n\n prob = om.Problem(model)\n prob.setup()\n prob.run_model()\n\n val0 = np.array([ 50.26787317, 49.76106232, 19.66117913])\n val1 = np.array([-32.62094041, -31.67449135, -27.46959668])\n\n tol = 1e-5\n assert_near_equal(prob['f'], val0, tol)\n assert_near_equal(prob['g'], val1, tol)\n self.run_and_check_derivs(prob)\n\n # Setup and run again\n prob.setup()\n prob.run_model()\n\n val0 = np.array([ 50.26787317, 49.76106232, 19.66117913])\n val1 = np.array([-32.62094041, -31.67449135, -27.46959668])\n\n tol = 1e-5\n assert_near_equal(prob['f'], val0, tol)\n assert_near_equal(prob['g'], val1, tol)\n self.run_and_check_derivs(prob)\n\n def run_and_check_derivs(self, prob, tol=1e-5, verbose=False):\n \"\"\"Runs check_partials and compares to analytic derivatives.\"\"\"\n\n prob.run_model()\n derivs = prob.check_partials(out_stream=None)\n\n for i in derivs['comp'].keys():\n if verbose:\n print(\"Checking derivative pair:\", i)\n if derivs['comp'][i]['J_fwd'].sum() != 0.0:\n rel_err = derivs['comp'][i]['rel error'][0]\n self.assertLessEqual(rel_err, tol)\n\n def test_error_msg_vectorized(self):\n # Tests bug in error 
message where it doesn't give the correct node value.\n\n x_bp = np.array([0., 1.])\n y_data = np.array([0., 4.])\n nn = 5\n\n class MMComp(om.MetaModelStructuredComp):\n\n def setup(self):\n nn = self.options['vec_size']\n self.add_input(name='x', val=np.ones(nn), units=None, training_data=x_bp)\n\n self.add_output(name='y', val=np.zeros(nn), units=None, training_data=y_data)\n\n p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output('x', val=np.linspace(.5, 1.1, nn))\n\n p.model.add_subsystem('ivc', ivc, promotes=['x'])\n p.model.add_subsystem('MM', MMComp(vec_size=nn), promotes=['x', 'y'])\n\n p.setup()\n\n with self.assertRaises(om.AnalysisError) as cm:\n p.run_model()\n\n msg = (\"'MM' <class MMComp>: Error interpolating output 'y' because input 'MM.x' was out of bounds ('0.0', '1.0') with value '1.1'\")\n self.assertEqual(str(cm.exception), msg)\n\n\nclass TestMetaModelStructuredPython(unittest.TestCase):\n \"\"\"\n Tests the regular grid map component. specifically the analytic derivatives\n vs. finite difference estimates.\n \"\"\"\n\n def setUp(self):\n\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, z = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', x['default'], units=x['units'])\n ivc.add_output('y', y['default'], units=y['units'])\n ivc.add_output('z', z['default'], units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True)\n\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n self.prob = om.Problem(model)\n self.prob.setup()\n self.prob['x'] = 1.0\n self.prob['y'] = 0.75\n self.prob['z'] = -1.7\n\n def run_and_check_derivs(self, prob, tol=1e-5, verbose=False):\n \"\"\"Runs check_partials and compares to analytic derivatives.\"\"\"\n\n prob.run_model()\n derivs = prob.check_partials(method='cs', out_stream=None)\n\n for i in derivs['comp'].keys():\n if verbose:\n print(\"Checking derivative pair:\", i)\n if derivs['comp'][i]['J_fwd'].sum() != 0.0:\n rel_err = derivs['comp'][i]['rel error'][0]\n self.assertLessEqual(rel_err, tol)\n\n def test_deriv1(self):\n # run at default pt\n self.run_and_check_derivs(self.prob)\n\n # test output values\n f, g = self.prob['comp.f'], self.prob['comp.g']\n\n tol = 1e-6\n assert_near_equal(f, -0.05624571, tol)\n assert_near_equal(g, 1.02068754, tol)\n\n def test_deriv1_swap(self):\n # Bugfix test that we can add outputs before inputs.\n\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, z = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', x['default'], units=x['units'])\n ivc.add_output('y', y['default'], units=y['units'])\n ivc.add_output('z', z['default'], units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True)\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n prob['x'] = 1.0\n prob['y'] = 0.75\n prob['z'] = -1.7\n\n # run at default pt\n self.run_and_check_derivs(prob)\n\n def 
test_deriv2(self):\n self.prob['x'] = 10.0\n self.prob['y'] = 0.81\n self.prob['z'] = 1.1\n self.run_and_check_derivs(self.prob)\n\n def test_deriv3(self):\n self.prob['x'] = 90.0\n self.prob['y'] = 1.2\n self.prob['z'] = 2.1\n self.run_and_check_derivs(self.prob)\n\n def test_deriv4(self):\n # Tests extrapolation.\n self.prob['x'] = 65.0\n self.prob['y'] = 0.951\n self.prob['z'] = 2.5\n self.run_and_check_derivs(self.prob)\n\n def test_vectorized_linear(self):\n prob = om.Problem()\n model = prob.model\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, _ = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', np.array([x['default'], x['default'], x['default']]),\n units=x['units'])\n ivc.add_output('y', np.array([y['default'], y['default'], y['default']]),\n units=x['units'])\n ivc.add_output('z', np.array([z['default'], z['default'], z['default']]),\n units=x['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True, vec_size=3)\n\n for param in params:\n comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]),\n param['values'])\n\n for out in outs:\n comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]),\n out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n\n prob.setup(force_alloc_complex=True)\n prob['x'] = np.array([1.0, 10.0, 90.0])\n prob['y'] = np.array([0.75, 0.81, 1.2])\n prob['z'] = np.array([-1.7, 1.1, 2.1])\n\n prob.run_model()\n\n partials = prob.check_partials(method='cs', out_stream=None)\n assert_check_partials(partials, rtol=1e-10)\n\n def test_vectorized_lagrange2(self):\n prob = om.Problem()\n model = prob.model\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, _ = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', np.array([x['default'], x['default'], x['default']]),\n units=x['units'])\n ivc.add_output('y', np.array([y['default'], y['default'], y['default']]),\n units=x['units'])\n ivc.add_output('z', np.array([z['default'], z['default'], z['default']]),\n units=x['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='lagrange2', extrapolate=True, vec_size=3)\n\n for param in params:\n comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]),\n param['values'])\n\n for out in outs:\n comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]),\n out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n\n prob.setup(force_alloc_complex=True)\n prob['x'] = np.array([1.0, 10.0, 90.0])\n prob['y'] = np.array([0.75, 0.81, 1.2])\n prob['z'] = np.array([-1.7, 1.1, 2.1])\n\n prob.run_model()\n\n partials = prob.check_partials(method='cs', out_stream=None)\n # Derivs are large, so ignore atol.\n assert_check_partials(partials, atol=1e10, rtol=1e-10)\n\n def test_vectorized_lagrange3(self):\n prob = om.Problem()\n model = prob.model\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, _ = params\n outs = mapdata.output_data\n\n comp = om.MetaModelStructuredComp(method='lagrange3', extrapolate=True, vec_size=3)\n\n for param in params:\n comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]),\n param['values'], units=param['units'])\n\n for out in outs:\n comp.add_output(out['name'], 
np.array([out['default'], out['default'], out['default']]),\n out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n\n prob.setup(force_alloc_complex=True)\n prob.set_val('x', np.array([1.0, 10.0, 90.0]))\n prob.set_val('y', np.array([0.75, 0.81, 1.2]))\n prob.set_val('z', np.array([-1.7, 1.1, 2.1]))\n\n prob.run_model()\n\n partials = prob.check_partials(method='cs', out_stream=None)\n # Derivs are large, so ignore atol.\n assert_check_partials(partials, atol=1e10, rtol=1e-10)\n\n def test_vectorized_akima(self):\n prob = om.Problem()\n model = prob.model\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, _ = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', np.array([x['default'], x['default'], x['default']]),\n units=x['units'])\n ivc.add_output('y', np.array([y['default'], y['default'], y['default']]),\n units=y['units'])\n ivc.add_output('z', np.array([z['default'], z['default'], z['default']]),\n units=z['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='akima', extrapolate=True, vec_size=3)\n\n for param in params:\n comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]),\n param['values'])\n\n for out in outs:\n comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]),\n out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n\n prob.setup(force_alloc_complex=True)\n prob['x'] = np.array([1.0, 10.0, 90.0])\n prob['y'] = np.array([0.75, 0.81, 1.2])\n prob['z'] = np.array([-1.7, 1.1, 2.1])\n\n prob.run_model()\n\n partials = prob.check_partials(method='cs', out_stream=None)\n # Derivs are large, so ignore atol.\n assert_check_partials(partials, atol=1e10, rtol=1e-10)\n\n def test_vectorized_cubic(self):\n prob = om.Problem()\n model = prob.model\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n x, y, _ = params\n outs = mapdata.output_data\n z = outs[0]\n ivc.add_output('x', np.array([x['default'], x['default'], x['default']]),\n units=x['units'])\n ivc.add_output('y', np.array([y['default'], y['default'], y['default']]),\n units=x['units'])\n ivc.add_output('z', np.array([z['default'], z['default'], z['default']]),\n units=x['units'])\n\n model.add_subsystem('des_vars', ivc, promotes=[\"*\"])\n\n comp = om.MetaModelStructuredComp(method='cubic', extrapolate=True, vec_size=3)\n\n for param in params:\n comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]),\n param['values'])\n\n for out in outs:\n comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]),\n out['values'])\n\n model.add_subsystem('comp', comp, promotes=[\"*\"])\n\n prob.setup(force_alloc_complex=True)\n prob['x'] = np.array([1.0, 10.0, 90.0])\n prob['y'] = np.array([0.75, 0.81, 1.2])\n prob['z'] = np.array([-1.7, 1.1, 2.1])\n\n prob.run_model()\n\n partials = prob.check_partials(method='cs', out_stream=None)\n # Derivs are large, so ignore atol.\n assert_check_partials(partials, atol=1e10, rtol=1e-10)\n\n def test_training_gradient_lagrange3(self):\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n outs = mapdata.output_data\n\n ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))\n ivc.add_output('y', np.array([0.14, 0.313, 1.41]))\n ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))\n\n ivc.add_output('f_train', outs[0]['values'])\n 
ivc.add_output('g_train', outs[1]['values'])\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='lagrange3', vec_size=3)\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('ivc', ivc, promotes=[\"*\"])\n model.add_subsystem('comp',\n comp,\n promotes=[\"*\"])\n\n prob = om.Problem(model)\n prob.setup()\n prob.run_model()\n\n self.run_and_check_derivs(prob)\n\n def test_training_gradient_akima(self):\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n outs = mapdata.output_data\n\n ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))\n ivc.add_output('y', np.array([0.14, 0.313, 1.41]))\n ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))\n\n ivc.add_output('f_train', outs[0]['values'])\n ivc.add_output('g_train', outs[1]['values'])\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='akima', vec_size=3)\n for param in params:\n comp.add_input(param['name'], param['default'], param['values'])\n\n for out in outs:\n comp.add_output(out['name'], out['default'], out['values'])\n\n model.add_subsystem('ivc', ivc, promotes=[\"*\"])\n model.add_subsystem('comp',\n comp,\n promotes=[\"*\"])\n\n prob = om.Problem(model)\n prob.setup(force_alloc_complex=True)\n prob.run_model()\n\n self.run_and_check_derivs(prob)\n\n def test_training_gradient_akima_basic(self):\n # Mimics usage as an order-reducing interpolating polynomial.\n model = om.Group()\n ivc = om.IndepVarComp()\n\n mapdata = SampleMap()\n\n params = mapdata.param_data\n outs = mapdata.output_data\n\n ivc.add_output('x', np.array([.33]))\n\n ivc.add_output('f_train', np.array([.3, .7, .5, .6, .3, .4, .2]))\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='akima', vec_size=1)\n comp.add_input('x', 0.0, np.array([.1, .2, .3, .4, .5, .6, .7]))\n comp.add_output('f', 0.0, np.array([.3, .7, .5, .6, .3, .4, .2]))\n\n model.add_subsystem('ivc', ivc, promotes=[\"*\"])\n model.add_subsystem('comp',\n comp,\n promotes=[\"*\"])\n\n prob = om.Problem(model)\n prob.setup(force_alloc_complex=True)\n prob.run_model()\n\n self.run_and_check_derivs(prob)\n\n @unittest.skipIf(OPT is None or OPTIMIZER is None, \"only run if pyoptsparse is installed.\")\n def test_analysis_error_warning_msg(self):\n x_tr = np.linspace(0, 2*np.pi, 100)\n y_tr = np.sin(x_tr)\n\n p = om.Problem(model=om.Group())\n\n p.driver = om.pyOptSparseDriver(optimizer=OPTIMIZER)\n\n mm = om.MetaModelStructuredComp(extrapolate=False)\n mm.add_input('x', val=1.0, training_data=x_tr)\n mm.add_output('y', val=1.0, training_data=y_tr)\n p.model.add_subsystem('interp', mm, promotes_inputs=['x'], promotes_outputs=['y'])\n\n p.model.add_objective('y', scaler=-1)\n p.model.add_design_var('x', lower=6, upper=10)\n\n p.set_solver_print(level=0)\n p.setup()\n\n p.set_val('x', 0.75)\n\n msg = \"Analysis Error: 'interp' <class MetaModelStructuredComp> \" \\\n \"Line 203 of file {}\".format(inspect.getsourcefile(om.MetaModelStructuredComp))\n with assert_warning(UserWarning, msg):\n p.run_driver()\n\n\n@unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\nclass TestMetaModelStructuredCompFeature(unittest.TestCase):\n\n @unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\n def test_xor(self):\n import numpy as np\n import openmdao.api as om\n\n # Create regular grid interpolator instance\n xor_interp = 
om.MetaModelStructuredComp(method='scipy_slinear')\n\n # set up inputs and outputs\n xor_interp.add_input('x', 0.0, training_data=np.array([0.0, 1.0]), units=None)\n xor_interp.add_input('y', 1.0, training_data=np.array([0.0, 1.0]), units=None)\n\n\n xor_interp.add_output('xor', 1.0, training_data=np.array([[0.0, 1.0], [1.0, 0.0]]), units=None)\n\n # Set up the OpenMDAO model\n model = om.Group()\n model.add_subsystem('comp', xor_interp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n\n prob.set_val('x', 0)\n\n # Now test out a 'fuzzy' XOR\n prob.set_val('x', 0.9)\n prob.set_val('y', 0.001242)\n\n prob.run_model()\n\n computed = prob.get_val('xor')\n actual = 0.8990064\n\n assert_almost_equal(computed, actual)\n\n # we can verify all gradients by checking against finite-difference\n prob.check_partials(compact_print=True)\n\n @unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\n def test_shape(self):\n import numpy as np\n import openmdao.api as om\n\n # create input param training data, of sizes 25, 5, and 10 points resp.\n p1 = np.linspace(0, 100, 25)\n p2 = np.linspace(-10, 10, 5)\n p3 = np.linspace(0, 1, 10)\n\n # can use meshgrid to create a 3D array of test data\n P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')\n f = np.sqrt(P1) + P2 * P3\n\n # verify the shape matches the order and size of the input params\n print(f.shape)\n\n # Create regular grid interpolator instance\n interp = om.MetaModelStructuredComp(method='scipy_cubic')\n interp.add_input('p1', 0.5, training_data=p1)\n interp.add_input('p2', 0.0, training_data=p2)\n interp.add_input('p3', 3.14, training_data=p3)\n\n interp.add_output('f', 0.0, training_data=f)\n\n # Set up the OpenMDAO model\n model = om.Group()\n model.add_subsystem('comp', interp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n\n # set inputs\n prob.set_val('p1', 55.12)\n prob.set_val('p2', -2.14)\n prob.set_val('p3', 0.323)\n\n prob.run_model()\n\n computed = prob.get_val('f')\n actual = 6.73306472\n\n assert_almost_equal(computed, actual)\n\n # we can verify all gradients by checking against finite-difference\n prob.check_partials(compact_print=True)\n\n @unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\n def test_vectorized(self):\n import numpy as np\n import openmdao.api as om\n\n # create input param training data, of sizes 25, 5, and 10 points resp.\n p1 = np.linspace(0, 100, 25)\n p2 = np.linspace(-10, 10, 5)\n p3 = np.linspace(0, 1, 10)\n\n # can use meshgrid to create a 3D array of test data\n P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')\n f = np.sqrt(P1) + P2 * P3\n\n # Create regular grid interpolator instance\n interp = om.MetaModelStructuredComp(method='scipy_cubic', vec_size=2)\n interp.add_input('p1', 0.5, training_data=p1)\n interp.add_input('p2', 0.0, training_data=p2)\n interp.add_input('p3', 3.14, training_data=p3)\n\n interp.add_output('f', 0.0, training_data=f)\n\n # Set up the OpenMDAO model\n model = om.Group()\n model.add_subsystem('comp', interp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n\n # set inputs\n prob.set_val('p1', np.array([55.12, 12.0]))\n prob.set_val('p2', np.array([-2.14, 3.5]))\n prob.set_val('p3', np.array([0.323, 0.5]))\n\n prob.run_model()\n\n computed = prob['f']\n actual = np.array([6.73306472, 5.2118645])\n\n assert_almost_equal(computed, actual)\n\n @unittest.skipIf(not scipy_gte_019, \"only run if scipy>=0.19.\")\n def test_training_derivatives(self):\n import numpy as np\n import openmdao.api as om\n\n # create input 
param training data, of sizes 25, 5, and 10 points resp.\n p1 = np.linspace(0, 100, 25)\n p2 = np.linspace(-10, 10, 5)\n p3 = np.linspace(0, 1, 10)\n\n # can use meshgrid to create a 3D array of test data\n P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')\n f = np.sqrt(P1) + P2 * P3\n\n # verify the shape matches the order and size of the input params\n print(f.shape)\n\n # Create regular grid interpolator instance\n interp = om.MetaModelStructuredComp(method='scipy_cubic', training_data_gradients=True)\n interp.add_input('p1', 0.5, p1)\n interp.add_input('p2', 0.0, p2)\n interp.add_input('p3', 3.14, p3)\n\n interp.add_output('f', 0.0, f)\n\n # Set up the OpenMDAO model\n model = om.Group()\n model.add_subsystem('comp', interp, promotes=[\"*\"])\n prob = om.Problem(model)\n prob.setup()\n\n # set inputs\n prob.set_val('p1', 55.12)\n prob.set_val('p2', -2.14)\n prob.set_val('p3', 0.323)\n\n prob.run_model()\n\n computed = prob.get_val('f')\n actual = 6.73306472\n\n assert_almost_equal(computed, actual)\n\n # we can verify all gradients by checking against finite-difference\n prob.check_partials(compact_print=True)\n\n def test_error_messages_scalar_only(self):\n prob = om.Problem()\n model = prob.model\n\n comp = om.MetaModelStructuredComp(training_data_gradients=True,\n method='slinear', vec_size=3)\n\n with self.assertRaises(ValueError) as cm:\n comp.add_input('x1', np.array([1.0, 2.0]))\n\n msg = \"MetaModelStructuredComp: Input x1 must either be scalar, or of length equal to vec_size.\"\n self.assertEqual(str(cm.exception), msg)\n\n with self.assertRaises(ValueError) as cm:\n comp.add_input('x1', np.zeros((3, 3)))\n\n self.assertEqual(str(cm.exception), msg)\n\n with self.assertRaises(ValueError) as cm:\n comp.add_output('x1', np.array([1.0, 2.0]))\n\n msg = \"MetaModelStructuredComp: Output x1 must either be scalar, or of length equal to vec_size.\"\n self.assertEqual(str(cm.exception), msg)\n\n with self.assertRaises(ValueError) as cm:\n comp.add_output('x1', np.zeros((3, 3)))\n\n self.assertEqual(str(cm.exception), msg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.random.seed",
"numpy.testing.assert_almost_equal",
"numpy.ones",
"numpy.sqrt",
"numpy.linspace",
"numpy.meshgrid"
]
] |
skylook/mobilestereonet
|
[
"45b5ce6e9cf96698ac3f274353c64e5c5fc12b5d"
] |
[
"datasets/dataset.py"
] |
[
"import os\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom datasets.data_io import get_transform, read_all_lines, pfm_imread\n\nclass PicoStereoDataset(Dataset):\n def __init__(self, datapath, list_filename, training):\n self.datapath = datapath\n self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)\n self.training = training\n\n def load_path(self, list_filename):\n lines = read_all_lines(list_filename)\n splits = [line.split() for line in lines]\n left_images = [x[0] for x in splits]\n right_images = [x[1] for x in splits]\n if len(splits[0]) == 2: # ground truth not available\n return left_images, right_images, None\n else:\n disp_images = [x[2] for x in splits]\n return left_images, right_images, disp_images\n\n def load_image(self, filename):\n return Image.open(filename).convert('RGB')\n\n def load_disp(self, filename):\n data, scale = pfm_imread(filename)\n data = np.ascontiguousarray(data, dtype=np.float32)\n return data\n\n def __len__(self):\n return len(self.left_filenames)\n\n def __getitem__(self, index):\n left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))\n right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))\n\n if self.disp_filenames: # has disparity ground truth\n disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))\n else:\n disparity = None\n\n # to tensor, normalize\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n # \"disparity\": disparity,\n \"top_pad\": 0,\n \"right_pad\": 0,\n \"left_filename\": self.left_filenames[index]}\n\n\nclass SceneFlowDataset(Dataset):\n def __init__(self, datapath, list_filename, training):\n self.datapath = datapath\n self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)\n self.training = training\n\n def load_path(self, list_filename):\n lines = read_all_lines(list_filename)\n splits = [line.split() for line in lines]\n left_images = [x[0] for x in splits]\n right_images = [x[1] for x in splits]\n disp_images = [x[2] for x in splits]\n return left_images, right_images, disp_images\n\n def load_image(self, filename):\n return Image.open(filename).convert('RGB')\n\n def load_disp(self, filename):\n data, scale = pfm_imread(filename)\n data = np.ascontiguousarray(data, dtype=np.float32)\n return data\n\n def __len__(self):\n return len(self.left_filenames)\n\n def __getitem__(self, index):\n left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))\n right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))\n disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))\n\n if self.training:\n w, h = left_img.size\n crop_w, crop_h = 512, 256\n\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n\n # random crop\n left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]\n\n # to tensor, normalize\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity}\n else:\n w, h = left_img.size\n crop_w, crop_h = 960, 512\n\n left_img = left_img.crop((w - crop_w, h - crop_h, 
w, h))\n right_img = right_img.crop((w - crop_w, h - crop_h, w, h))\n disparity = disparity[h - crop_h:h, w - crop_w: w]\n\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity,\n \"top_pad\": 0,\n \"right_pad\": 0,\n \"left_filename\": self.left_filenames[index]}\n\n\nclass KITTIDataset(Dataset):\n def __init__(self, datapath, list_filename, training):\n self.datapath = datapath\n self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)\n self.training = training\n if self.training:\n assert self.disp_filenames is not None\n\n def load_path(self, list_filename):\n lines = read_all_lines(list_filename)\n splits = [line.split() for line in lines]\n left_images = [x[0] for x in splits]\n right_images = [x[1] for x in splits]\n if len(splits[0]) == 2: # ground truth not available\n return left_images, right_images, None\n else:\n disp_images = [x[2] for x in splits]\n return left_images, right_images, disp_images\n\n def load_image(self, filename):\n return Image.open(filename).convert('RGB')\n\n def load_disp(self, filename):\n data = Image.open(filename)\n data = np.array(data, dtype=np.float32) / 256.\n return data\n\n def __len__(self):\n return len(self.left_filenames)\n\n def __getitem__(self, index):\n left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))\n right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))\n\n if self.disp_filenames: # has disparity ground truth\n disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))\n else:\n disparity = None\n\n if self.training:\n w, h = left_img.size\n crop_w, crop_h = 512, 256\n\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n\n # random crop\n left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]\n\n # to tensor, normalize\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity}\n else:\n w, h = left_img.size\n\n # normalize\n processed = get_transform()\n left_img = processed(left_img).numpy()\n right_img = processed(right_img).numpy()\n\n # pad to size 1248x384\n top_pad = 384 - h\n right_pad = 1248 - w\n assert top_pad > 0 and right_pad > 0\n # pad images\n left_img = np.lib.pad(left_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)\n right_img = np.lib.pad(right_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant',\n constant_values=0)\n # pad disparity gt\n if disparity is not None:\n assert len(disparity.shape) == 2\n disparity = np.lib.pad(disparity, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)\n\n if disparity is not None:\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity,\n \"top_pad\": top_pad,\n \"right_pad\": right_pad,\n \"left_filename\": self.left_filenames[index]}\n else:\n return {\"left\": left_img,\n \"right\": right_img,\n \"top_pad\": top_pad,\n \"right_pad\": right_pad,\n \"left_filename\": self.left_filenames[index],\n \"right_filename\": self.right_filenames[index]}\n\n\nclass DrivingStereoDataset(Dataset):\n def __init__(self, datapath, list_filename, training):\n self.datapath = datapath\n 
self.left_filenames, self.right_filenames, self.disp_filenames = self.load_path(list_filename)\n self.training = training\n\n def load_path(self, list_filename):\n lines = read_all_lines(list_filename)\n splits = [line.split() for line in lines]\n left_images = [x[0] for x in splits]\n right_images = [x[1] for x in splits]\n disp_images = [x[2] for x in splits]\n return left_images, right_images, disp_images\n\n def load_image(self, filename):\n return Image.open(filename).convert('RGB')\n\n def load_disp(self, filename):\n data = Image.open(filename)\n data = np.array(data, dtype=np.float32) / 256.\n return data\n\n def __len__(self):\n return len(self.left_filenames)\n\n def __getitem__(self, index):\n left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))\n right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))\n disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))\n\n if self.training:\n w, h = left_img.size # (881, 400)\n crop_w, crop_h = 512, 256\n\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n\n # random crop\n left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]\n\n # to tensor, normalize\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity}\n\n else:\n w, h = left_img.size\n crop_w, crop_h = 880, 400\n\n left_img = left_img.crop((w - crop_w, h - crop_h, w, h))\n right_img = right_img.crop((w - crop_w, h - crop_h, w, h))\n disparity = disparity[h - crop_h:h, w - crop_w: w]\n\n processed = get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return {\"left\": left_img,\n \"right\": right_img,\n \"disparity\": disparity,\n \"top_pad\": 0,\n \"right_pad\": 0,\n \"left_filename\": self.left_filenames[index]}\n"
] |
[
[
"numpy.ascontiguousarray",
"numpy.array",
"numpy.lib.pad"
]
] |
akio-kobayashi/lc_lstm
|
[
"c5367518ebf56d13a29794d90061fdfb06677e3e",
"c5367518ebf56d13a29794d90061fdfb06677e3e"
] |
[
"nbest_multi_fixed_generator.py",
"post_multi_fixed_generator.py"
] |
[
"import numpy as np\nimport sys, os, re, gzip, struct\nimport random\nimport h5py\nimport copy\nfrom keras import backend as K\nfrom keras.utils import Sequence\nimport keras.utils\nimport tensorflow as tf\nimport multi_utils\nimport mat_utils\n\nclass NbestFixedDataGenerator(Sequence):\n\n def __init__(self, file, key_file, batch_size=64, feat_dim=40, n_labels=1024,\n procs=10, extras1=10, extras2=10, num_extras1=1, nbest=100, mode='train', shuffle=False,\n mod=1):\n\n self.file=file\n self.batch_size=batch_size\n self.feat_dim=feat_dim\n self.n_labels=n_labels\n self.procs=procs\n self.extras1=extras1\n self.extras2=extras2\n self.num_extras1=num_extras1\n self.nbest=nbest\n self.shuffle=shuffle\n self.keys=[]\n self.sorted_keys=[]\n self.mode=mode\n self.mod=1\n\n self.h5fd = h5py.File(self.file, 'r')\n self.n_samples = len(self.h5fd.keys())\n if key_file is not None:\n with open(key_file, 'r') as f:\n for line in f:\n self.sorted_keys.append(line.strip())\n for key in self.h5fd.keys():\n self.keys.append(key)\n\n self.n_samples = len(self.h5fd.keys())\n for key in self.h5fd.keys():\n self.keys.append(key)\n if len(self.sorted_keys) > 0:\n self.keys = self.sorted_keys\n\n def __len__(self):\n return int(np.ceil(self.n_samples)/self.batch_size)\n\n def __getitem__(self, index, return_keys=False):\n list_keys_temp = [self.keys[k] for k in range(index*self.batch_size,\n min( (index+1)*self.batch_size,\n len(self.keys) ) )]\n\n # [input_sequences, label_sequences, inputs_lengths, labels_length]\n if self.mode == 'train':\n x, mask, y = self.__data_generation(list_keys_temp)\n if return_keys == True:\n return x, mask, y, list_keys_temp\n else:\n return x, mask, y\n else:\n x, mask = self.__data_generation(list_keys_temp)\n if return_keys == True:\n return x, mask, list_keys_temp\n else:\n return x, mask\n\n def on_epoch_end(self):\n if self.shuffle == True:\n random.shuffle(self.keys)\n\n def __data_generation(self, list_keys_temp):\n\n max_num_blocks=0\n max_num_frames=0\n\n for i, key in enumerate(list_keys_temp):\n mat = self.h5fd[key+'/data'][()]\n mat = mat_utils.pad_mat(mat, self.mod)\n [ex_blocks,ex_frames] = multi_utils.expected_num_blocks(mat,\n self.procs,\n self.extras1,\n self.extras2,\n self.num_extras1)\n if ex_blocks > max_num_blocks:\n max_num_blocks = ex_blocks\n if ex_frames > max_num_frames:\n max_num_frames = ex_frames\n\n input_mat=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), self.feat_dim))\n input_mask=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), 1))\n if self.mode == 'train':\n numer_labels=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), self.n_labels+1))\n numer_lmscores = np.zros((len(list_keys_temp), 1))\n\n denom_labels=np.zeros((len(list_keys_temp), self.nbest, max_num_blocks,\n self.procs+max(self.extras1, self.extras2), self.n_labels+1))\n denom_lmlscores = np.zeros((len(list_keys_temp), self.nbest, 1))\n\n for i, key in enumerate(list_keys_temp):\n mat = self.h5fd[key+'/data'][()]\n [ex_blocks, ex_frames] = multi_utils.expected_num_blocks(mat,\n self.procs,\n self.extras1,\n self.extras2,\n self.num_extras1)\n blocked_mat, mask , _ = multi_utils.split_utt(mat, self.procs, self.extras1,\n self.extras2,\n self.num_extras1,\n ex_blocks,\n self.feat_dim, max_num_blocks)\n input_mat[i,:,:,:] = np.expand_dims(blocked_mat, axis=0)\n input_mask[i,:,:,:] = np.expand_dims(mask, axis=0)\n\n if self.mode == 'train':\n # label is a 
list of string starting from 0\n numer = self.h5fd[key+'/1best'][()]\n numer_labels = multi_utils.str2dict(numer)\n numer_lmscores[i,0] = self.h5fd[key+'/1best_scores'][()]\n\n denom = self.h5fd[key+'/nbest'][()]\n denom_labels = multi_utils.str2nbest(denom)\n denom_lmscores[i, :, 0] = self.h5fd[key+'/nbest_scores'][()]\n\n # w/o padding for convenience\n # splitting labels\n # (blocks, frames, feats)\n number_blocked_labels = multi_utils.split_post_label(numer_labels, self.procs, self.extras1,\n self.extras2, self.num_extras1, ex_blocks,\n self.n_labels+1, max_num_blocks)\n # expand dimensions along with batch dim.\n numer_labels[i,:,:,:] = np.expand_dims(numer_blocked_labels, axis=0)\n\n # (nbest, blocks, time, feats)\n denom_blocked_labels = muti_utils.split_nbest_label(denom_labels, self.procs, self.extra1,\n self.extra2, self.num_extras1, ex_blocks,\n self.n_labels+1, max_num_blocks)\n denom_labels[i,:,:,:,:] = np.expand_dims(denom_blocked_labels, axis=0)\n\n # transpose batch and block axes for outer loop in training\n input_mat = input_mat.transpose((1,0,2,3))\n input_mask = input_mask.transpose((1,0,2,3))\n if self.mode == 'train':\n # transpose batch dim. <-> block dim.\n number_labels = numer_labels.transpose((1,0,2,3)) # (batch,, blocks, time, feats) -> (blocks, batch, time, feats)\n denom_labels = denom_labels.transpose((2,1,0,3,4)) # (batch, nbest, blocks, time, feats)->(nbest, blocks, batch, time, feats)\n\n if self.mode == 'train':\n return input_mat, input_mask, [numer_labels, numer_lmscores, denom_labels, denom_lmscores]\n else:\n return input_mat, input_mask\n",
"import numpy as np\nimport sys, os, re, gzip, struct\nimport random\nimport h5py\nimport copy\nfrom keras import backend as K\nfrom keras.utils import Sequence\nimport keras.utils\nimport tensorflow as tf\nimport multi_utils\nimport mat_utils\n\nclass PostFixedDataGenerator(Sequence):\n\n def __init__(self, file, key_file, batch_size=64, feat_dim=40, n_labels=1024,\n procs=10, extras1=10, extras2=10, num_extras1=1, mode='train', shuffle=False,\n mod=1):\n\n self.file=file\n self.batch_size=batch_size\n self.feat_dim=feat_dim\n self.n_labels=n_labels\n self.procs=procs\n self.extras1=extras1\n self.extras2=extras2\n self.num_extras1=num_extras1\n self.shuffle=shuffle\n self.keys=[]\n self.sorted_keys=[]\n self.mode=mode\n self.mod=1\n\n self.h5fd = h5py.File(self.file, 'r')\n self.n_samples = len(self.h5fd.keys())\n if key_file is not None:\n with open(key_file, 'r') as f:\n for line in f:\n self.sorted_keys.append(line.strip())\n for key in self.h5fd.keys():\n self.keys.append(key)\n\n #self.h5fd = h5py.File(self.file, 'r')\n self.n_samples = len(self.h5fd.keys())\n for key in self.h5fd.keys():\n self.keys.append(key)\n if len(self.sorted_keys) > 0:\n self.keys = self.sorted_keys\n\n def __len__(self):\n return int(np.ceil(self.n_samples)/self.batch_size)\n\n def __getitem__(self, index, return_keys=False):\n list_keys_temp = [self.keys[k] for k in range(index*self.batch_size,\n min( (index+1)*self.batch_size,\n len(self.keys) ) )]\n\n # [input_sequences, label_sequences, inputs_lengths, labels_length]\n if self.mode == 'train':\n x, mask, label_mask, y = self.__data_generation(list_keys_temp)\n return x, mask, label_mask, y\n else:\n return x, mask, label_mask, y, list_keys_temp\n\n def on_epoch_end(self):\n if self.shuffle == True:\n random.shuffle(self.keys)\n\n def __data_generation(self, list_keys_temp):\n\n max_num_blocks=0\n max_num_frames=0\n\n for i, key in enumerate(list_keys_temp):\n mat = self.h5fd[key+'/data'][()]\n mat = mat_utils.pad_mat(mat, self.mod)\n [ex_blocks,ex_frames] = multi_utils.expected_num_blocks(mat,\n self.procs,\n self.extras1,\n self.extras2,\n self.num_extras1)\n #print(\"%d %d %d\" % (mat.shape[0], ex_blocks, ex_frames))\n if ex_blocks > max_num_blocks:\n max_num_blocks = ex_blocks\n if ex_frames > max_num_frames:\n max_num_frames = ex_frames\n\n input_mat=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), self.feat_dim))\n input_mask=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), 1))\n if self.mode == 'train':\n output_mask=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), 1))\n output_labels=np.zeros((len(list_keys_temp), max_num_blocks,\n self.procs+max(self.extras1, self.extras2), self.n_labels+1))\n\n for i, key in enumerate(list_keys_temp):\n mat = self.h5fd[key+'/data'][()]\n # w/o padding\n #mat = mat_utils.pad_mat(mat, self.mod)\n [ex_blocks, ex_frames] = multi_utils.expected_num_blocks(mat,\n self.procs,\n self.extras1,\n self.extras2,\n self.num_extras1)\n blocked_mat, mask , label_mask = multi_utils.split_utt(mat, self.procs, self.extras1,\n self.extras2,\n self.num_extras1,\n ex_blocks,\n self.feat_dim, max_num_blocks)\n input_mat[i, :, :, :] = np.expand_dims(blocked_mat, axis=0)\n input_mask[i,:,:,:] = np.expand_dims(mask, axis=0)\n\n if self.mode == 'train':\n # label is a list of string starting from 0\n posts = self.h5fd[key+'/labels'][()]\n # str to dict\n post_labels = multi_utils.str2dict(posts)\n # w/o 
padding for convenience\n # splitting labels\n blocked_labels = multi_utils.split_post_label(post_labels, self.procs, self.extras1,\n self.extras2, self.num_extras1, ex_blocks,\n self.n_labels+1, max_num_blocks)\n # expand dimensions along with batch dim.\n output_labels[i,:,:,:] = np.expand_dims(blocked_labels, axis=0)\n output_mask[i,:,:,:] = np.expand_dims(label_mask, axis=0)\n # transpose batch and block axes for outer loop in training\n input_mat = input_mat.transpose((1,0,2,3))\n input_mask = input_mask.transpose((1,0,2,3))\n if self.mode == 'train':\n # transpose batch dim. <-> block dim.\n output_labels = output_labels.transpose((1,0,2,3))\n output_mask = output_mask.transpose((1,0,2,3))\n\n if self.mode == 'train':\n return input_mat, input_mask, output_mask, output_labels\n else:\n return input_mat, input_mask, output_mask\n"
] |
[
[
"numpy.ceil",
"numpy.expand_dims"
],
[
"numpy.ceil",
"numpy.expand_dims"
]
] |
mathandy/svgtree
|
[
"884fd0aea780996a263ffe487f5803aec253f56c"
] |
[
"noIntersections4rings.py"
] |
[
"# External Dependencies\nfrom __future__ import division\nfrom numpy import isclose\nfrom svgpathtools import Path\n\n# Internal Dependencies\nfrom misc4rings import isNear\n\n\nclass ClosedRingsOverlapError(Exception):\n def __init__(self,mes):\n self.mes = mes\n def __str__(self):\n return repr(self.mes)\n\n\ndef findAppropriateTstep(path, T, stepInPositiveDirection):\n# Often the overlapping part of two paths is so small that when removed, pathXpathIntersections, will still consider the two curves as intersecting. This function is to find the smallest (signed) Tstep such that isNear(path(T),path(T+Tstep))==False.\n# note: stepInPositiveDirection should be True if Tstep should be positve\n\n # set initial guess as max possible step distance (and set sign of Tstep)\n # T = float(T)\n if stepInPositiveDirection:\n Tstep = 1 - T\n else:\n Tstep = 0 - T\n\n #check that what we're asking for is possible\n if isNear(path.point(T + Tstep), path.point(T)):\n raise Exception(\"An impossible Tstep was asked for.\")\n\n #Find a lower bound for Tstep by bisection\n maxIts = 200 # limits Tstep to be > (1/2)**200\n its = 0\n while not isNear(path.point(T + Tstep), path.point(T)) and its < maxIts:\n Tstep /= 2\n its += 1\n if its >= maxIts:\n raise Exception(\"Max iterations reached in bisection to find \"\n \"appropriate Tstep. This could theoretically be ok \"\n \"if you have a curve with a huge number of \"\n \"segments... just increase the maxIts in \"\n \"findAppropriateTstep if you have a such a curve \"\n \"(but I doubt that's the case - so tell Andy).\")\n return 2 * Tstep\n\n\ndef shortPart(path,T):\n if isclose(T, 0) or isclose(T, 1):\n return Path()\n if T < 1-T: # T is closer to 0\n # return cropPath(path,0,T)\n return path.cropped(0, T)\n else: # T is closer to 1\n # return cropPath(path,T,1)\n return path.cropped(T, 1)\n\n\ndef longPart(path, T, remove_a_little_extra=True):\n if remove_a_little_extra:\n if T < 1 - T: # T is closer to 0 than 1\n extra = T\n if isNear(path.point(T + extra), path.point(T)):\n extra = findAppropriateTstep(path, T, True)\n else: # T is closer to 1 than 0\n extra = 1-T\n if isNear(path.point(T+extra), path.point(T)):\n extra = -1 * findAppropriateTstep(path, T, False)\n else:\n extra = 0\n if T < 1 - T: #T is closer to 0 than 1\n # return cropPath(path,T+extra,1)\n return path.cropped(T + extra, 1)\n else: #T is closer to 1 than 0\n # return cropPath(path,0,T-extra)\n return path.cropped(0, T - extra)\n\n\ndef remove_intersections(ipath, jpath, iclosed, jclosed, iringupdated=False, jringupdated=False): #removes one intersection at a time until all are gone\n new_ipath = ipath\n new_jpath = jpath\n\n #find all intersections\n res = ipath.intersect(jpath, justonemode=True)\n # res = pathXpathIntersections(ipath, jpath, justonemode=True)\n if res:\n iT, iseg, i_t = res[0]\n jT, jseg, j_t = res[1]\n # iT = ipath.t2T(iseg, i_t)\n # jT = jpath.t2T(jseg, j_t)\n else:\n run_again = False\n return new_ipath, new_jpath, iringupdated, jringupdated, run_again\n\n\n #Now find crop the path (if one ring is closed, crop the other ring)\n if iclosed and jclosed: #then crop jpath\n raise ClosedRingsOverlapError(\"\")\n\n elif jclosed: #jring closed so crop iring\n new_ipath = longPart(ipath, iT)\n new_jpath = jpath\n iringupdated = True\n\n elif iclosed: #iring closed so crop jring\n new_jpath = longPart(jpath, jT)\n new_ipath = ipath\n jringupdated = True\n\n else: #both rings are incomplete\n if iT in [0, 1]:\n new_ipath = longPart(ipath, iT)\n new_jpath = jpath\n 
iringupdated = True\n elif jT in [0, 1]:\n new_jpath = longPart(jpath, jT)\n new_ipath = ipath\n jringupdated = True\n elif shortPart(ipath, iT).length() < shortPart(jpath, jT).length():\n new_ipath = longPart(ipath, iT)\n new_jpath = jpath\n iringupdated = True\n else:\n new_jpath = longPart(jpath, jT)\n new_ipath = ipath\n jringupdated = True\n run_again = True # might be more intersections to remove, so run again\n return new_ipath, new_jpath, iringupdated, jringupdated, run_again\n\n\ndef remove_intersections_from_rings(rings):\n from options4rings import intersection_removal_progress_output_on\n from time import time as current_time\n from andysmod import n_choose_k, format_time\n\n [r.record_wasClosed() for r in rings] # record the current closure status\n\n #for output\n num_segments_in_ring_list = sum(len(r.path) for r in rings)\n num_seg_pairs2check = n_choose_k(num_segments_in_ring_list, 2)\n num_seg_pairs_checked = 0\n current_percent_complete = 0\n start_time = current_time()\n\n count = 0\n overlappingClosedRingPairs = []\n for i in range(len(rings)):\n iring = rings[i]\n ipath = iring.path\n new_ipath = ipath\n iclosed = iring.wasClosed\n iringupdated = False\n num_segs_in_ipath = len(ipath) # for progress output\n\n for j in range(i+1, len(rings)):\n if rings[j].maxR < rings[i].minR or rings[i].maxR < rings[j].minR:\n continue\n jring = rings[j]\n jpath = jring.path\n new_jpath = jpath\n jclosed = jring.wasClosed\n jringupdated = False\n num_segs_in_jpath = len(jpath) #for progress output\n\n # while loop to remove intersections between iring and jring (if any exist)\n run_again = True\n maxits = 20\n its = 0\n while run_again and its < maxits:\n try:\n args = (new_ipath, new_jpath, iclosed, jclosed)\n res = remove_intersections(*args, iringupdated=iringupdated, jringupdated=jringupdated)\n new_ipath, new_jpath, iringupdated, jringupdated, run_again = res\n except ClosedRingsOverlapError:\n overlappingClosedRingPairs.append((i, j))\n run_again = False\n pass\n its += 1\n\n # raise Exception if while loop terminateded due to reaching max allowed iteratations\n if its >= maxits:\n # remove_intersections_from_rings([iring, jring])\n # print(iring.xml)\n # print(jring.xml)\n raise Exception(\"Max iterations reached while removing intersections. Either the above two rings have over 20 intersections or this is a bug.\")\n\n # Output progess\n if intersection_removal_progress_output_on.b:\n num_seg_pairs_checked += num_segs_in_jpath*num_segs_in_ipath\n if 100 * num_seg_pairs_checked / num_seg_pairs2check > int(100 * current_percent_complete):\n current_percent_complete = num_seg_pairs_checked / num_seg_pairs2check\n time_elapsed = current_time() - start_time\n estimated_time_remaining = (1-current_percent_complete) * time_elapsed / current_percent_complete\n stuff = (int(100 * current_percent_complete),\n format_time(estimated_time_remaining),\n format_time(time_elapsed))\n mes = (\"[%s%% complete || Est. Remaining Time = %s || \"\n \"Elapsed Time = %s]\\r\" % stuff)\n intersection_removal_progress_output_on.dprint(mes)\n\n # update jring if jpath was trimmed\n if jringupdated:\n jring.updatePath(new_jpath)\n count += 1\n # update iring if ipath was trimmed\n if iringupdated:\n iring.updatePath(new_ipath)\n count += 1\n return rings, count, overlappingClosedRingPairs"
] |
[
[
"numpy.isclose"
]
] |
dinga92/neuropredict
|
[
"8e7a445424f8c649a6583567f5692fdf73d7e1d9"
] |
[
"neuropredict/run_workflow.py"
] |
[
"\"\"\"\nneuropredict : easy and comprehensive predictive analysis.\n\n\"\"\"\nfrom __future__ import print_function\n\n__all__ = ['run', 'cli', 'get_parser']\n\nimport argparse\nimport os\nimport sys\nimport textwrap\nimport traceback\nimport warnings\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sys import version_info\nfrom os.path import join as pjoin, exists as pexists, abspath, realpath, basename\nimport numpy as np\nfrom pyradigm import MLDataset\n\nif version_info.major > 2:\n # the order of import is very important to avoid circular imports\n from neuropredict import __version__\n from neuropredict import config_neuropredict as cfg\n from neuropredict import rhst, visualize\n from neuropredict.freesurfer import aseg_stats_subcortical, aseg_stats_whole_brain\n from neuropredict.io import get_metadata, get_features, get_metadata_in_pyradigm, \\\n get_data_matrix, get_dir_of_dirs, get_pyradigm, get_arff, saved_dataset_matches\n from neuropredict.utils import check_paths, uniq_combined_name, check_num_procs, sub_group_identifier, \\\n save_options, load_options, validate_feature_selection_size, make_dataset_filename, not_unspecified\nelse:\n raise NotImplementedError('neuropredict requires Python 3+.')\n\n\ndef get_parser():\n \"Parser to specify arguments and their defaults.\"\n\n parser = argparse.ArgumentParser(prog=\"neuropredict\", formatter_class=argparse.RawTextHelpFormatter,\n description='Easy, standardized and comprehensive predictive analysis.')\n\n help_text_fs_dir = textwrap.dedent(\"\"\"\n Absolute path to ``SUBJECTS_DIR`` containing the finished runs of Freesurfer parcellation\n Each subject will be queried after its ID in the metadata file.\n\n E.g. ``--fs_subject_dir /project/freesurfer_v5.3``\n \\n \\n \"\"\")\n\n help_text_user_defined_folder = textwrap.dedent(\"\"\"\n List of absolute paths to user's own features.\n\n Format: Each of these folders contains a separate folder for each subject (named after its ID in the metadata file)\n containing a file called features.txt with one number per line.\n All the subjects (in a given folder) must have the number of features (#lines in file).\n Different parent folders (describing one feature set) can have different number of features for each subject,\n but they must all have the same number of subjects (folders) within them.\n\n Names of each folder is used to annotate the results in visualizations.\n Hence name them uniquely and meaningfully, keeping in mind these figures will be included in your papers.\n For example,\n\n .. parsed-literal::\n\n --user_feature_paths /project/fmri/ /project/dti/ /project/t1_volumes/\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \\n \\n \"\"\")\n\n help_text_pyradigm_paths = textwrap.dedent(\"\"\"\n Path(s) to pyradigm datasets.\n\n Each path is self-contained dataset identifying each sample, its class and features.\n \\n \\n \"\"\")\n\n help_text_data_matrix = textwrap.dedent(\"\"\"\n List of absolute paths to text files containing one matrix of size N x p (num_samples x num_features).\n\n Each row in the data matrix file must represent data corresponding to sample in the same row\n of the meta data file (meta data file and data matrix must be in row-wise correspondence).\n\n Name of this file will be used to annotate the results and visualizations.\n\n E.g. 
``--data_matrix_paths /project/fmri.csv /project/dti.csv /project/t1_volumes.csv ``\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \n File format could be\n - a simple comma-separated text file (with extension .csv or .txt): which can easily be read back with\n numpy.loadtxt(filepath, delimiter=',')\n or\n - a numpy array saved to disk (with extension .npy or .numpy) that can read in with numpy.load(filepath).\n\n One could use ``numpy.savetxt(data_array, delimiter=',')`` or ``numpy.save(data_array)`` to save features.\n\n File format is inferred from its extension.\n \\n \\n \"\"\")\n\n help_text_arff_paths = textwrap.dedent(\"\"\"\n List of paths to files saved in Weka's ARFF dataset format.\n \n Note: \n - this format does NOT allow IDs for each subject.\n - given feature values are saved in text format, this can lead to large files with high-dimensional data, \n compared to numpy arrays saved to disk in binary format.\n \n More info: https://www.cs.waikato.ac.nz/ml/weka/arff.html\n \\n \\n \"\"\")\n\n help_text_positive_class = textwrap.dedent(\"\"\"\n Name of the positive class (e.g. Alzheimers, MCI etc) to be used in calculation of area under the ROC curve.\n Applicable only for binary classification experiments.\n\n Default: class appearing last in order specified in metadata file.\n \\n \\n \"\"\")\n\n help_text_train_perc = textwrap.dedent(\"\"\"\n Percentage of the smallest class to be reserved for training.\n\n Must be in the interval [0.01 0.99].\n\n If sample size is sufficiently big, we recommend 0.5.\n If sample size is small, or class imbalance is high, choose 0.8.\n \\n \\n \"\"\")\n\n help_text_num_rep_cv = textwrap.dedent(\"\"\"\n Number of repetitions of the repeated-holdout cross-validation.\n\n The larger the number, more stable the estimates will be.\n \\n \\n \"\"\")\n\n help_text_sub_groups = textwrap.dedent(\"\"\"\n This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.\n\n For example, in a dataset with 3 classes CN, FTD and AD,\n two studies of pair-wise combinations can be studied separately\n with the following flag ``--sub_groups CN,FTD CN,AD``.\n This allows the user to focus on few interesting subgroups depending on their dataset/goal.\n\n Format: Different subgroups must be separated by space,\n and each sub-group must be a comma-separated list of class names defined in the meta data file.\n Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters,\n and ideally just alphanumeric characters separated by underscores.\n\n Any number of subgroups can be specified, but each subgroup must have atleast two distinct classes.\n\n Default: ``'all'``, leading to inclusion of all available classes in a all-vs-all multi-class setting.\n \\n \\n \"\"\")\n\n help_text_metadata_file = textwrap.dedent(\"\"\"\n Abs path to file containing metadata for subjects to be included for analysis.\n\n At the minimum, each subject should have an id per row followed by the class it belongs to.\n\n E.g.\n .. 
parsed-literal::\n\n sub001,control\n sub002,control\n sub003,disease\n sub004,disease\n\n \\n \\n \"\"\")\n\n help_text_feature_selection = textwrap.dedent(\"\"\"Number of features to select as part of feature selection.\n Options:\n\n - 'tenth'\n - 'sqrt'\n - 'log2'\n - 'all'\n\n Default: \\'tenth\\' of the number of samples in the training set.\n\n For example, if your dataset has 90 samples, you chose 50 percent for training (default),\n then Y will have 90*.5=45 samples in training set, leading to 5 features to be selected for taining.\n If you choose a fixed integer, ensure all the feature sets under evaluation have atleast that many features.\n \\n \\n \"\"\")\n\n help_text_gs_level = textwrap.dedent(\"\"\"\n Flag to specify the level of grid search during hyper-parameter optimization on the training set.\n Allowed options are : 'none', 'light' and 'exhaustive', in the order of how many values/values will be optimized. \n \n More parameters and more values demand more resources and much longer time for optimization.\n \n The 'light' option tries to \"folk wisdom\" to try least number of values (no more than one or two),\n for the parameters for the given classifier. (e.g. a lage number say 500 trees for a random forest optimization). \n The 'light' will be the fastest and should give a \"rough idea\" of predictive performance. \n The 'exhaustive' option will try to most parameter values for the most parameters that can be optimized.\n \"\"\")\n\n help_text_make_vis = textwrap.dedent(\"\"\"\n Option to make visualizations from existing results in the given path. \n This is helpful when neuropredict failed to generate result figures automatically \n e.g. on a HPC cluster, or another environment when DISPLAY is either not available.\n \n \"\"\")\n\n help_text_atlas = textwrap.dedent(\"\"\"\n Name of the atlas to use for visualization. Default: fsaverage, if available.\n \\n \\n \"\"\")\n help_text_num_cpus = textwrap.dedent(\"\"\"\n Number of CPUs to use to parallelize CV repetitions.\n\n Default : 4.\n\n Number of CPUs will be capped at the number available on the machine if higher is requested.\n \\n \\n \"\"\")\n\n help_text_out_dir = textwrap.dedent(\"\"\"\n Output folder to store gathered features & results.\n \\n \\n \"\"\")\n\n help_classifier = textwrap.dedent(\"\"\"\n \n String specifying one of the implemented classifiers. 
\n (Classifiers are carefully chosen to allow for the comprehensive report provided by neuropredict).\n \n Default: 'RandomForestClassifier'\n \n \"\"\")\n\n help_feat_select_method = textwrap.dedent(\"\"\"\n Feature selection method to apply prior to training the classifier.\n \n Default: 'VarianceThreshold', removing features with 0.001 percent of lowest variance (zeros etc).\n \n \"\"\")\n\n parser.add_argument(\"-m\", \"--meta_file\", action=\"store\", dest=\"meta_file\",\n default=None, required=False, help=help_text_metadata_file)\n\n parser.add_argument(\"-o\", \"--out_dir\", action=\"store\", dest=\"out_dir\", required=False, help=help_text_out_dir,\n default=None )\n\n parser.add_argument(\"-f\", \"--fs_subject_dir\", action=\"store\", dest=\"fs_subject_dir\",\n default=None, help=help_text_fs_dir)\n\n user_defined = parser.add_argument_group(title='Input data and formats',\n description='Only one of the following types can be specified.')\n\n user_defined.add_argument(\"-y\", \"--pyradigm_paths\", action=\"store\", dest=\"pyradigm_paths\",\n nargs='+', # to allow for multiple features\n default=None,\n help=help_text_pyradigm_paths)\n\n user_defined.add_argument(\"-u\", \"--user_feature_paths\", action=\"store\", dest=\"user_feature_paths\",\n nargs='+', # to allow for multiple features\n default=None,\n help=help_text_user_defined_folder)\n\n user_defined.add_argument(\"-d\", \"--data_matrix_paths\", action=\"store\", dest=\"data_matrix_paths\",\n nargs='+',\n default=None,\n help=help_text_data_matrix)\n\n user_defined.add_argument(\"-a\", \"--arff_paths\", action=\"store\", dest=\"arff_paths\",\n nargs='+',\n default=None,\n help=help_text_arff_paths)\n\n cv_args_group = parser.add_argument_group(title='Cross-validation',\n description='Parameters related to training and optimization during cross-validation')\n cv_args_group.add_argument(\"-p\", \"--positive_class\", action=\"store\", dest=\"positive_class\",\n default=None,\n help=help_text_positive_class)\n\n cv_args_group.add_argument(\"-t\", \"--train_perc\", action=\"store\", dest=\"train_perc\",\n default=cfg.default_train_perc,\n help=help_text_train_perc)\n\n cv_args_group.add_argument(\"-n\", \"--num_rep_cv\", action=\"store\", dest=\"num_rep_cv\",\n default=cfg.default_num_repetitions,\n help=help_text_num_rep_cv)\n\n cv_args_group.add_argument(\"-k\", \"--num_features_to_select\", dest=\"num_features_to_select\",\n action=\"store\", default=cfg.default_num_features_to_select,\n help=help_text_feature_selection)\n\n cv_args_group.add_argument(\"-sg\", \"--sub_groups\", action=\"store\", dest=\"sub_groups\",\n nargs=\"*\",\n default=\"all\",\n help=help_text_sub_groups)\n\n cv_args_group.add_argument(\"-g\", \"--gs_level\", action=\"store\", dest=\"gs_level\",\n default=\"light\", help=help_text_gs_level, choices=cfg.GRIDSEARCH_LEVELS)\n\n pipeline_group = parser.add_argument_group(title='Predictive Model',\n description='Parameters related to pipeline comprising the predictive model')\n\n pipeline_group.add_argument(\"-fs\", \"--feat_select_method\", action=\"store\", dest=\"feat_select_method\",\n default=cfg.default_feat_select_method, help=help_feat_select_method,\n choices=cfg.feature_selection_choices)\n\n pipeline_group.add_argument(\"-e\", \"--classifier\", action=\"store\", dest=\"classifier\",\n default=cfg.default_classifier, help=help_classifier,\n choices=cfg.classifier_choices)\n\n vis_args = parser.add_argument_group(title='Visualization',\n description='Parameters related to generating 
visualizations')\n\n vis_args.add_argument(\"-z\", \"--make_vis\", action=\"store\", dest=\"make_vis\",\n default=None, help=help_text_make_vis)\n\n comp_args = parser.add_argument_group(title='Computing',\n description='Parameters related to computations/debugging')\n\n comp_args.add_argument(\"-c\", \"--num_procs\", action=\"store\", dest=\"num_procs\",\n default=cfg.DEFAULT_NUM_PROCS, help=help_text_num_cpus)\n\n comp_args.add_argument('-v', '--version', action='version',\n version='%(prog)s {version}'.format(version=__version__))\n\n return parser\n\n\ndef organize_inputs(user_args):\n \"\"\"\n Validates the input features specified and returns organized list of paths and readers.\n\n Parameters\n ----------\n user_args : ArgParse object\n Various options specified by the user.\n\n Returns\n -------\n user_feature_paths : list\n List of paths to specified input features\n user_feature_type : str\n String identifying the type of user-defined input\n fs_subject_dir : str\n Path to freesurfer subject directory, if supplied.\n\n \"\"\"\n\n atleast_one_feature_specified = False\n # specifying pyradigm avoids the need for separate meta data file\n meta_data_supplied = False\n meta_data_format = None\n\n if not_unspecified(user_args.fs_subject_dir):\n fs_subject_dir = abspath(user_args.fs_subject_dir)\n if not pexists(fs_subject_dir):\n raise IOError(\"Given Freesurfer directory doesn't exist.\")\n atleast_one_feature_specified = True\n else:\n fs_subject_dir = None\n\n # ensuring only one type is specified\n mutually_excl_formats = ['user_feature_paths', 'data_matrix_paths', 'pyradigm_paths', 'arff_paths']\n not_none_count = 0\n for format in mutually_excl_formats:\n if not_unspecified(getattr(user_args, format)):\n not_none_count = not_none_count + 1\n if not_none_count > 1:\n raise ValueError('Only one of the following formats can be specified:\\n{}'.format(mutually_excl_formats))\n\n if not_unspecified(user_args.user_feature_paths):\n user_feature_paths = check_paths(user_args.user_feature_paths, path_type='user defined (dir_of_dirs)')\n atleast_one_feature_specified = True\n user_feature_type = 'dir_of_dirs'\n\n elif not_unspecified(user_args.data_matrix_paths):\n user_feature_paths = check_paths(user_args.data_matrix_paths, path_type='data matrix')\n atleast_one_feature_specified = True\n user_feature_type = 'data_matrix'\n\n elif not_unspecified(user_args.pyradigm_paths):\n user_feature_paths = check_paths(user_args.pyradigm_paths, path_type='pyradigm')\n atleast_one_feature_specified = True\n meta_data_supplied = user_feature_paths[0]\n meta_data_format = 'pyradigm'\n user_feature_type = 'pyradigm'\n\n elif not_unspecified(user_args.arff_paths):\n user_feature_paths = check_paths(user_args.arff_paths, path_type='ARFF')\n atleast_one_feature_specified = True\n user_feature_type = 'arff'\n meta_data_supplied = user_feature_paths[0]\n meta_data_format = 'arff'\n else:\n user_feature_paths = None\n user_feature_type = None\n\n # map in python 3 returns a generator, not a list, so len() wouldnt work\n if not isinstance(user_feature_paths, list):\n user_feature_paths = list(user_feature_paths)\n\n if not atleast_one_feature_specified:\n raise ValueError('Atleast one method specifying features must be specified. 
'\n 'It can be a path(s) to pyradigm dataset, matrix file, user-defined folder or a Freesurfer subject directory.')\n\n return user_feature_paths, user_feature_type, fs_subject_dir, meta_data_supplied, meta_data_format\n\n\ndef parse_args():\n \"\"\"Parser/validator for the cmd line args.\"\"\"\n\n parser = get_parser()\n\n if len(sys.argv) < 2:\n print('Too few arguments!')\n parser.print_help()\n parser.exit(1)\n\n # parsing\n try:\n user_args = parser.parse_args()\n except:\n parser.exit(1)\n\n if len(sys.argv) == 3 and not_unspecified(user_args.make_vis):\n out_dir = realpath(user_args.make_vis)\n res_path = pjoin(out_dir,cfg.file_name_results)\n if pexists(out_dir) and pexists(res_path):\n print('\\n\\nSaving the visualizations to \\n{}'.format(out_dir))\n make_visualizations(res_path, out_dir)\n sys.exit(0)\n else:\n raise ValueError('Given folder does not exist, or has no results!')\n\n user_feature_paths, user_feature_type, fs_subject_dir, meta_data_path, meta_data_format = organize_inputs(user_args)\n\n if not meta_data_path:\n if user_args.meta_file is not None:\n meta_file = abspath(user_args.meta_file)\n if not pexists(meta_file):\n raise IOError(\"Meta data file doesn't exist.\")\n else:\n raise ValueError('Metadata file must be provided when not using pyradigm/ARFF inputs.')\n\n sample_ids, classes = get_metadata(meta_file)\n else:\n print('Using meta data from:\\n{}'.format(meta_data_path))\n sample_ids, classes = get_metadata_in_pyradigm(meta_data_path, meta_data_format)\n\n if user_args.out_dir is not None:\n out_dir = realpath(user_args.out_dir)\n else:\n out_dir = pjoin(realpath(os.getcwd()), cfg.output_dir_default)\n\n try:\n os.makedirs(out_dir, exist_ok=True)\n except:\n raise IOError('Output folder could not be created.')\n\n train_perc = np.float32(user_args.train_perc)\n if not ( 0.01 <= train_perc <= 0.99):\n raise ValueError(\"Training percentage {} out of bounds - must be >= 0.01 and <= 0.99\".format(train_perc))\n\n num_rep_cv = np.int64(user_args.num_rep_cv)\n if num_rep_cv < 10:\n raise ValueError(\"Atleast 10 repetitions of CV is recommened.\")\n\n num_procs = check_num_procs(user_args.num_procs)\n\n class_set, subgroups, positive_class = validate_class_set(classes, user_args.sub_groups, user_args.positive_class)\n\n feature_selection_size = validate_feature_selection_size(user_args.num_features_to_select)\n\n grid_search_level = user_args.gs_level.lower()\n if grid_search_level not in cfg.GRIDSEARCH_LEVELS:\n raise ValueError('Unrecognized level of grid search. 
Valid choices: {}'.format(cfg.GRIDSEARCH_LEVELS))\n\n classifier = user_args.classifier.lower()\n feat_select_method = user_args.feat_select_method.lower()\n\n # saving the validated and expanded values to disk for later use.\n options_to_save = [sample_ids, classes, out_dir, user_feature_paths, user_feature_type, fs_subject_dir,\n train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs,\n grid_search_level, classifier, feat_select_method]\n options_path = save_options(options_to_save, out_dir)\n\n return sample_ids, classes, out_dir, options_path, \\\n user_feature_paths, user_feature_type, fs_subject_dir, \\\n train_perc, num_rep_cv, \\\n positive_class, subgroups, \\\n feature_selection_size, num_procs, \\\n grid_search_level, classifier, feat_select_method\n\n\ndef make_visualizations(results_file_path, out_dir, options_path=None):\n \"\"\"\n Produces the performance visualizations/comparisons from the cross-validation results.\n\n Parameters\n ----------\n results_file_path : str\n Path to file containing results produced by `rhst`\n\n out_dir : str\n Path to a folder to store results.\n\n \"\"\"\n\n results_dict = rhst.load_results_dict(results_file_path)\n\n # using shorter names for readability\n accuracy_balanced = results_dict['accuracy_balanced']\n method_names = results_dict['method_names']\n num_classes = results_dict['num_classes']\n class_sizes = results_dict['class_sizes']\n confusion_matrix = results_dict['confusion_matrix']\n class_order = results_dict['class_set']\n feature_importances_rf = results_dict['feature_importances_rf']\n feature_names = results_dict['feature_names']\n num_times_misclfd = results_dict['num_times_misclfd']\n num_times_tested = results_dict['num_times_tested']\n\n feature_importances_available = True\n if options_path is not None:\n user_options = load_options(out_dir, options_path)\n if user_options['classifier_name'].lower() not in cfg.clfs_with_feature_importance:\n feature_importances_available = False\n else:\n # check if the all values are NaN\n unusable = [ np.all(np.isnan(method_fi.flatten())) for method_fi in feature_importances_rf ]\n feature_importances_available = not np.all(unusable)\n\n try:\n\n balacc_fig_path = pjoin(out_dir, 'balanced_accuracy')\n visualize.metric_distribution(accuracy_balanced, method_names, balacc_fig_path,\n class_sizes, num_classes, \"Balanced Accuracy\")\n\n confmat_fig_path = pjoin(out_dir, 'confusion_matrix')\n visualize.confusion_matrices(confusion_matrix, class_order, method_names, confmat_fig_path)\n\n cmp_misclf_fig_path = pjoin(out_dir, 'compare_misclf_rates')\n if num_classes > 2:\n visualize.compare_misclf_pairwise(confusion_matrix, class_order, method_names, cmp_misclf_fig_path)\n elif num_classes == 2:\n visualize.compare_misclf_pairwise_parallel_coord_plot(confusion_matrix, class_order, method_names,\n cmp_misclf_fig_path)\n\n if feature_importances_available:\n featimp_fig_path = pjoin(out_dir, 'feature_importance')\n visualize.feature_importance_map(feature_importances_rf, method_names, featimp_fig_path, feature_names)\n else:\n print('\\nCurrent predictive model does not provide feature importance values. Skipping them.')\n\n misclf_out_path = pjoin(out_dir, 'misclassified_subjects')\n visualize.freq_hist_misclassifications(num_times_misclfd, num_times_tested, method_names, misclf_out_path)\n except:\n traceback.print_exc()\n warnings.warn('Error generating the visualizations! 
Skipping ..')\n\n # cleaning up\n plt.close('all')\n\n return\n\n\ndef validate_class_set(classes, subgroups, positive_class=None):\n \"Ensures class names are valid and sub-groups exist.\"\n\n class_set = list(set(classes.values()))\n\n sub_group_list = list()\n if subgroups != 'all':\n if isinstance(subgroups, str):\n subgroups = [ subgroups, ]\n\n for comb in subgroups:\n cls_list = comb.split(',')\n # ensuring each subgroup has atleast two classes\n if len(set(cls_list)) < 2:\n raise ValueError('This subgroup {} does not contain two unique classes.'.format(comb))\n\n # verify each of them were defined in meta\n for cls in cls_list:\n if cls not in class_set:\n raise ValueError(\"Class {} in combination {} \"\n \"does not exist in meta data.\".format(cls, comb))\n\n sub_group_list.append(cls_list)\n else:\n # using all classes\n sub_group_list.append(class_set)\n\n # the following loop is required to preserve original order\n # this does not: class_order_in_meta = list(set(classes.values()))\n class_order_in_meta = list()\n for x in class_set:\n if x not in class_order_in_meta:\n class_order_in_meta.append(x)\n\n num_classes = len(class_order_in_meta)\n if num_classes < 2:\n raise ValueError(\"Atleast two classes are required for predictive analysis! \"\n \"Only one given ({})\".format(set(classes.values())))\n\n if num_classes == 2:\n if not_unspecified(positive_class):\n if positive_class not in class_order_in_meta:\n raise ValueError('Positive class specified does not exist in meta data.\\n'\n 'Choose one of {}'.format(class_order_in_meta))\n print('Positive class specified for AUC calculation: {}'.format(positive_class))\n else:\n positive_class = class_order_in_meta[-1]\n print('Positive class inferred for AUC calculation: {}'.format(positive_class))\n\n return class_set, sub_group_list, positive_class\n\n\ndef import_datasets(method_list, out_dir, subjects, classes, feature_path, feature_type='dir_of_dirs'):\n \"\"\"\n Imports all the specified feature sets and organizes them into datasets.\n\n Parameters\n ----------\n method_list : list of callables\n Set of predefined methods returning a vector of features for a given sample id and location\n out_dir : str\n Path to the output folder\n\n subjects : list of str\n List of sample ids\n classes : dict\n Dict identifying the class for each sample id in the dataset.\n feature_path : list of str\n List of paths to the root directory containing the features (pre- or user-defined).\n Must be of same length as method_list\n feature_type : str\n a string identifying the structure of feature set.\n Choices = ('dir_of_dirs', 'data_matrix')\n\n Returns\n -------\n method_names : list of str\n List of method names used for annotation.\n dataset_paths_file : str\n Path to the file containing paths to imported feature sets.\n\n \"\"\"\n\n def clean_str(string): return ' '.join(string.strip().split(' _-:\\n\\r\\t'))\n\n method_names = list()\n outpath_list = list()\n for mm, cur_method in enumerate(method_list):\n if cur_method in [get_dir_of_dirs]:\n method_name = basename(feature_path[mm])\n\n elif cur_method in [get_data_matrix]:\n method_name = os.path.splitext(basename(feature_path[mm]))[0]\n\n elif cur_method in [get_pyradigm]:\n\n if feature_type in ['pyradigm']:\n loaded_dataset = MLDataset(filepath=feature_path[mm])\n else:\n raise ValueError('Invalid state of the program!')\n\n if len(loaded_dataset.description) > 1:\n method_name = loaded_dataset.description\n else:\n method_name = basename(feature_path[mm])\n\n 
method_names.append(clean_str(method_name))\n if saved_dataset_matches(loaded_dataset, subjects, classes):\n outpath_list.append(feature_path[mm])\n continue\n else:\n raise ValueError('supplied pyradigm dataset does not match samples in the meta data.')\n\n elif cur_method in [get_arff]:\n\n loaded_dataset = MLDataset(arff_path=feature_path[mm])\n if len(loaded_dataset.description) > 1:\n method_name = loaded_dataset.description\n else:\n method_name = basename(feature_path[mm])\n\n method_names.append(clean_str(method_name))\n out_name = make_dataset_filename(method_name)\n outpath_dataset = pjoin(out_dir, out_name)\n loaded_dataset.save(outpath_dataset)\n outpath_list.append(outpath_dataset)\n continue\n else:\n # adding an index for an even more unique identification\n # method_name = '{}_{}'.format(cur_method.__name__,mm)\n method_name = cur_method.__name__\n\n method_names.append(clean_str(method_name))\n out_name = make_dataset_filename(method_name)\n\n outpath_dataset = pjoin(out_dir, out_name)\n if not saved_dataset_matches(outpath_dataset, subjects, classes):\n # noinspection PyTypeChecker\n outpath_dataset = get_features(subjects, classes,\n feature_path[mm],\n out_dir, out_name,\n cur_method, feature_type)\n\n outpath_list.append(outpath_dataset)\n\n combined_name = uniq_combined_name(method_names)\n\n dataset_paths_file = pjoin(out_dir, 'datasetlist.' + combined_name + '.txt')\n with open(dataset_paths_file, 'w') as dpf:\n dpf.writelines('\\n'.join(outpath_list))\n\n return method_names, dataset_paths_file\n\n\n\ndef make_method_list(fs_subject_dir, user_feature_paths, user_feature_type='dir_of_dirs'):\n \"\"\"\n Returns an organized list of feature paths and methods to read in features.\n\n Parameters\n ----------\n fs_subject_dir : str\n user_feature_paths : list of str\n user_feature_type : str\n\n Returns\n -------\n feature_dir : list\n method_list : list\n\n\n \"\"\"\n\n freesurfer_readers = [aseg_stats_subcortical, aseg_stats_whole_brain]\n userdefined_readers = {'dir_of_dirs': get_dir_of_dirs,\n 'data_matrix': get_data_matrix,\n 'pyradigm': get_pyradigm,\n 'arff': get_arff}\n\n feature_dir = list()\n method_list = list()\n if not_unspecified(user_feature_paths):\n if user_feature_type not in userdefined_readers:\n raise NotImplementedError(\"Invalid feature type or its reader is not implemented yet!\")\n\n for upath in user_feature_paths:\n feature_dir.append(upath)\n method_list.append(userdefined_readers[user_feature_type])\n\n if not_unspecified(fs_subject_dir):\n for fsrdr in freesurfer_readers:\n feature_dir.append(fs_subject_dir)\n method_list.append(fsrdr)\n\n if len(method_list) != len(feature_dir):\n raise ValueError('Invalid specification for features!')\n\n if len(method_list) < 1:\n raise ValueError('Atleast one feature set must be specified.')\n\n print(\"\\nRequested features for analysis:\")\n for mm, method in enumerate(method_list):\n print(\"{} from {}\".format(method.__name__, feature_dir[mm]))\n\n return feature_dir, method_list\n\n\ndef prepare_and_run(subjects, classes, out_dir, options_path,\n user_feature_paths, user_feature_type, fs_subject_dir,\n train_perc, num_rep_cv, positive_class,\n sub_group_list, feature_selection_size, num_procs,\n grid_search_level, classifier, feat_select_method):\n \"Organizes the inputs and prepares them for CV\"\n\n feature_dir, method_list = make_method_list(fs_subject_dir, user_feature_paths, user_feature_type)\n\n method_names, dataset_paths_file = import_datasets(method_list, out_dir, subjects, classes,\n 
feature_dir, user_feature_type)\n\n # iterating through the given set of subgroups\n for sub_group in sub_group_list:\n print('{}\\nProcessing subgroup : {}\\n{}'.format('-'*80, sub_group, '-'*80))\n out_dir_sg = pjoin(out_dir, sub_group_identifier(sub_group))\n results_file_path = rhst.run(dataset_paths_file, method_names, out_dir_sg,\n train_perc=train_perc, num_repetitions=num_rep_cv,\n positive_class=positive_class, sub_group=sub_group,\n feat_sel_size=feature_selection_size, num_procs=num_procs,\n grid_search_level=grid_search_level,\n classifier_name=classifier, feat_select_method=feat_select_method,\n options_path=options_path)\n\n print('\\n\\nSaving the visualizations to \\n{}'.format(out_dir))\n make_visualizations(results_file_path, out_dir_sg, options_path)\n print('\\n')\n\n return\n\n\ndef cli():\n \"\"\"\n Main entry point.\n\n \"\"\"\n\n subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, \\\n fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, \\\n feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method = parse_args()\n\n print('Running neuropredict {}'.format(__version__))\n prepare_and_run(subjects, classes, out_dir, options_path,\n user_feature_paths, user_feature_type, fs_subject_dir,\n train_perc, num_rep_cv, positive_class,\n sub_group_list, feature_selection_size, num_procs,\n grid_search_level, classifier, feat_select_method)\n\n return\n\n\ndef run(feature_sets,\n feature_type=cfg.default_feature_type,\n meta_data=None,\n output_dir=None,\n pipeline=None,\n train_perc=0.5,\n num_repetitions=200,\n positive_class=None,\n feat_sel_size=cfg.default_num_features_to_select,\n sub_groups='all',\n grid_search_level=cfg.GRIDSEARCH_LEVEL_DEFAULT,\n num_procs=2):\n \"\"\"\n Generate comprehensive report on the predictive performance for different feature sets and statistically compare them.\n\n Main entry point for API access.\n\n Parameters\n ----------\n feature_sets : list\n The input can be specified in either of the following ways:\n - list of paths to pyradigm datasets saved on disk\n - path to a file containing list of paths (each line containing path to a valid MLDataset)\n - list of MLDatasets that are already loaded\n - list of tuples (to specify multiple features), each element containing (X, y) i.e. data and target labels\n - a single tuple containing (X, y) i.e. data and target labels\n - list of paths to CSV files, each containing one type of features.\n\n When specifying multiple sets of input features, ensure:\n - all of them contain the same number of samples\n - each sample belongs to same class across all feature sets.\n\n feature_type : str\n String identifying the type of features as described above. It could be:\n 'list_of_pyradigm_paths', 'pyradigm_list',\n 'list_of_tuples', 'tuple', 'list_of_csv_paths'\n\n meta_data : multiple\n The meta data can be specified in either of the following ways:\n\n - a path to a meta data file (see :doc:`features` page)\n - a dict keyed in by sample IDs with values representing their classes.\n - None, if meta data is already specified in ``feature_sets`` input (e.g. with pyradigms).\n\n pipeline : str or object\n If a string, it identifying one of the implemented classifiers e.g. 
'RandomForestClassifier' or 'ExtraTreesClassifier'\n If an object, it must be a sciki-learn pipeline describing the sequence of steps.\n This is typically a set of feature selections or dimensionality reduction steps followed by an estimator (classifier).\n\n See http://scikit-learn.org/stable/modules/pipeline.html#pipeline for more details.\n\n Default: None, which leads to the selection of a Random Forest classifier,\n with robust scaling, followed by removal of low variance features.\n\n method_names : list\n A list of names to denote the different feature sets\n\n out_results_dir : str\n Path to output directory to save the cross validation results to.\n If not specified, a new directory named 'neuropredict' will be created in the current directory.\n\n train_perc : float, optional\n Percetange of subjects to train the classifier on.\n The percentage is applied to the size of the smallest class to estimate\n the number of subjects from each class to be reserved for training.\n The smallest class is chosen to avoid class-imbalance in the training set.\n Default: 0.8 (80%).\n\n positive_class : str\n Name of the class to be treated as positive in calculation of AUC\n\n feat_sel_size : str or int\n Number of features to select as part of feature selection. Options:\n\n - 'tenth'\n - 'sqrt'\n - 'log2'\n - 'all'\n\n Default: \\'tenth\\' of the number of samples in the training set. For example, if your dataset has 90 samples, you chose 50 percent for training (default), then Y will have 90*.5=45 samples in training set, leading to 5 features to be selected for taining. If you choose a fixed integer, ensure all the feature sets under evaluation have atleast that many features.\n\n num_repetitions : int, optional\n Number of repetitions of cross-validation estimation. Default: 200.\n\n num_procs : int, optional\n Number of CPUs to use to parallelize CV repetitions.\n\n Default : 4. Number of CPUs will be capped at the number available on the machine if higher is requested.\n\n sub_groups : list\n This option allows the user to study different combinations of classes in a multi-class (N>2) dataset. For example, in a dataset with 3 classes CN, FTD and AD, two studies of pair-wise combinations can be studied separately with the following flag ``--sub_groups CN,FTD CN,AD``. This allows the user to focus on few interesting subgroups depending on their dataset/goal.\n\n Format: Different subgroups must be separated by space, and each sub-group must be a comma-separated list of class names defined in the meta data file. Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters, and ideally just alphanumeric characters separated by underscores. Any number of subgroups can be specified, but each subgroup must have atleast two distinct classes.\n\n Default: ``'all'``, leading to inclusion of all available classes in a all-vs-all multi-class setting.\n\n grid_search_level : str\n Flag to specify the level of grid search during hyper-parameter optimization on the training set.\n Allowed options are : 'none', 'light' and 'exhaustive', in the order of how many values/values will be optimized.\n\n More parameters and more values demand more resources and much longer time for optimization.\n\n The 'light' option tries to \"folk wisdom\" to try least number of values (no more than one or two),\n for the parameters for the given classifier. (e.g. 
a lage number say 500 trees for a random forest optimization).\n The 'light' will be the fastest and should give a \"rough idea\" of predictive performance.\n The 'exhaustive' option will try to most parameter values for the most parameters that can be optimized.\n\n Returns\n -------\n results_path : str\n Path to pickle file containing full set of CV results.\n\n \"\"\"\n\n raise NotImplementedError\n\n return\n\nif __name__ == '__main__':\n cli()\n"
] |
[
[
"numpy.all",
"numpy.int64",
"numpy.float32",
"matplotlib.pyplot.close"
]
] |
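A minimal sketch of the calls extracted for the `neuropredict` row above (`numpy.float32`, `numpy.int64`, `numpy.all`, `matplotlib.pyplot.close`). It loosely mirrors the argument casts and the "all importances NaN" check visible in `run_workflow.py`; the sample values are invented and the headless `Agg` backend is an assumption, not something the source specifies:

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")  # assumed headless backend so pyplot works without DISPLAY
import matplotlib.pyplot as plt

# Numeric casts of string arguments, as argparse would hand them over
train_perc = np.float32("0.5")
num_rep_cv = np.int64("200")
assert 0.01 <= train_perc <= 0.99 and num_rep_cv >= 10

# "Are all feature importances NaN?" check before deciding whether to plot them
importances = [np.full(4, np.nan), np.array([0.1, np.nan, 0.3, 0.2])]
unusable = [np.all(np.isnan(fi.flatten())) for fi in importances]
print("feature importances available:", not np.all(unusable))

plt.figure()
plt.close("all")  # release all open figures once plotting is done
```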
Zhiwei-Z/prompzzw
|
[
"9d109f1a604125411a1e7894c3222cd50a0ec975"
] |
[
"meta_policy_search/meta_algos/trpo_maml.py"
] |
[
"from meta_policy_search.utils import logger\nfrom meta_policy_search.meta_algos.base import MAMLAlgo\nfrom meta_policy_search.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer\n\nimport tensorflow as tf\nfrom collections import OrderedDict\n\nclass TRPOMAML(MAMLAlgo):\n \"\"\"\n Algorithm for TRPO MAML\n\n Args:\n policy (Policy): policy object\n name (str): tf variable scope\n step_size (int): trust region size for the meta policy optimization through TPRO\n inner_type (str): One of 'log_likelihood', 'likelihood_ratio', 'dice', choose which inner update to use\n exploration (bool): whether to use E-MAML or MAML\n inner_lr (float) : gradient step size used for inner step\n meta_batch_size (int): number of meta-learning tasks\n num_inner_grad_steps (int) : number of gradient updates taken per maml iteration\n trainable_inner_step_size (boolean): whether make the inner step size a trainable variable\n \"\"\"\n def __init__(\n self,\n *args,\n name=\"trpo_maml\",\n step_size=0.01,\n inner_type='likelihood_ratio',\n exploration=False,\n **kwargs\n ):\n super(TRPOMAML, self).__init__(*args, **kwargs)\n\n assert inner_type in [\"log_likelihood\", \"likelihood_ratio\", \"dice\"]\n self.step_size = step_size\n self.inner_type = inner_type\n self.name = name\n self._optimization_keys = ['observations', 'actions', 'advantages', 'agent_infos']\n\n self.exploration = exploration\n if exploration: # add adjusted average rewards tp optimization keys\n self._optimization_keys.append('adj_avg_rewards')\n\n\n self.optimizer = ConjugateGradientOptimizer()\n\n self.build_graph()\n\n def _adapt_objective_sym(self, action_sym, adv_sym, dist_info_old_sym, dist_info_new_sym):\n if self.inner_type == 'likelihood_ratio':\n with tf.variable_scope(\"likelihood_ratio\"):\n likelihood_ratio_adapt = self.policy.distribution.likelihood_ratio_sym(action_sym,\n dist_info_old_sym, \n dist_info_new_sym)\n with tf.variable_scope(\"surrogate_loss\"):\n surr_obj_adapt = -tf.reduce_mean(likelihood_ratio_adapt * adv_sym)\n\n elif self.inner_type == 'log_likelihood':\n with tf.variable_scope(\"log_likelihood\"):\n log_likelihood_adapt = self.policy.distribution.log_likelihood_sym(action_sym, dist_info_new_sym)\n with tf.variable_scope(\"surrogate_loss\"):\n surr_obj_adapt = -tf.reduce_mean(log_likelihood_adapt * adv_sym)\n\n else:\n raise NotImplementedError\n\n return surr_obj_adapt\n\n def build_graph(self):\n \"\"\"\n Creates the computation graph\n \"\"\"\n\n \"\"\" Create Variables \"\"\"\n # assert self.num_inner_grad_steps == 1 or not self.exploration, \"Not sure if the math is right for more than 1 inner step\"\n\n with tf.variable_scope(self.name):\n self.step_sizes = self._create_step_size_vars()\n\n \"\"\" --- Build inner update graph for adapting the policy and sampling trajectories --- \"\"\"\n # this graph is only used for adapting the policy and not computing the meta-updates\n self.adapted_policies_params, self.adapt_input_ph_dict = self._build_inner_adaption()\n\n \"\"\" ----- Build graph for the meta-update ----- \"\"\"\n self.meta_op_phs_dict = OrderedDict()\n obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step0')\n self.meta_op_phs_dict.update(all_phs_dict)\n\n distribution_info_vars, current_policy_params = [], []\n all_surr_objs, all_inner_kls = [], []\n\n for i in range(self.meta_batch_size):\n dist_info_sym = self.policy.distribution_info_sym(obs_phs[i], params=None)\n distribution_info_vars.append(dist_info_sym) # step 0\n 
current_policy_params.append(self.policy.policy_params) # set to real policy_params (tf.Variable)\n\n initial_distribution_info_vars = distribution_info_vars\n initial_action_phs = action_phs\n\n with tf.variable_scope(self.name):\n \"\"\" Inner updates\"\"\"\n for step_id in range(1, self.num_inner_grad_steps+1):\n surr_objs, adapted_policy_params = [], []\n\n # inner adaptation step for each task\n for i in range(self.meta_batch_size):\n surr_loss = self._adapt_objective_sym(action_phs[i], adv_phs[i], dist_info_old_phs[i], distribution_info_vars[i])\n\n adapted_params_var = self._adapt_sym(surr_loss, current_policy_params[i])\n\n adapted_policy_params.append(adapted_params_var)\n surr_objs.append(surr_loss)\n\n all_surr_objs.append(surr_objs)\n\n # Create new placeholders for the next step\n obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step%i' % step_id)\n self.meta_op_phs_dict.update(all_phs_dict)\n\n # dist_info_vars_for_next_step\n distribution_info_vars = [self.policy.distribution_info_sym(obs_phs[i], params=adapted_policy_params[i])\n for i in range(self.meta_batch_size)]\n current_policy_params = adapted_policy_params\n\n \"\"\" Outer objective \"\"\"\n surr_objs, outer_kls = [], []\n\n # Create placeholders\n # meta-objective\n for i in range(self.meta_batch_size):\n likelihood_ratio = self.policy.distribution.likelihood_ratio_sym(action_phs[i], dist_info_old_phs[i],\n distribution_info_vars[i])\n outer_kl = tf.reduce_mean(self.policy.distribution.kl_sym(dist_info_old_phs[i], distribution_info_vars[i]))\n\n surr_obj = - tf.reduce_mean(likelihood_ratio * adv_phs[i])\n\n if self.exploration:\n # add adj_avg_reward placeholder\n adj_avg_rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='adj_avg_rewards' + '_' + str(self.num_inner_grad_steps) + '_' + str(i))\n self.meta_op_phs_dict['step%i_task%i_%s' % (self.num_inner_grad_steps, i, 'adj_avg_rewards')] = adj_avg_rewards\n\n log_likelihood_inital = self.policy.distribution.log_likelihood_sym(initial_action_phs[i],\n initial_distribution_info_vars[i])\n surr_obj += - tf.reduce_mean(adj_avg_rewards) * tf.reduce_mean(log_likelihood_inital)\n\n surr_objs.append(surr_obj)\n outer_kls.append(outer_kl)\n\n mean_outer_kl = tf.reduce_mean(tf.stack(outer_kls))\n\n \"\"\" Mean over meta tasks \"\"\"\n meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0))\n\n self.optimizer.build_graph(\n loss=meta_objective,\n target=self.policy,\n input_ph_dict=self.meta_op_phs_dict,\n leq_constraint=(mean_outer_kl, self.step_size),\n )\n\n def optimize_policy(self, all_samples_data, log=True):\n \"\"\"\n Performs MAML outer step\n\n Args:\n all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and\n meta task\n log (bool) : whether to log statistics\n\n Returns:\n None\n \"\"\"\n meta_op_input_dict = self._extract_input_dict_meta_op(all_samples_data, self._optimization_keys)\n logger.log(\"Computing KL before\")\n mean_kl_before = self.optimizer.constraint_val(meta_op_input_dict)\n\n logger.log(\"Computing loss before\")\n loss_before = self.optimizer.loss(meta_op_input_dict)\n logger.log(\"Optimizing\")\n self.optimizer.optimize(meta_op_input_dict)\n logger.log(\"Computing loss after\")\n loss_after = self.optimizer.loss(meta_op_input_dict)\n\n logger.log(\"Computing KL after\")\n mean_kl = self.optimizer.constraint_val(meta_op_input_dict)\n if log:\n logger.logkv('MeanKLBefore', mean_kl_before)\n logger.logkv('MeanKL', mean_kl)\n\n 
logger.logkv('LossBefore', loss_before)\n logger.logkv('LossAfter', loss_after)\n logger.logkv('dLoss', loss_before - loss_after)"
] |
[
[
"tensorflow.variable_scope",
"tensorflow.reduce_mean",
"tensorflow.stack"
]
] |
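A minimal sketch of the TensorFlow calls listed for the `trpo_maml.py` row above (`tf.variable_scope`, `tf.reduce_mean`, `tf.stack`), assuming the TensorFlow 1.x graph-mode API that the file itself uses (`tf.compat.v1` under TF2). The per-task placeholders and the two-task meta batch are invented for illustration, not the repository's actual graph:

```python
import tensorflow as tf  # assumes TensorFlow 1.x (or tf.compat.v1) graph mode

meta_batch_size = 2
surr_objs = []
for i in range(meta_batch_size):
    with tf.variable_scope("task_%d" % i):
        # Placeholder stand-ins for per-task likelihood ratios and advantages
        likelihood_ratio = tf.placeholder(tf.float32, shape=[None], name="lr")
        adv = tf.placeholder(tf.float32, shape=[None], name="adv")
        # Per-task surrogate objective: negative mean of ratio-weighted advantages
        surr_objs.append(-tf.reduce_mean(likelihood_ratio * adv))

# Meta-objective: mean over the stacked per-task surrogate losses
meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0))
print(meta_objective)
```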
Fennec2000GH/Poly-Finance
|
[
"33de7a7503f4a4c800ab364f4720104c889cf30f"
] |
[
"ml.py"
] |
[
"import matplotlib.pyplot as plt, streamlit as st\r\nfrom typing import Iterable, Union\r\n\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import roc_curve, auc, RocCurveDisplay\r\n\r\ndef train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):\r\n \"\"\"\r\n Train custom classifier model.\r\n\r\n Parameters:\r\n estimator: Unfitted estimator.\r\n X: Input training data.\r\n y: Labels for test data.\r\n\r\n Returns:\r\n Fitted estimator model.\r\n \"\"\"\r\n return estimator.fit(X=X, y=y)\r\n\r\ndef classify(estimator: object, X: Iterable[Union[int, float]]):\r\n \"\"\"\r\n Predict with custom classifier model.\r\n\r\n Parameters:\r\n estimator: Fitted estimator.\r\n X: Input test data.\r\n\r\n Returns:\r\n Predicted labels.\r\n \"\"\"\r\n return estimator.predict(X=X)\r\n\r\n\r\ndef regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):\r\n \"\"\"\r\n Predict with custom regressor model.\r\n\r\n Parameters:\r\n estimator: Fitted estimator.\r\n X: Input test data.\r\n y: Labels for test data.\r\n\r\n Returns:\r\n Predicted labels.\r\n \"\"\"\r\n pass\r\n\r\ndef evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):\r\n \"\"\"\r\n Predict with custom classifier model.\r\n\r\n Parameters:\r\n estimator: Fitted estimator.\r\n X: Input test data.\r\n y: Labels for test data.\r\n\r\n Returns:\r\n Predicted labels.\r\n \"\"\"\r\n pred = estimator.predict(X=X)\r\n\r\n # classification report\r\n report = classification_report(y_true=y, y_pred=pred)\r\n st.write('Classification Report')\r\n st.write(report)\r\n\r\n # ROC curve\r\n fpr, tpr, thresholds = roc_curve(y, pred)\r\n roc_auc = auc(fpr, tpr)\r\n _, _, figure = RocCurveDisplay(\r\n fpr=fpr,\r\n tpr=tpr,\r\n roc_auc=roc_auc,\r\n estimator_name=type(estimator)\r\n )\r\n\r\n st.pyplot(fig=figure)\r\n"
] |
[
[
"sklearn.metrics.classification_report",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve"
]
] |
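A minimal, self-contained sketch of the scikit-learn metrics listed for the `ml.py` row above; the toy labels and scores are invented. For plotting, the usual pattern is `RocCurveDisplay(...).plot()`, which returns a display object rather than a tuple:

```python
from sklearn.metrics import classification_report, roc_curve, auc

# Toy ground truth and decision scores
y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]
y_pred = [int(s >= 0.5) for s in y_score]

print(classification_report(y_true, y_pred))

# ROC curve from the scores, then the area under it
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print("AUC:", auc(fpr, tpr))
```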
liuyenting/olive-camera-dcamapi
|
[
"0ac8eeeb2d666561fefd8f6e6a137bfa304f5d3a"
] |
[
"setup.py"
] |
[
"import os\r\nfrom setuptools import Extension, setup\r\nimport sys\r\n\r\nfrom Cython.Build import build_ext\r\nimport numpy\r\n\r\nNAME = \"olive-camera-dcamapi\"\r\nVERSION = \"0.1\"\r\nDESCRIPTION = \"A small template project that shows how to wrap C/C++ code into python using Cython\"\r\nURL = \"https://github.com/liuyenting/olive-camera-dcamapi\"\r\n\r\n# Trove classifiers\r\n# https://pypi.org/classifiers/\r\nCLASSIFIERS = [\r\n \"License :: OSI Approved :: Apache Software License\",\r\n \"Operating System :: Microsoft :: Windows\",\r\n]\r\nKEYWORDS = []\r\n\r\nAUTHOR = \"Liu, Yen-Ting\"\r\nEMAIL = \"ytliu@gate.sinica.edu.tw\"\r\n\r\nREQUIRES = [\"numpy\", \"trio~=0.13.0\"]\r\n\r\nPACKAGES = [\"olive.drivers.dcamapi\"]\r\n\r\nEXT_DEFS = [\r\n {\r\n \"name\": \"olive.drivers.dcamapi.wrapper\",\r\n \"language\": \"c++\",\r\n \"include_dirs\": [\r\n # \"Module .pxd file not found next to .pyx file\", https://github.com/cython/cython/issues/2452\r\n \".\",\r\n # numpy\r\n numpy.get_include(),\r\n ],\r\n \"libraries\": [\"dcamapi\"],\r\n \"library_dirs\": [\"lib\"],\r\n }\r\n]\r\n\r\n######################################################################################\r\n\r\ncwd = os.path.abspath(os.path.dirname(__file__))\r\n\r\nwith open(os.path.join(cwd, \"README.md\"), encoding=\"utf-8\") as fd:\r\n LONG_DESCRIPTION = fd.read()\r\n\r\n# - install cython headers so other modules can cimport\r\n# - force sdist to keep the .pyx files\r\nPACKAGE_DATA = {pkg: [\"*.pxd\", \"*.pyx\"] for pkg in PACKAGES}\r\n\r\n\r\ndef generate_extension(ext_def):\r\n \"\"\"Generate extension constructors.\"\"\"\r\n assert \"name\" in ext_def, \"invalid extension name\"\r\n\r\n ext_path = ext_def[\"name\"].replace(\".\", os.path.sep) + \".pyx\"\r\n ext_root = os.path.dirname(ext_path)\r\n\r\n ext_def[\"sources\"] = [ext_path]\r\n\r\n if \"extra_objects\" in ext_def:\r\n if not sys.platform.startswith(\"linux\"):\r\n # NOTE:\r\n # re-route static library on Windows https://stackoverflow.com/a/49139257\r\n # extract names\r\n static_libs = [os.path.split(lib) for lib in ext_def[\"extra_objects\"]]\r\n lib_dirs, lib_names = zip(*static_libs)\r\n lib_names = [os.path.splitext(name)[0] for name in lib_names]\r\n # 1) save it into 'libraries'\r\n # 2) append search path (remove duplicates on-the-fly)\r\n ext_def.setdefault(\"libraries\", []).extend(lib_names)\r\n ext_def.setdefault(\"library_dirs\", []).extend(list(set(lib_dirs)))\r\n # empty 'extra_objects'\r\n del ext_def[\"extra_objects\"]\r\n\r\n # prepend root directory\r\n arguments = (\r\n \"include_dirs\",\r\n \"library_dirs\",\r\n \"runtime_library_dirs\",\r\n \"extra_objects\",\r\n )\r\n for argument in arguments:\r\n try:\r\n ext_def[argument] = [\r\n os.path.join(ext_root, path) for path in ext_def[argument]\r\n ]\r\n except KeyError:\r\n # ignore unused argument\r\n pass\r\n\r\n return Extension(**ext_def)\r\n\r\n\r\nEXTENSIONS = [generate_extension(ext_def) for ext_def in EXT_DEFS]\r\n\r\nsetup(\r\n #\r\n # Project Info\r\n #\r\n name=NAME,\r\n version=VERSION,\r\n description=DESCRIPTION,\r\n long_description=LONG_DESCRIPTION,\r\n long_description_content_type=\"text/markdown\",\r\n url=URL,\r\n classifiers=CLASSIFIERS,\r\n keywords=KEYWORDS,\r\n #\r\n # Author\r\n #\r\n author=AUTHOR,\r\n author_email=EMAIL,\r\n #\r\n # Dependencies\r\n #\r\n # use pyproject.toml for setup dependencies instead\r\n # setup_requires=[],remove\r\n install_requires=REQUIRES,\r\n #\r\n # Package Structure\r\n #\r\n # package to install\r\n packages=PACKAGES,\r\n 
package_data=PACKAGE_DATA,\r\n #\r\n # Build Instruction\r\n #\r\n cmdclass={\"build_ext\": build_ext},\r\n ext_modules=EXTENSIONS,\r\n # disable zip_safe\r\n # - Cython cannot find .pxd files inside installed .egg\r\n # - dynmaic loader may require library unzipped to a temporary directory at import time\r\n zip_safe=False,\r\n)\r\n"
] |
[
[
"numpy.get_include"
]
] |
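A minimal sketch of how `numpy.get_include()` is typically used when declaring a Cython/C++ extension, as in the `setup.py` row above; the module and source names here are hypothetical, not taken from that project:

```python
import numpy
from setuptools import Extension

# numpy.get_include() points at the directory holding NumPy's C headers,
# so the compiler can resolve #include <numpy/arrayobject.h>.
ext = Extension(
    name="example.wrapper",           # hypothetical extension module
    sources=["example/wrapper.pyx"],  # hypothetical Cython source
    language="c++",
    include_dirs=[".", numpy.get_include()],
)
print(numpy.get_include())
```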
ytakefuji/machine-learning
|
[
"f0bc65c221ffbf775ad43055f4164abbd3ffee4a"
] |
[
"elm_pima.py"
] |
[
"import pandas as pd\npima=pd.read_csv('pima-indians-diabetes.csv',encoding=\"shift-jis\")\npima.columns=['pregnant','plasmaGlucose','bloodP','skinThick','serumInsulin','weight','pedigree','age','diabetes']\nfrom sklearn.model_selection import train_test_split\ny = pima['diabetes']\nX=pima.drop(['diabetes'],axis=1)\nnh = 4\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=54,shuffle=True)\nfrom sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier\nfrom sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer\nsrhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')\nsrhl_rbf = RBFRandomLayer(n_hidden=nh*2, rbf_width=0.1, random_state=0)\nclf1=GenELMClassifier(hidden_layer=srhl_tanh)\nclf1.fit(X_train,y_train)\nprint(clf1.score(X_test,y_test))\n'''\ndic=dict(zip(X.columns,clf.feature_importances_))\nfor item in sorted(dic.items(), key=lambda x: x[1], reverse=True):\n print(item[0],round(item[1],4))\n'''\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
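A minimal sketch of the `pandas.read_csv` / `train_test_split` combination listed for the `elm_pima.py` row above, using a small in-memory CSV (columns and values invented) instead of the real Pima dataset so the snippet runs on its own:

```python
import io
import pandas as pd
from sklearn.model_selection import train_test_split

# In-memory stand-in for pima-indians-diabetes.csv
csv_text = (
    "plasmaGlucose,age,diabetes\n"
    "148,50,1\n85,31,0\n183,32,1\n"
    "89,21,0\n137,40,1\n116,30,0\n"
)
pima = pd.read_csv(io.StringIO(csv_text))

y = pima["diabetes"]
X = pima.drop(["diabetes"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=54, shuffle=True
)
print(X_train.shape, X_test.shape)
```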
ecom-research/pytorch_geometric
|
[
"bca73a6f0808cfcf7133548edfbb5628ad6e49fa"
] |
[
"benchmark/recsys/gcn_solver.py"
] |
[
"import argparse\nimport torch\nimport os\nimport numpy as np\nimport random as rd\n\nfrom models import GCN\nfrom utils import get_folder_path\nfrom base_solver import BaseSolver\n\n\nMODEL = 'GCN'\n\nparser = argparse.ArgumentParser()\n# Dataset params\nparser.add_argument(\"--dataset\", type=str, default='Movielens', help=\"\")\nparser.add_argument(\"--dataset_name\", type=str, default='1m', help=\"\")\nparser.add_argument(\"--if_use_features\", type=bool, default=False, help=\"\")\nparser.add_argument(\"--num_core\", type=int, default=10, help=\"\")\nparser.add_argument(\"--num_feat_core\", type=int, default=10, help=\"\")\nparser.add_argument(\"--train_ratio\", type=float, default=0.8, help=\"\")\n# Model params\nparser.add_argument(\"--dropout\", type=float, default=0.5, help=\"\")\nparser.add_argument(\"--emb_dim\", type=int, default=64, help=\"\")\nparser.add_argument(\"--repr_dim\", type=int, default=16, help=\"\")\nparser.add_argument(\"--hidden_size\", type=int, default=64, help=\"\")\n# Train params\nparser.add_argument(\"--num_negative_samples\", type=int, default=5, help=\"\")\nparser.add_argument(\"--init_eval\", type=bool, default=True, help=\"\")\n\nparser.add_argument(\"--device\", type=str, default='cuda', help=\"\")\nparser.add_argument(\"--gpu_idx\", type=str, default='0', help=\"\")\nparser.add_argument(\"--runs\", type=int, default=100, help=\"\")\nparser.add_argument(\"--epochs\", type=int, default=100, help=\"\")\nparser.add_argument(\"--opt\", type=str, default='adam', help=\"\")\nparser.add_argument(\"--loss\", type=str, default='mse', help=\"\")\nparser.add_argument(\"--batch_size\", type=int, default=4, help=\"\")\nparser.add_argument(\"--lr\", type=float, default=1e-4, help=\"\")\nparser.add_argument(\"--weight_decay\", type=float, default=1e-3, help=\"\")\nparser.add_argument(\"--early_stopping\", type=int, default=60, help=\"\")\nparser.add_argument(\"--save_epochs\", type=list, default=[10, 40, 80], help=\"\")\nparser.add_argument(\"--save_every_epoch\", type=int, default=40, help=\"\")\n\nargs = parser.parse_args()\n\n\n# Setup data and weights file path\ndata_folder, weights_folder, logger_folder = \\\n get_folder_path(model=MODEL, dataset=args.dataset + args.dataset_name)\n\n# Setup device\nif not torch.cuda.is_available() or args.device == 'cpu':\n device = 'cpu'\nelse:\n device = 'cuda:{}'.format(args.gpu_idx)\n\n# Setup args\ndataset_args = {\n 'root': data_folder, 'dataset': args.dataset, 'name': args.dataset_name,\n 'if_use_features': args.if_use_features,\n 'num_core': args.num_core, 'num_feat_core': args.num_feat_core,\n 'train_ratio': args.train_ratio\n}\nmodel_args = {\n 'if_use_features': args.if_use_features,\n 'emb_dim': args.emb_dim, 'hidden_size': args.hidden_size,\n 'repr_dim': args.repr_dim, 'dropout': args.dropout\n}\ntrain_args = {\n 'init_eval': args.init_eval, 'num_negative_samples': args.num_negative_samples,\n 'opt': args.opt, 'loss': args.loss,\n 'runs': args.runs, 'epochs': args.epochs, 'batch_size': args.batch_size,\n 'weight_decay': args.weight_decay, 'lr': args.lr, 'device': device,\n 'weights_folder': os.path.join(weights_folder, str(model_args)),\n 'logger_folder': os.path.join(logger_folder, str(model_args)),\n 'save_epochs': args.save_epochs, 'save_every_epoch': args.save_every_epoch\n}\nprint('dataset params: {}'.format(dataset_args))\nprint('task params: {}'.format(model_args))\nprint('train params: {}'.format(train_args))\n\n\nclass GCNSolver(BaseSolver):\n def __init__(self, GCN, dataset_args, model_args, train_args):\n 
super(GCNSolver, self).__init__(GCN, dataset_args, model_args, train_args)\n\n def prepare_model_input(self, data, if_use_features=False):\n edge_index_np = np.hstack(data.edge_index_nps[0].values())\n edge_index_np = np.hstack([edge_index_np, np.flip(edge_index_np, 0)])\n edge_index = torch.from_numpy(edge_index_np).long().to(self.train_args['device'])\n\n kwargs = {'edge_index': edge_index}\n if if_use_features:\n kwargs['x'] = data.x\n\n return kwargs\n\n def train_negative_sampling(self, u_nid, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, data):\n \"\"\"\n Unliked popular movie negative sampling:\n :param u_nid:\n :param train_pos_unid_inid_map:\n :param test_pos_unid_inid_map:\n :param neg_unid_inid_map:\n :param data:\n :return:\n \"\"\"\n num_pos_samples = len(train_pos_unid_inid_map[u_nid])\n\n negative_inids = test_pos_unid_inid_map[u_nid] + neg_unid_inid_map[u_nid]\n nid_occs = np.array([data.item_nid_occs[0][nid] for nid in negative_inids])\n nid_occs = nid_occs / np.sum(nid_occs)\n negative_inids = rd.choices(population=negative_inids, weights=nid_occs, k=num_pos_samples * 5)\n\n return negative_inids\n\n def generate_candidates(self, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, u_nid):\n pos_i_nids = test_pos_unid_inid_map[u_nid]\n neg_i_nids = np.array(neg_unid_inid_map[u_nid])\n\n neg_i_nids_indices = np.array(rd.sample(range(neg_i_nids.shape[0]), 99), dtype=int)\n\n return pos_i_nids, list(neg_i_nids[neg_i_nids_indices])\n\n\nif __name__ == '__main__':\n solver = GCNSolver(GCN, dataset_args, model_args, train_args)\n solver.run()\n"
] |
[
[
"numpy.array",
"numpy.sum",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.flip"
]
] |
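A minimal sketch of the NumPy/PyTorch calls listed for the `gcn_solver.py` row above, loosely mirroring how the file builds an undirected edge index and popularity-based sampling weights; the toy edges and occurrence counts are invented:

```python
import numpy as np
import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Directed edges (2 x num_edges); appending the flipped copy makes them undirected
edge_index_np = np.array([[0, 1, 2], [1, 2, 0]])
edge_index_np = np.hstack([edge_index_np, np.flip(edge_index_np, 0)])
edge_index = torch.from_numpy(edge_index_np).long().to(device)

# Normalised popularity weights, e.g. for popularity-weighted negative sampling
occurrences = np.array([10.0, 3.0, 7.0])
weights = occurrences / np.sum(occurrences)

print(edge_index.shape, weights)
```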
SumanSudhir/Kaggle-SIIM-ISIC-Melanoma-Classification
|
[
"d22e91f357dd2940f8aac5d254f13d865bcba3bb"
] |
[
"src/test.py"
] |
[
"import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport random\n\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\n\n\nfrom dataset import MelanomaDataset\nfrom modules import ResNetModel, EfficientModel, Model\n\n\n\nimport time\n\n\"\"\" Initialization\"\"\"\nSEED = 45\nresolution = 320 # orignal res for B5\ninput_res = 512\nDEBUG = False\n\n# test = '../data_256/test'\ntest ='../data_merged_512/512x512-test/512x512-test'\nlabels = '../data/test.csv'\n# train_labels = '../data/train_combined.csv'\nsample = '../data/sample_submission.csv'\n# external = '../data/external_mal.csv'\n\ndef seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n # torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\nseed_everything(SEED)\n\ntrain_on_gpu = torch.cuda.is_available()\n\nif not train_on_gpu:\n print(\"CUDA is not available. Testing on CPU...\")\nelse:\n print(\"CUDA is available. Testing on GPU...\")\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndf = pd.read_csv(labels)\ndf=df.rename(columns = {'image_name':'image_id'})\n# df_train = pd.read_csv(train_labels)\n# df_ext = pd.read_csv(external)\n\n# df_train = pd.concat([df_train, df_ext], ignore_index=True)\n\n\n\"\"\" Normalizing Meta features\"\"\"\n## Sex Features\ndf['sex'] = df['sex'].map({'male': 1, 'female': 0})\ndf[\"sex\"] = df[\"sex\"].fillna(-1)\n\n## Age Features\ndf[\"age_approx\"] /= df[\"age_approx\"].max()\ndf['age_approx'] = df['age_approx'].fillna(0)\n\nmeta_features = ['sex', 'age_approx']\n\nprint(df.head())\n\n\n\nprint(\"Previous Length\", len(df))\nif DEBUG:\n df = df[:100]\nprint(\"Usable Length\", len(df))\n\n\"\"\" Dataset \"\"\"\n\n# test_transform=transforms.Compose([\n# # transforms.Resize((256,256)),\n# transforms.ToTensor(),\n# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[\n# 0.229, 0.224, 0.225])])\n\ntest_transform = A.Compose([\n A.JpegCompression(p=0.5),\n A.RandomSizedCrop(min_max_height=(int(resolution*0.9), int(resolution*1.1)),\n height=resolution, width=resolution, p=1.0),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.5),\n A.Transpose(p=0.5),\n A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ToTensorV2(),\n ], p=1.0)\n\n\n\nt_dataset=MelanomaDataset(df=df, imfolder=test,\n train=False, transforms=test_transform, meta_features=meta_features)\n\nprint('Length of test set is {}'.format(len(t_dataset)))\n\ntestloader=DataLoader(t_dataset, batch_size=8, shuffle=False, num_workers=8)\n\n\"\"\"Testing\"\"\"\n# model = ResNetModel()()\n# model = EfficientModel()\n# model = EfficientModel(n_meta_features=len(meta_features))\nmodel = Model(arch='efficientnet-b1')\n# model.load_state_dict(torch.load(\"../checkpoint/fold_1/efficient_256/efficientb0_256_14_0.9212.pth\", map_location=torch.device(device)))\nmodel.load_state_dict(torch.load(\"..//checkpoint/fold_1/efficient_320/efficientb1_320_14_0.9293.pth\", map_location=torch.device(device)))\nmodel.to(device)\n\nmodel.eval()\ntest_prob_stack = []\nimg_ids = []\nwith torch.no_grad():\n for i in range(15):\n test_prob = []\n for img, meta, img_id in tqdm(testloader):\n if train_on_gpu:\n img, meta 
= img.to(device), meta.to(device)\n\n logits = model.forward(img)\n\n pred = logits.sigmoid().detach().cpu()\n test_prob.append(pred)\n\n if i == 0:\n img_ids.append(img_id)\n\n test_prob = torch.cat(test_prob).cpu()\n test_prob_stack.append(test_prob)\n\ntest_prob_stack = torch.stack([test_prob_stack[0], test_prob_stack[1], test_prob_stack[2], test_prob_stack[3], test_prob_stack[4], test_prob_stack[5], test_prob_stack[6], test_prob_stack[7], test_prob_stack[8], test_prob_stack[9], test_prob_stack[10], test_prob_stack[11], test_prob_stack[12], test_prob_stack[13], test_prob_stack[14]], dim=0)\n\ntest_prob_avg = torch.mean(test_prob_stack, dim=0).numpy()\n\ntest_prob_avg = np.concatenate(test_prob_avg, axis=None)\nimg_ids = np.concatenate(img_ids, axis=None)\n\nsub_df = pd.DataFrame({'image_name': img_ids, 'target': test_prob_avg})\nsub_df.to_csv('../submission/submission_b1_320.csv', index=False)\nprint(sub_df.head())\n\n# print(test_prob_avg)\n# sub = pd.read_csv(sample)\n# sub['target'] = test_prob_avg.reshape(-1,)\n# sub.to_csv('../submission/submission_15.csv', index=False)\n"
] |
[
[
"numpy.concatenate",
"torch.device",
"torch.cat",
"torch.cuda.manual_seed",
"torch.stack",
"numpy.random.seed",
"pandas.DataFrame",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"torch.mean"
]
] |
alvisdeng/NLP-Language-Model
|
[
"84903b344936bc66ccf217e1d74524552049b8f8"
] |
[
"lm.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Zimeng Qiu <zimengq@andrew.cmu.edu>\n\n\"\"\"\nF19 11-411/611 NLP Assignment 3 Task 1\nN-gram Language Model Implementation Script\nZimeng Qiu Sep 2019\n\nThis is a simple implementation of N-gram language model\n\nWrite your own implementation in this file!\n\"\"\"\n\nimport argparse\nfrom utils import *\nimport numpy as np\n\n\nclass LanguageModel(object):\n \"\"\"\n Base class for all language models\n \"\"\"\n def __init__(self, corpus, ngram, min_freq, uniform=False):\n \"\"\"\n Initialize language model\n :param corpus: input text corpus to build LM on\n :param ngram: number of n-gram, e.g. 1, 2, 3, ...\n :param min_freq: minimum frequency threshold to set a word to UNK placeholder\n set to 1 to not use this threshold\n :param uniform: boolean flag, set to True to indicate this model is a simple uniform LM\n otherwise will be an N-gram model\n \"\"\"\n # write your initialize code below\n self.corpus = corpus\n self.ngram = ngram\n self.min_freq = min_freq\n self.uniform = uniform\n\n self.uniform_table = None\n self.unigram_table = None\n self.bigram_table = None\n self.trigram_table = None\n\n self.infrequent_words = find_infrequent_words(self.corpus,self.min_freq)\n replace_infrequent_words(self.corpus,self.infrequent_words)\n\n self.corpus_1gram,self.vocabulary,self.V,self.N = get_vocabulary(self.corpus)\n self.word_to_idx,self.idx_to_word = get_word_mappings(self.vocabulary)\n self.counter_1gram = get_counter(self.corpus_1gram)\n\n self.build()\n\n def build(self):\n \"\"\"\n Build LM from text corpus\n \"\"\"\n # Write your own implementation here\n \n # uniform\n if self.uniform:\n self.uniform_table = get_uniform_tables(self.V)\n else:\n # unigram\n if self.ngram == 1:\n self.unigram_table = get_unigram_tables(self.V,self.N,self.counter_1gram,self.word_to_idx)\n # bigram\n elif self.ngram == 2:\n self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]\n self.counter_2gram = get_counter(self.corpus_2gram)\n\n self.bigram_table = get_bigram_tables(self.V,self.counter_1gram,self.counter_2gram,self.word_to_idx,self.idx_to_word)\n # trigram\n elif self.ngram == 3:\n self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]\n self.counter_2gram = get_counter(self.corpus_2gram)\n\n self.corpus_3gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1],self.corpus_1gram[i+2]) for i in range(len(self.corpus_1gram)-2)]\n self.counter_3gram = get_counter(self.corpus_3gram)\n\n self.trigram_table = get_trigram_tables(self.V,self.counter_2gram,self.counter_3gram,self.word_to_idx)\n\n def most_common_words(self, k):\n \"\"\"\n Return the top-k most frequent n-grams and their frequencies in sorted order.\n For uniform models, the frequency should be \"1\" for each token.\n\n Your return should be sorted in descending order of frequency.\n Sort according to ascending alphabet order when multiple words have same frequency.\n :return: list[tuple(token, freq)] of top k most common tokens\n \"\"\"\n # Write your own implementation here\n\n if self.uniform:\n return [(word,1) for word in sorted(self.vocabulary)[0:k]]\n else:\n if self.ngram == 1:\n return sorted(self.counter_1gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]\n elif self.ngram == 2:\n return [(token[0]+' '+token[1],num) for token, num in sorted(self.counter_2gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]\n elif self.ngram == 3:\n return [(token[0]+' 
'+token[1]+' '+token[2],num) for token,num in sorted(self.counter_3gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]\n return\n\n\ndef calculate_perplexity(models, coefs, data):\n \"\"\"\n Calculate perplexity with given model\n :param models: language models\n :param coefs: coefficients\n :param data: test data\n :return: perplexity\n \"\"\"\n # Write your own implementation here\n pp = 0\n uniform_prob = []\n unigram_prob = []\n bigram_prob = []\n trigram_prob = []\n \n prob_table_unifrom = None\n prob_table_1gram = None\n prob_table_2gram = None\n prob_table_3gram = None\n\n\n min_freq = models[0].min_freq\n train_vocabulary = models[0].vocabulary\n word_to_idx,idx_to_word = models[0].word_to_idx,models[0].idx_to_word\n\n test_infrequent_words = find_infrequent_words(data,min_freq)\n replace_infrequent_words(data,test_infrequent_words)\n\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] not in train_vocabulary:\n data[i][j] = 'UNK'\n \n corpus_1gram,vocabulary,V,N = get_vocabulary(data)\n corpus_2gram = [(corpus_1gram[i],corpus_1gram[i+1]) for i in range(len(corpus_1gram)-1)]\n corpus_3gram = [(corpus_1gram[i],corpus_1gram[i+1],corpus_1gram[i+2]) for i in range(len(corpus_1gram)-2)]\n\n for i in range(len(models)):\n model = models[i]\n if model.uniform:\n prob_table_unifrom = model.uniform_table\n for word in corpus_1gram:\n uniform_prob.append(prob_table_unifrom[0][word_to_idx[word]]*coefs[0])\n else:\n if model.ngram == 1:\n prob_table_1gram = model.unigram_table\n for word in corpus_1gram:\n unigram_prob.append(prob_table_1gram[0][word_to_idx[word]]*coefs[1])\n elif model.ngram == 2:\n prob_table_2gram = model.bigram_table\n bigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_2gram[0][0]]])\n for words in corpus_2gram:\n word1 = words[0]\n word2 = words[1]\n\n prob_1gram = prob_table_1gram[0][word_to_idx[word2]]\n prob_2gram = prob_table_2gram[word_to_idx[word1]][word_to_idx[word2]]\n\n if prob_2gram != 0:\n bigram_prob.append(prob_2gram*coefs[2])\n else:\n bigram_prob.append(prob_1gram*coefs[2])\n\n elif model.ngram == 3:\n prob_table_3gram = model.trigram_table\n train_corpus_3gram = set(model.corpus_3gram)\n\n trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][0]]])\n trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][1]]])\n for words in corpus_3gram:\n word1 = words[0]\n word2 = words[1]\n word3 = words[2]\n if words in train_corpus_3gram:\n prob_3gram = prob_table_3gram[(word1,word2,word3)]\n trigram_prob.append(prob_3gram*coefs[3])\n else:\n prob_1gram = prob_table_1gram[0][word_to_idx[word3]]\n prob_2gram = prob_table_2gram[word_to_idx[word2]][word_to_idx[word3]]\n if prob_2gram != 0:\n trigram_prob.append(prob_2gram*coefs[3])\n else:\n trigram_prob.append(prob_1gram*coefs[3])\n\n\n prob = np.zeros((N,),dtype=np.float64)\n for i in range(len(prob)):\n prob[i] += uniform_prob[i]\n prob[i] += unigram_prob[i]\n prob[i] += bigram_prob[i]\n prob[i] += trigram_prob[i]\n\n for p in prob:\n pp += np.log2(p)\n \n pp /= -N\n pp = np.power(2,pp)\n\n return pp\n\n# Do not modify this function!\ndef parse_args():\n \"\"\"\n Parse input positional arguments from command line\n :return: args - parsed arguments\n \"\"\"\n parser = argparse.ArgumentParser('N-gram Language Model')\n parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float)\n parser.add_argument('coef_uni', help='coefficient for the unigram model.', type=float)\n parser.add_argument('coef_bi', help='coefficient for the bigram 
model.', type=float)\n parser.add_argument('coef_tri', help='coefficient for the trigram model.', type=float)\n parser.add_argument('min_freq', type=int,\n help='minimum frequency threshold for substitute '\n 'with UNK token, set to 1 for not use this threshold')\n parser.add_argument('testfile', help='test text file.')\n parser.add_argument('trainfile', help='training text file.', nargs='+')\n return parser.parse_args()\n\n\n# Main executable script provided for your convenience\n# Not executed on autograder, so do what you want\nif __name__ == '__main__':\n # parse arguments\n args = parse_args()\n\n # load and preprocess train and test data\n train = preprocess(load_dataset(args.trainfile))\n test = preprocess(read_file(args.testfile))\n\n # build language models\n uniform = LanguageModel(train, ngram=1, min_freq=args.min_freq, uniform=True)\n unigram = LanguageModel(train, ngram=1, min_freq=args.min_freq)\n # print('Unique 1-gram types:',len(unigram.counter_1gram.most_common()))\n # print('top 15 unigram:',unigram.counter_1gram.most_common()[:15])\n bigram = LanguageModel(train, ngram=2, min_freq=args.min_freq)\n # print('Unique 2-gram types:',len(bigram.counter_2gram.most_common()))\n # print('top 15 bigram:',bigram.counter_2gram.most_common()[:15])\n trigram = LanguageModel(train, ngram=3, min_freq=args.min_freq)\n # print('Unique 3-gram types:',len(trigram.counter_3gram.most_common()))\n # print('top 15 trigram:',trigram.counter_3gram.most_common()[:50])\n\n # calculate perplexity on test file\n ppl = calculate_perplexity(\n models=[uniform, unigram, bigram, trigram],\n coefs=[args.coef_unif, args.coef_uni, args.coef_bi, args.coef_tri],\n data=test)\n\n print(\"Perplexity: {}\".format(ppl))\n"
] |
[
[
"numpy.log2",
"numpy.power",
"numpy.zeros"
]
] |
LottieWang/mindspore
|
[
"d16e9bc3f689cf0b52c15a4566f5aad2c54a236e",
"d16e9bc3f689cf0b52c15a4566f5aad2c54a236e",
"1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0"
] |
[
"tests/st/fl/mobile/test_mobile_lenet.py",
"model_zoo/official/cv/centerface/dependency/evaluate/eval.py",
"tests/ut/python/dataset/test_save_op.py"
] |
[
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport argparse\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.nn import TrainOneStepCell, WithLossCell\nfrom src.model import LeNet5\nfrom src.adam import AdamWeightDecayOp\n\nparser = argparse.ArgumentParser(description=\"test_fl_lenet\")\nparser.add_argument(\"--device_target\", type=str, default=\"CPU\")\nparser.add_argument(\"--server_mode\", type=str, default=\"FEDERATED_LEARNING\")\nparser.add_argument(\"--ms_role\", type=str, default=\"MS_WORKER\")\nparser.add_argument(\"--worker_num\", type=int, default=0)\nparser.add_argument(\"--server_num\", type=int, default=1)\nparser.add_argument(\"--scheduler_ip\", type=str, default=\"127.0.0.1\")\nparser.add_argument(\"--scheduler_port\", type=int, default=8113)\nparser.add_argument(\"--fl_server_port\", type=int, default=6666)\nparser.add_argument(\"--start_fl_job_threshold\", type=int, default=1)\nparser.add_argument(\"--start_fl_job_time_window\", type=int, default=3000)\nparser.add_argument(\"--update_model_ratio\", type=float, default=1.0)\nparser.add_argument(\"--update_model_time_window\", type=int, default=3000)\nparser.add_argument(\"--fl_name\", type=str, default=\"Lenet\")\nparser.add_argument(\"--fl_iteration_num\", type=int, default=25)\nparser.add_argument(\"--client_epoch_num\", type=int, default=20)\nparser.add_argument(\"--client_batch_size\", type=int, default=32)\nparser.add_argument(\"--client_learning_rate\", type=float, default=0.1)\nparser.add_argument(\"--scheduler_manage_port\", type=int, default=11202)\nparser.add_argument(\"--config_file_path\", type=str, default=\"\")\nparser.add_argument(\"--encrypt_type\", type=str, default=\"NOT_ENCRYPT\")\n# parameters for encrypt_type='DP_ENCRYPT'\nparser.add_argument(\"--dp_eps\", type=float, default=50.0)\nparser.add_argument(\"--dp_delta\", type=float, default=0.01) # 1/worker_num\nparser.add_argument(\"--dp_norm_clip\", type=float, default=1.0)\n# parameters for encrypt_type='PW_ENCRYPT'\nparser.add_argument(\"--share_secrets_ratio\", type=float, default=1.0)\nparser.add_argument(\"--cipher_time_window\", type=int, default=300000)\nparser.add_argument(\"--reconstruct_secrets_threshold\", type=int, default=3)\n\nargs, _ = parser.parse_known_args()\ndevice_target = args.device_target\nserver_mode = args.server_mode\nms_role = args.ms_role\nworker_num = args.worker_num\nserver_num = args.server_num\nscheduler_ip = args.scheduler_ip\nscheduler_port = args.scheduler_port\nfl_server_port = args.fl_server_port\nstart_fl_job_threshold = args.start_fl_job_threshold\nstart_fl_job_time_window = args.start_fl_job_time_window\nupdate_model_ratio = args.update_model_ratio\nupdate_model_time_window = args.update_model_time_window\nshare_secrets_ratio = args.share_secrets_ratio\ncipher_time_window = 
args.cipher_time_window\nreconstruct_secrets_threshold = args.reconstruct_secrets_threshold\nfl_name = args.fl_name\nfl_iteration_num = args.fl_iteration_num\nclient_epoch_num = args.client_epoch_num\nclient_batch_size = args.client_batch_size\nclient_learning_rate = args.client_learning_rate\nscheduler_manage_port = args.scheduler_manage_port\nconfig_file_path = args.config_file_path\ndp_eps = args.dp_eps\ndp_delta = args.dp_delta\ndp_norm_clip = args.dp_norm_clip\nencrypt_type = args.encrypt_type\n\nctx = {\n \"enable_fl\": True,\n \"server_mode\": server_mode,\n \"ms_role\": ms_role,\n \"worker_num\": worker_num,\n \"server_num\": server_num,\n \"scheduler_ip\": scheduler_ip,\n \"scheduler_port\": scheduler_port,\n \"fl_server_port\": fl_server_port,\n \"start_fl_job_threshold\": start_fl_job_threshold,\n \"start_fl_job_time_window\": start_fl_job_time_window,\n \"update_model_ratio\": update_model_ratio,\n \"update_model_time_window\": update_model_time_window,\n \"share_secrets_ratio\": share_secrets_ratio,\n \"cipher_time_window\": cipher_time_window,\n \"reconstruct_secrets_threshold\": reconstruct_secrets_threshold,\n \"fl_name\": fl_name,\n \"fl_iteration_num\": fl_iteration_num,\n \"client_epoch_num\": client_epoch_num,\n \"client_batch_size\": client_batch_size,\n \"client_learning_rate\": client_learning_rate,\n \"scheduler_manage_port\": scheduler_manage_port,\n \"config_file_path\": config_file_path,\n \"dp_eps\": dp_eps,\n \"dp_delta\": dp_delta,\n \"dp_norm_clip\": dp_norm_clip,\n \"encrypt_type\": encrypt_type\n}\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)\ncontext.set_fl_context(**ctx)\n\nif __name__ == \"__main__\":\n epoch = 5\n np.random.seed(0)\n network = LeNet5(62)\n criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n net_adam_opt = AdamWeightDecayOp(network.trainable_params(), weight_decay=0.1)\n net_with_criterion = WithLossCell(network, criterion)\n train_network = TrainOneStepCell(net_with_criterion, net_opt)\n train_network.set_train()\n losses = []\n\n for _ in range(epoch):\n data = Tensor(np.random.rand(32, 3, 32, 32).astype(np.float32))\n label = Tensor(np.random.randint(0, 61, (32)).astype(np.int32))\n loss = train_network(data, label).asnumpy()\n losses.append(loss)\n print(losses)\n",
"\"\"\"\nWiderFace evaluation code\nauthor: wondervictor\nmail: tianhengcheng@gmail.com\ncopyright@wondervictor\n\nMIT License\n\nCopyright (c) 2018 Vic Chan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom __future__ import division\n\nimport os\nimport pickle\nimport argparse\nimport numpy as np\nfrom scipy.io import loadmat\nfrom bbox import bbox_overlaps\n\ndef get_gt_boxes(gt_dir):\n \"\"\" gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)\"\"\"\n\n gt_mat = loadmat(os.path.join(gt_dir, 'val.mat')) # you own ground_truth name\n hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))\n medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))\n easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))\n\n facebox_list = gt_mat['face_bbx_list']\n event_list = gt_mat['event_list']\n file_list = gt_mat['file_list']\n\n hard_gt_list = hard_mat['gt_list']\n medium_gt_list = medium_mat['gt_list']\n easy_gt_list = easy_mat['gt_list']\n\n return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list\n\n\ndef get_gt_boxes_from_txt(gt_path, cache_dir):\n \"\"\"\n Get gt boxes from binary txt file.\n \"\"\"\n cache_file = os.path.join(cache_dir, 'gt_cache.pkl')\n if os.path.exists(cache_file):\n f = open(cache_file, 'rb')\n boxes = pickle.load(f)\n f.close()\n return boxes\n\n f = open(gt_path, 'r')\n state = 0\n lines = f.readlines()\n lines = list(map(lambda x: x.rstrip('\\r\\n'), lines))\n boxes = {}\n f.close()\n current_boxes = []\n current_name = None\n for line in lines:\n if state == 0 and '--' in line:\n state = 1\n current_name = line\n continue\n if state == 1:\n state = 2\n continue\n\n if state == 2 and '--' in line:\n state = 1\n boxes[current_name] = np.array(current_boxes).astype('float32')\n current_name = line\n current_boxes = []\n continue\n\n if state == 2:\n box = [float(x) for x in line.split(' ')[:4]]\n current_boxes.append(box)\n continue\n\n f = open(cache_file, 'wb')\n pickle.dump(boxes, f)\n f.close()\n return boxes\n\n\ndef read_pred_file(filepath):\n\n with open(filepath, 'r') as f:\n lines = f.readlines()\n img_file = lines[0].rstrip('\\n\\r')\n lines = lines[2:]\n\n boxes = np.array(list(map(lambda x: [float(a) for a in x.rstrip('\\r\\n').split(' ')], lines))).astype('float')\n return img_file.split('/')[-1], boxes\n\n\ndef get_preds(pred_dir):\n \"\"\"Get preds\"\"\"\n events = os.listdir(pred_dir)\n boxes = dict()\n pbar = events\n for event in pbar:\n event_dir = 
os.path.join(pred_dir, event)\n event_images = os.listdir(event_dir)\n current_event = dict()\n for imgtxt in event_images:\n imgname, box = read_pred_file(os.path.join(event_dir, imgtxt))\n current_event[imgname.rstrip('.jpg')] = box\n boxes[event] = current_event\n return boxes\n\n\ndef norm_score(pred_norm):\n \"\"\" norm score\n pred_norm {key: [[x1,y1,x2,y2,s]]}\n \"\"\"\n max_score = 0\n min_score = 1\n\n for _, k in pred_norm.items():\n for _, v in k.items():\n if v.size == 0:\n continue\n min_v = np.min(v[:, -1])\n max_v = np.max(v[:, -1])\n max_score = max(max_v, max_score)\n min_score = min(min_v, min_score)\n\n diff = max_score - min_score\n for _, k in pred_norm.items():\n for _, v in k.items():\n if v.size == 0:\n continue\n v[:, -1] = (v[:, -1] - min_score)/diff\n\n\ndef image_eval(pred_eval, gt, ignore, iou_thresh):\n \"\"\" single image evaluation\n pred_eval: Nx5\n gt: Nx4\n ignore:\n \"\"\"\n pred_t = pred_eval.copy()\n gt_t = gt.copy()\n pred_recall = np.zeros(pred_t.shape[0])\n recall_list = np.zeros(gt_t.shape[0])\n proposal_list = np.ones(pred_t.shape[0])\n\n pred_t[:, 2] = pred_t[:, 2] + pred_t[:, 0]\n pred_t[:, 3] = pred_t[:, 3] + pred_t[:, 1]\n gt_t[:, 2] = gt_t[:, 2] + gt_t[:, 0]\n gt_t[:, 3] = gt_t[:, 3] + gt_t[:, 1]\n\n overlaps = bbox_overlaps(pred_t[:, :4], gt_t)\n\n for h in range(pred_t.shape[0]):\n\n gt_overlap = overlaps[h]\n max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()\n if max_overlap >= iou_thresh:\n if ignore[max_idx] == 0:\n recall_list[max_idx] = -1\n proposal_list[h] = -1\n elif recall_list[max_idx] == 0:\n recall_list[max_idx] = 1\n\n r_keep_index = np.where(recall_list == 1)[0]\n pred_recall[h] = len(r_keep_index)\n return pred_recall, proposal_list\n\n\ndef img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):\n \"\"\"\n Image pr info\n \"\"\"\n pr_info = np.zeros((thresh_num, 2)).astype('float')\n for t in range(thresh_num):\n\n thresh = 1 - (t+1)/thresh_num\n r_index = np.where(pred_info[:, 4] >= thresh)[0]\n if r_index.size == 0:\n pr_info[t, 0] = 0\n pr_info[t, 1] = 0\n else:\n r_index = r_index[-1]\n p_index = np.where(proposal_list[:r_index+1] == 1)[0]\n pr_info[t, 0] = len(p_index)\n pr_info[t, 1] = pred_recall[r_index]\n return pr_info\n\n\ndef dataset_pr_info(thresh_num, pr_curve, count_face):\n pr_curve_t = np.zeros((thresh_num, 2))\n for i in range(thresh_num):\n pr_curve_t[i, 0] = pr_curve[i, 1] / pr_curve[i, 0]\n pr_curve_t[i, 1] = pr_curve[i, 1] / count_face\n return pr_curve_t\n\n\ndef voc_ap(rec, prec):\n \"\"\"\n Voc ap calculation\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef evaluation(pred_evaluation, gt_path, iou_thresh=0.4):\n \"\"\"\n evaluation method.\n \"\"\"\n print_pred = pred_evaluation\n pred_evaluation = get_preds(pred_evaluation)\n norm_score(pred_evaluation)\n facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)\n event_num = len(event_list)\n thresh_num = 1000\n setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]\n\n aps = []\n for setting_id 
in range(3):\n # different setting\n gt_list = setting_gts[setting_id]\n count_face = 0\n pr_curve = np.zeros((thresh_num, 2)).astype('float')\n # [hard, medium, easy]\n pbar = range(event_num)\n error_count = 0\n for i in pbar:\n event_name = str(event_list[i][0][0])\n img_list = file_list[i][0]\n pred_list = pred_evaluation[event_name]\n sub_gt_list = gt_list[i][0]\n gt_bbx_list = facebox_list[i][0]\n\n for j, _ in enumerate(img_list):\n try:\n pred_info = pred_list[str(img_list[j][0][0])]\n except KeyError:\n error_count += 1\n continue\n\n gt_boxes = gt_bbx_list[j][0].astype('float')\n keep_index = sub_gt_list[j][0]\n count_face += len(keep_index)\n if gt_boxes.size == 0 or pred_info.size == 0:\n continue\n ignore = np.zeros(gt_boxes.shape[0])\n if keep_index.size != 0:\n ignore[keep_index-1] = 1\n pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh)\n\n pr_curve += img_pr_info(thresh_num, pred_info, proposal_list, pred_recall)\n\n pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face)\n\n propose = pr_curve[:, 0]\n recall = pr_curve[:, 1]\n\n ap = voc_ap(recall, propose)\n aps.append(ap)\n\n print(\"==================== Results = ====================\", print_pred)\n print(\"Easy Val AP: {}\".format(aps[0]))\n print(\"Medium Val AP: {}\".format(aps[1]))\n print(\"Hard Val AP: {}\".format(aps[2]))\n print(\"=================================================\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--pred', default='',\n help='test output, txt contain box positions and scores')\n parser.add_argument('-g', '--gt', default='', help='ground truth path, mat format')\n args = parser.parse_args()\n\n pred = args.pred\n if os.path.isdir(pred):\n evaluation(pred, args.gt)\n else:\n pass\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nThis is the test module for saveOp.\n\"\"\"\nimport os\nfrom string import punctuation\nimport numpy as np\nimport pytest\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\nfrom mindspore.mindrecord import FileWriter\n\nTEMP_FILE = \"../data/mindrecord/testMindDataSet/temp.mindrecord\"\nAUTO_FILE = \"../data/mindrecord/testMindDataSet/auto.mindrecord\"\nTFRECORD_FILES = \"../data/mindrecord/testTFRecordData/dummy.tfrecord\"\nFILES_NUM = 1\nnum_readers = 1\n\n\n@pytest.fixture(name=\"add_remove_file\")\ndef fixture_remove():\n \"\"\"add/remove cv file\"\"\"\n if os.path.exists(\"{}\".format(TEMP_FILE)):\n os.remove(\"{}\".format(TEMP_FILE))\n if os.path.exists(\"{}.db\".format(TEMP_FILE)):\n os.remove(\"{}.db\".format(TEMP_FILE))\n\n if os.path.exists(\"{}\".format(AUTO_FILE)):\n os.remove(\"{}\".format(AUTO_FILE))\n if os.path.exists(\"{}.db\".format(AUTO_FILE)):\n os.remove(\"{}.db\".format(AUTO_FILE))\n yield \"yield_cv_data\"\n if os.path.exists(\"{}\".format(TEMP_FILE)):\n os.remove(\"{}\".format(TEMP_FILE))\n if os.path.exists(\"{}.db\".format(TEMP_FILE)):\n os.remove(\"{}.db\".format(TEMP_FILE))\n\n if os.path.exists(\"{}\".format(AUTO_FILE)):\n os.remove(\"{}\".format(AUTO_FILE))\n if os.path.exists(\"{}.db\".format(AUTO_FILE)):\n os.remove(\"{}.db\".format(AUTO_FILE))\n\n\ndef test_case_00(add_remove_file): # only bin data\n data = [{\"image1\": bytes(\"image1 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image1 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image1 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image1 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image1 bytes mno\", encoding='UTF-8')},\n {\"image1\": bytes(\"image2 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image2 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image2 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image2 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image2 bytes mno\", encoding='UTF-8')},\n {\"image1\": bytes(\"image3 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image3 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image3 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image3 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image3 bytes mno\", encoding='UTF-8')},\n {\"image1\": bytes(\"image5 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image5 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image5 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image5 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image5 bytes mno\", encoding='UTF-8')},\n {\"image1\": bytes(\"image6 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image6 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image6 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image6 bytes jkl\", encoding='UTF-8'),\n 
\"image5\": bytes(\"image6 bytes mno\", encoding='UTF-8')}]\n schema = {\n \"image1\": {\"type\": \"bytes\"},\n \"image2\": {\"type\": \"bytes\"},\n \"image3\": {\"type\": \"bytes\"},\n \"image4\": {\"type\": \"bytes\"},\n \"image5\": {\"type\": \"bytes\"}}\n writer = FileWriter(TEMP_FILE, FILES_NUM)\n writer.add_schema(schema, \"schema\")\n writer.write_raw_data(data)\n writer.commit()\n\n d1 = ds.MindDataset(TEMP_FILE, None, num_readers, shuffle=False)\n d1.save(AUTO_FILE, FILES_NUM)\n data_value_to_list = []\n\n for item in data:\n new_data = {}\n new_data['image1'] = np.asarray(list(item[\"image1\"]), dtype=np.uint8)\n new_data['image2'] = np.asarray(list(item[\"image2\"]), dtype=np.uint8)\n new_data['image3'] = np.asarray(list(item[\"image3\"]), dtype=np.uint8)\n new_data['image4'] = np.asarray(list(item[\"image4\"]), dtype=np.uint8)\n new_data['image5'] = np.asarray(list(item[\"image5\"]), dtype=np.uint8)\n data_value_to_list.append(new_data)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert d2.get_dataset_size() == 5\n num_iter = 0\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 5\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 5\n\n\ndef test_case_01(add_remove_file): # only raw data\n data = [{\"file_name\": \"001.jpg\", \"label\": 43},\n {\"file_name\": \"002.jpg\", \"label\": 91},\n {\"file_name\": \"003.jpg\", \"label\": 61},\n {\"file_name\": \"004.jpg\", \"label\": 29},\n {\"file_name\": \"005.jpg\", \"label\": 78},\n {\"file_name\": \"006.jpg\", \"label\": 37}]\n schema = {\"file_name\": {\"type\": \"string\"},\n \"label\": {\"type\": \"int32\"}\n }\n\n writer = FileWriter(TEMP_FILE, FILES_NUM)\n writer.add_schema(schema, \"schema\")\n writer.write_raw_data(data)\n writer.commit()\n\n d1 = ds.MindDataset(TEMP_FILE, None, num_readers, shuffle=False)\n d1.save(AUTO_FILE, FILES_NUM)\n\n data_value_to_list = []\n for item in data:\n new_data = {}\n new_data['file_name'] = np.asarray(item[\"file_name\"], dtype='S')\n new_data['label'] = np.asarray(list([item[\"label\"]]), dtype=np.int32)\n data_value_to_list.append(new_data)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert d2.get_dataset_size() == 6\n num_iter = 0\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n logger.info(item)\n assert len(item) == 2\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n\ndef test_case_02(add_remove_file): # muti-bytes\n data = [{\"file_name\": \"001.jpg\", \"label\": 43,\n \"float32_array\": np.array([1.2, 2.78, 3.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 50.13514312414, 51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12345,\n \"float64\": 1987654321.123456785,\n \"source_sos_ids\": np.array([1, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image1 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image1 bytes def\", encoding='UTF-8'),\n \"image3\": 
bytes(\"image1 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image1 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image1 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"002.jpg\", \"label\": 91,\n \"float32_array\": np.array([1.2, 2.78, 4.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 60.13514312414, 51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12445,\n \"float64\": 1987654321.123456786,\n \"source_sos_ids\": np.array([11, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image2 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image2 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image2 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image2 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image2 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"003.jpg\", \"label\": 61,\n \"float32_array\": np.array([1.2, 2.78, 5.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 70.13514312414, 51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12545,\n \"float64\": 1987654321.123456787,\n \"source_sos_ids\": np.array([21, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image3 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image3 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image3 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image3 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image3 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"004.jpg\", \"label\": 29,\n \"float32_array\": np.array([1.2, 2.78, 6.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 80.13514312414, 51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12645,\n \"float64\": 1987654321.123456788,\n \"source_sos_ids\": np.array([31, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image4 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image4 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image4 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image4 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image4 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"005.jpg\", \"label\": 78,\n \"float32_array\": np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12745,\n \"float64\": 1987654321.123456789,\n \"source_sos_ids\": np.array([41, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image5 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image5 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image5 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image5 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image5 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"006.jpg\", \"label\": 37,\n \"float32_array\": np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),\n \"float64_array\": np.array([48.1234556789, 49.3251241431, 90.13514312414, 
51.8971298471,\n 123414314.2141243, 87.1212122], dtype=np.float64),\n \"float32\": 3456.12745,\n \"float64\": 1987654321.123456789,\n \"source_sos_ids\": np.array([51, 2, 3, 4, 5], dtype=np.int32),\n \"source_sos_mask\": np.array([56, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image6 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image6 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image6 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image6 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image6 bytes mno\", encoding='UTF-8')}\n ]\n schema = {\"file_name\": {\"type\": \"string\"},\n \"float32_array\": {\"type\": \"float32\", \"shape\": [-1]},\n \"float64_array\": {\"type\": \"float64\", \"shape\": [-1]},\n \"float32\": {\"type\": \"float32\"},\n \"float64\": {\"type\": \"float64\"},\n \"source_sos_ids\": {\"type\": \"int32\", \"shape\": [-1]},\n \"source_sos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"image1\": {\"type\": \"bytes\"},\n \"image2\": {\"type\": \"bytes\"},\n \"image3\": {\"type\": \"bytes\"},\n \"label\": {\"type\": \"int32\"},\n \"image4\": {\"type\": \"bytes\"},\n \"image5\": {\"type\": \"bytes\"}}\n writer = FileWriter(TEMP_FILE, FILES_NUM)\n writer.add_schema(schema, \"schema\")\n writer.write_raw_data(data)\n writer.commit()\n\n d1 = ds.MindDataset(TEMP_FILE, None, num_readers, shuffle=False)\n d1.save(AUTO_FILE, FILES_NUM)\n data_value_to_list = []\n\n for item in data:\n new_data = {}\n new_data['file_name'] = np.asarray(item[\"file_name\"], dtype='S')\n new_data['float32_array'] = item[\"float32_array\"]\n new_data['float64_array'] = item[\"float64_array\"]\n new_data['float32'] = item[\"float32\"]\n new_data['float64'] = item[\"float64\"]\n new_data['source_sos_ids'] = item[\"source_sos_ids\"]\n new_data['source_sos_mask'] = item[\"source_sos_mask\"]\n new_data['label'] = np.asarray(list([item[\"label\"]]), dtype=np.int32)\n new_data['image1'] = np.asarray(list(item[\"image1\"]), dtype=np.uint8)\n new_data['image2'] = np.asarray(list(item[\"image2\"]), dtype=np.uint8)\n new_data['image3'] = np.asarray(list(item[\"image3\"]), dtype=np.uint8)\n new_data['image4'] = np.asarray(list(item[\"image4\"]), dtype=np.uint8)\n new_data['image5'] = np.asarray(list(item[\"image5\"]), dtype=np.uint8)\n data_value_to_list.append(new_data)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert d2.get_dataset_size() == 6\n num_iter = 0\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 13\n for field in item:\n if isinstance(item[field], np.ndarray):\n if item[field].dtype == np.float32:\n assert (item[field] ==\n np.array(data_value_to_list[num_iter][field], np.float32)).all()\n else:\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n\ndef generator_1d():\n for i in range(10):\n yield (np.array([i]),)\n\n\ndef test_case_03(add_remove_file):\n\n # apply dataset operations\n d1 = ds.GeneratorDataset(generator_1d, [\"data\"], shuffle=False)\n\n d1.save(AUTO_FILE)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n\n i = 0\n # each data is a dictionary\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n\n\ndef generator_with_type(t):\n for i in 
range(64):\n yield (np.array([i], dtype=t),)\n\n\ndef type_tester(t):\n logger.info(\"Test with Type {}\".format(t.__name__))\n\n # apply dataset operations\n data1 = ds.GeneratorDataset((lambda: generator_with_type(t)), [\"data\"], shuffle=False)\n\n data1 = data1.batch(4)\n\n data1 = data1.repeat(3)\n\n data1.save(AUTO_FILE)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n\n i = 0\n num_repeat = 0\n # each data is a dictionary\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]], dtype=t)\n logger.info(item)\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 4\n if i == 64:\n i = 0\n num_repeat += 1\n assert num_repeat == 3\n if os.path.exists(\"{}\".format(AUTO_FILE)):\n os.remove(\"{}\".format(AUTO_FILE))\n if os.path.exists(\"{}.db\".format(AUTO_FILE)):\n os.remove(\"{}.db\".format(AUTO_FILE))\n\n\ndef test_case_04():\n # uint8 will drop shape as mindrecord store uint8 as bytes\n types = [np.int8, np.int16, np.int32, np.int64,\n np.uint16, np.uint32, np.float32, np.float64]\n\n for t in types:\n type_tester(t)\n\n\ndef test_case_05(add_remove_file):\n\n d1 = ds.GeneratorDataset(generator_1d, [\"data\"], shuffle=False)\n\n with pytest.raises(Exception, match=\"num_files should between 0 and 1000.\"):\n d1.save(AUTO_FILE, 0)\n\n\ndef test_case_06(add_remove_file):\n\n d1 = ds.GeneratorDataset(generator_1d, [\"data\"], shuffle=False)\n\n with pytest.raises(Exception, match=\"tfrecord dataset format is not supported.\"):\n d1.save(AUTO_FILE, 1, \"tfrecord\")\n\n\ndef cast_name(key):\n \"\"\"\n Cast schema names which containing special characters to valid names.\n \"\"\"\n special_symbols = set('{}{}'.format(punctuation, ' '))\n special_symbols.remove('_')\n new_key = ['_' if x in special_symbols else x for x in key]\n casted_key = ''.join(new_key)\n return casted_key\n\n\ndef test_case_07():\n if os.path.exists(\"{}\".format(AUTO_FILE)):\n os.remove(\"{}\".format(AUTO_FILE))\n if os.path.exists(\"{}.db\".format(AUTO_FILE)):\n os.remove(\"{}.db\".format(AUTO_FILE))\n d1 = ds.TFRecordDataset(TFRECORD_FILES, shuffle=False)\n tf_data = []\n for x in d1.create_dict_iterator(num_epochs=1, output_numpy=True):\n tf_data.append(x)\n d1.save(AUTO_FILE, FILES_NUM)\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n mr_data = []\n for x in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n mr_data.append(x)\n count = 0\n for x in tf_data:\n for k, v in x.items():\n if isinstance(v, np.ndarray):\n assert (v == mr_data[count][cast_name(k)]).all()\n else:\n assert v == mr_data[count][cast_name(k)]\n count += 1\n assert count == 10\n\n if os.path.exists(\"{}\".format(AUTO_FILE)):\n os.remove(\"{}\".format(AUTO_FILE))\n if os.path.exists(\"{}.db\".format(AUTO_FILE)):\n os.remove(\"{}.db\".format(AUTO_FILE))\n\n\ndef generator_dynamic_1d():\n arr = []\n for i in range(10):\n if i % 5 == 0:\n arr = []\n arr += [i]\n yield (np.array(arr),)\n\n\ndef generator_dynamic_2d_0():\n for i in range(10):\n if i < 5:\n yield (np.arange(5).reshape([1, 5]),)\n else:\n yield (np.arange(10).reshape([2, 5]),)\n\n\ndef generator_dynamic_2d_1():\n for i in range(10):\n if i < 5:\n yield (np.arange(5).reshape([5, 1]),)\n else:\n yield (np.arange(10).reshape([5, 2]),)\n\n\ndef test_case_08(add_remove_file):\n\n # apply dataset operations\n d1 = ds.GeneratorDataset(generator_dynamic_1d, [\"data\"], shuffle=False)\n\n 
d1.save(AUTO_FILE)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n\n i = 0\n arr = []\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n if i % 5 == 0:\n arr = []\n arr += [i]\n golden = np.array(arr)\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n\n\ndef test_case_09(add_remove_file):\n\n # apply dataset operations\n d1 = ds.GeneratorDataset(generator_dynamic_2d_0, [\"data\"], shuffle=False)\n\n d1.save(AUTO_FILE)\n\n d2 = ds.MindDataset(dataset_file=AUTO_FILE,\n num_parallel_workers=num_readers,\n shuffle=False)\n\n i = 0\n for item in d2.create_dict_iterator(num_epochs=1, output_numpy=True):\n if i < 5:\n golden = np.arange(5).reshape([1, 5])\n else:\n golden = np.arange(10).reshape([2, 5])\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n\n\ndef test_case_10(add_remove_file):\n\n # apply dataset operations\n d1 = ds.GeneratorDataset(generator_dynamic_2d_1, [\"data\"], shuffle=False)\n\n with pytest.raises(Exception, match=\n \"Error: besides dimension 0, other dimension shape is different from the previous's\"):\n d1.save(AUTO_FILE)\n"
] |
[
[
"numpy.random.seed",
"numpy.random.randint",
"numpy.random.rand"
],
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.min",
"numpy.where",
"numpy.maximum"
],
[
"numpy.array",
"numpy.arange",
"numpy.asarray",
"numpy.testing.assert_array_equal"
]
] |
mmtechslv/PhyloMAF
|
[
"bab43dd4a4d2812951b1fdf4f1abb83edb79ea88",
"bab43dd4a4d2812951b1fdf4f1abb83edb79ea88"
] |
[
"pmaf/biome/essentials/_taxonomy.py",
"pmaf/sequence/_multiple/_stream.py"
] |
[
"import warnings\n\nwarnings.simplefilter(\"ignore\", category=FutureWarning)\nfrom pmaf.biome.essentials._metakit import EssentialFeatureMetabase\nfrom pmaf.biome.essentials._base import EssentialBackboneBase\nfrom pmaf.internal._constants import (\n AVAIL_TAXONOMY_NOTATIONS,\n jRegexGG,\n jRegexQIIME,\n BIOM_TAXONOMY_NAMES,\n VALID_RANKS,\n)\nfrom pmaf.internal._shared import (\n generate_lineages_from_taxa,\n get_rank_upto,\n indentify_taxon_notation,\n validate_ranks,\n extract_valid_ranks,\n cols2ranks,\n)\nfrom collections import defaultdict\nfrom os import path\nimport pandas as pd\nimport numpy as np\nimport biom\nfrom typing import Union, Sequence, Tuple, Any, Optional\nfrom pmaf.internal._typing import AnyGenericIdentifier, Mapper\n\n\nclass RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):\n \"\"\"An `essential` class for handling taxonomy data.\"\"\"\n\n def __init__(\n self,\n taxonomy: Union[pd.DataFrame, pd.Series, str],\n taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"Constructor for :class:`.RepTaxonomy`\n\n Parameters\n ----------\n taxonomy\n Data containing feature taxonomy\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.\n \"\"\"\n tmp_metadata = kwargs.pop(\"metadata\", {})\n self.__avail_ranks = []\n self.__internal_taxonomy = None\n if isinstance(taxonomy, pd.DataFrame):\n if taxonomy.shape[0] > 0:\n if taxonomy.shape[1] > 1:\n if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):\n tmp_taxonomy = taxonomy\n else:\n raise ValueError(\n \"Provided `taxonomy` Datafame has invalid ranks.\"\n )\n else:\n tmp_taxonomy = taxonomy.iloc[:, 0]\n else:\n raise ValueError(\"Provided `taxonomy` Datafame is invalid.\")\n elif isinstance(taxonomy, pd.Series):\n if taxonomy.shape[0] > 0:\n tmp_taxonomy = taxonomy\n else:\n raise ValueError(\"Provided `taxonomy` Series is invalid.\")\n elif isinstance(taxonomy, str):\n if path.isfile(taxonomy):\n file_extension = path.splitext(taxonomy)[-1].lower()\n if file_extension in [\".csv\", \".tsv\"]:\n if taxonomy_columns is None:\n tmp_taxonomy = pd.read_csv(\n taxonomy,\n sep=kwargs.pop(\"sep\", \",\"),\n header=kwargs.pop(\"header\", \"infer\"),\n index_col=kwargs.pop(\"index_col\", None),\n )\n else:\n if isinstance(taxonomy_columns, int):\n tmp_taxonomy = pd.read_csv(\n taxonomy,\n sep=kwargs.pop(\"sep\", \",\"),\n header=kwargs.pop(\"header\", \"infer\"),\n index_col=kwargs.pop(\"index_col\", None),\n ).iloc[:, taxonomy_columns]\n else:\n tmp_taxonomy = pd.read_csv(\n taxonomy,\n sep=kwargs.pop(\"sep\", \",\"),\n header=kwargs.pop(\"header\", \"infer\"),\n index_col=kwargs.pop(\"index_col\", None),\n ).loc[:, taxonomy_columns]\n elif file_extension in [\".biom\", \".biome\"]:\n tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)\n tmp_metadata.update({\"biom\": new_metadata})\n else:\n raise NotImplementedError(\"File type is not supported.\")\n else:\n raise FileNotFoundError(\"Provided `taxonomy` file path is invalid.\")\n else:\n raise TypeError(\"Provided `taxonomy` has invalid type.\")\n self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)\n super().__init__(metadata=tmp_metadata, **kwargs)\n\n @classmethod\n def from_csv(\n cls,\n filepath: str,\n taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,\n **kwargs: Any\n ) -> \"RepTaxonomy\":\n \"\"\"Factory method to construct a :class:`.RepTaxonomy` from CSV file.\n\n Parameters\n ----------\n 
filepath\n Path to .csv File\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to the constructor.\n filepath:\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n \"\"\"\n if taxonomy_columns is None:\n tmp_taxonomy = pd.read_csv(filepath, **kwargs)\n else:\n if isinstance(taxonomy_columns, int):\n tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]\n else:\n tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]\n tmp_metadata = kwargs.pop(\"metadata\", {})\n tmp_metadata.update({\"filepath\": path.abspath(filepath)})\n return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)\n\n @classmethod\n def from_biom(cls, filepath: str, **kwargs: Any) -> \"RepTaxonomy\":\n \"\"\"Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`\n file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Passed to the constructor.\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n \"\"\"\n taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)\n tmp_metadata = kwargs.pop(\"metadata\", {})\n tmp_metadata.update({\"biom\": new_metadata})\n return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)\n\n @classmethod\n def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:\n \"\"\"Actual private method to process :mod:`biom` file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Compatibility\n \"\"\"\n biom_file = biom.load_table(filepath)\n if biom_file.metadata(axis=\"observation\") is not None:\n obs_data = biom_file.metadata_to_dataframe(\"observation\")\n col_names = list(obs_data.columns.values)\n col_names_low = [col.lower() for col in col_names]\n avail_col_names = [\n colname\n for tax_name in BIOM_TAXONOMY_NAMES\n for colname in col_names_low\n if colname[::-1].find(tax_name[::-1]) < 3\n and colname[::-1].find(tax_name[::-1]) > -1\n ]\n metadata_cols = [\n col for col in col_names if col.lower() not in avail_col_names\n ]\n if len(avail_col_names) == 1:\n tmp_col_index = col_names_low.index(avail_col_names[0])\n taxonomy_frame = obs_data[col_names[tmp_col_index]]\n else:\n taxonomy_frame = obs_data\n tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()\n return taxonomy_frame, tmp_metadata\n else:\n raise ValueError(\"Biom file does not contain observation metadata.\")\n\n def _remove_features_by_id(\n self, ids: AnyGenericIdentifier, **kwargs: Any\n ) -> Optional[AnyGenericIdentifier]:\n \"\"\"Remove features by features ids and ratify action.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n \"\"\"\n tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)\n if len(tmp_ids) > 0:\n self.__internal_taxonomy.drop(tmp_ids, inplace=True)\n return self._ratify_action(\"_remove_features_by_id\", ids, **kwargs)\n\n def _merge_features_by_map(\n self, map_dict: Mapper, done: bool = False, **kwargs: Any\n ) -> Optional[Mapper]:\n \"\"\"Merge features and ratify action.\n\n Parameters\n ----------\n map_dict\n Map to use for merging\n done\n Whether merging was completed or not. 
Compatibility.\n kwargs\n Compatibility\n \"\"\"\n if not done:\n raise NotImplementedError\n if map_dict:\n return self._ratify_action(\n \"_merge_features_by_map\",\n map_dict,\n _annotations=self.__internal_taxonomy.loc[:, \"lineage\"].to_dict(),\n **kwargs\n )\n\n def drop_feature_by_id(\n self, ids: AnyGenericIdentifier, **kwargs: Any\n ) -> Optional[AnyGenericIdentifier]:\n \"\"\"Remove features by feature `ids`.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n \"\"\"\n target_ids = np.asarray(ids)\n if self.xrid.isin(target_ids).sum() == len(target_ids):\n return self._remove_features_by_id(target_ids, **kwargs)\n else:\n raise ValueError(\"Invalid feature ids are provided.\")\n\n def get_taxonomy_by_id(\n self, ids: Optional[AnyGenericIdentifier] = None\n ) -> pd.DataFrame:\n \"\"\"Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n\n Returns\n -------\n class:`pandas.DataFrame` with taxonomy data\n \"\"\"\n if ids is None:\n target_ids = self.xrid\n else:\n target_ids = np.asarray(ids)\n if self.xrid.isin(target_ids).sum() <= len(target_ids):\n return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]\n else:\n raise ValueError(\"Invalid feature ids are provided.\")\n\n def get_lineage_by_id(\n self,\n ids: Optional[AnyGenericIdentifier] = None,\n missing_rank: bool = False,\n desired_ranks: Union[bool, Sequence[str]] = False,\n drop_ranks: Union[bool, Sequence[str]] = False,\n **kwargs: Any\n ) -> pd.Series:\n \"\"\"Get taxonomy lineages by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n missing_rank\n If True will generate prefix like `s__` or `d__`\n desired_ranks\n List of desired ranks to generate.\n If False then will generate all main ranks\n drop_ranks\n List of ranks to drop from desired ranks.\n This parameter only useful if `missing_rank` is True\n kwargs\n Compatibility.\n\n Returns\n -------\n class:`pandas.Series` with consensus lineages and corresponding IDs\n \"\"\"\n if ids is None:\n target_ids = self.xrid\n else:\n target_ids = np.asarray(ids)\n tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks\n total_valid_rids = self.xrid.isin(target_ids).sum()\n if total_valid_rids == len(target_ids):\n return generate_lineages_from_taxa(\n self.__internal_taxonomy.loc[target_ids],\n missing_rank,\n tmp_desired_ranks,\n drop_ranks,\n )\n elif total_valid_rids < len(target_ids):\n return generate_lineages_from_taxa(\n self.__internal_taxonomy.loc[np.unique(target_ids)],\n missing_rank,\n tmp_desired_ranks,\n drop_ranks,\n )\n else:\n raise ValueError(\"Invalid feature ids are provided.\")\n\n def find_features_by_pattern(\n self, pattern_str: str, case_sensitive: bool = False, regex: bool = False\n ) -> np.ndarray:\n \"\"\"Searches for features with taxa that matches `pattern_str`\n\n Parameters\n ----------\n pattern_str\n Pattern to search for\n case_sensitive\n Case sensitive mode\n regex\n Use regular expressions\n\n\n Returns\n -------\n class:`~numpy.ndarray` with indices\n \"\"\"\n return self.__internal_taxonomy[\n self.__internal_taxonomy.loc[:, \"lineage\"].str.contains(\n pattern_str, case=case_sensitive, regex=regex\n )\n ].index.values\n\n def drop_features_without_taxa(\n self, **kwargs: Any\n ) -> Optional[AnyGenericIdentifier]:\n \"\"\"Remove features that do not contain taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n \"\"\"\n ids_to_drop = 
self.find_features_without_taxa()\n return self._remove_features_by_id(ids_to_drop, **kwargs)\n\n def drop_features_without_ranks(\n self, ranks: Sequence[str], any: bool = False, **kwargs: Any\n ) -> Optional[AnyGenericIdentifier]: # Done\n \"\"\"Remove features that do not contain `ranks`\n\n Parameters\n ----------\n ranks\n Ranks to look for\n any\n If True removes feature with single occurrence of missing rank.\n If False all `ranks` must be missing.\n kwargs\n Compatibility\n \"\"\"\n target_ranks = np.asarray(ranks)\n if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(\n target_ranks\n ):\n no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()\n no_rank_mask_adjusted = (\n no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)\n )\n ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index\n return self._remove_features_by_id(ids_to_drop, **kwargs)\n else:\n raise ValueError(\"Invalid ranks are provided.\")\n\n def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:\n \"\"\"Merge features with duplicated taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n \"\"\"\n ret = {}\n groupby = self.__internal_taxonomy.groupby(\"lineage\")\n if any([len(group) > 1 for group in groupby.groups.values()]):\n tmp_feature_lineage = []\n tmp_groups = []\n group_indices = list(range(len(groupby.groups)))\n for lineage, feature_ids in groupby.groups.items():\n tmp_feature_lineage.append(lineage)\n tmp_groups.append(list(feature_ids))\n self.__init_internal_taxonomy(\n pd.Series(data=tmp_feature_lineage, index=group_indices)\n )\n ret = dict(zip(group_indices, tmp_groups))\n return self._merge_features_by_map(ret, True, **kwargs)\n\n def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:\n \"\"\"Merge features by taxonomic rank/level.\n\n Parameters\n ----------\n level\n Taxonomic rank/level to use for merging.\n kwargs\n Compatibility\n \"\"\"\n ret = {}\n if not isinstance(level, str):\n raise TypeError(\"`rank` must have str type.\")\n if level in self.__avail_ranks:\n target_ranks = get_rank_upto(self.avail_ranks, level, True)\n if target_ranks:\n tmp_lineages = generate_lineages_from_taxa(\n self.__internal_taxonomy, False, target_ranks, False\n )\n groups = tmp_lineages.groupby(tmp_lineages)\n if len(groups.groups) > 1:\n tmp_feature_lineage = []\n tmp_groups = []\n group_indices = list(range(len(groups.groups)))\n for lineage, feature_ids in groups.groups.items():\n tmp_feature_lineage.append(lineage)\n tmp_groups.append(list(feature_ids))\n self.__init_internal_taxonomy(\n pd.Series(data=tmp_feature_lineage, index=group_indices)\n )\n ret = dict(zip(group_indices, tmp_groups))\n else:\n raise ValueError(\"Invalid rank are provided.\")\n return self._merge_features_by_map(ret, True, **kwargs)\n\n def find_features_without_taxa(self) -> np.ndarray:\n \"\"\"Find features without taxa.\n\n Returns\n -------\n class:`~numpy.ndarray` with feature indices.\n \"\"\"\n return self.__internal_taxonomy.loc[\n self.__internal_taxonomy.loc[:, VALID_RANKS].agg(\n lambda rank: len(\"\".join(map(lambda x: (str(x or \"\")), rank))), axis=1\n )\n < 1\n ].index.values\n\n def get_subset(\n self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any\n ) -> \"RepTaxonomy\":\n \"\"\"Get subset of the :class:`.RepTaxonomy`.\n\n Parameters\n ----------\n rids\n Feature identifiers.\n args\n Compatibility\n kwargs\n Compatibility\n\n Returns\n -------\n class:`.RepTaxonomy`\n \"\"\"\n if rids is None:\n 
target_rids = self.xrid\n else:\n target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)\n if not self.xrid.isin(target_rids).sum() == len(target_rids):\n raise ValueError(\"Invalid feature ids are provided.\")\n return type(self)(\n taxonomy=self.__internal_taxonomy.loc[target_rids, \"lineage\"],\n metadata=self.metadata,\n name=self.name,\n )\n\n def _export(\n self, taxlike: str = \"lineage\", ascending: bool = True, **kwargs: Any\n ) -> Tuple[pd.Series, dict]:\n \"\"\"Creates taxonomy for export.\n\n Parameters\n ----------\n taxlike\n Generate taxonomy in format(currently only `lineage` is supported.)\n ascending\n Sorting\n kwargs\n Compatibility\n \"\"\"\n if taxlike == \"lineage\":\n return (\n self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),\n kwargs,\n )\n else:\n raise NotImplemented\n\n def export(\n self,\n output_fp: str,\n *args,\n _add_ext: bool = False,\n sep: str = \",\",\n **kwargs: Any\n ) -> None:\n \"\"\"Exports the taxonomy into the specified file.\n\n Parameters\n ----------\n output_fp\n Export filepath\n args\n Compatibility\n _add_ext\n Add file extension or not.\n sep\n Delimiter\n kwargs\n Compatibility\n \"\"\"\n tmp_export, rkwarg = self._export(*args, **kwargs)\n if _add_ext:\n tmp_export.to_csv(\"{}.csv\".format(output_fp), sep=sep)\n else:\n tmp_export.to_csv(output_fp, sep=sep)\n\n def copy(self) -> \"RepTaxonomy\":\n \"\"\"Copy of the instance.\"\"\"\n return type(self)(\n taxonomy=self.__internal_taxonomy.loc[:, \"lineage\"],\n metadata=self.metadata,\n name=self.name,\n )\n\n def __fix_taxon_names(self) -> None:\n \"\"\"Fix invalid taxon names.\"\"\"\n\n def taxon_fixer(taxon):\n if taxon is not None and pd.notna(taxon):\n tmp_taxon_trimmed = taxon.lower().strip()\n if len(tmp_taxon_trimmed) > 0:\n if tmp_taxon_trimmed[0] == \"[\":\n tmp_taxon_trimmed = tmp_taxon_trimmed[1:]\n if tmp_taxon_trimmed[-1] == \"]\":\n tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]\n return tmp_taxon_trimmed.capitalize()\n else:\n return None\n else:\n return None\n\n self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[\n :, VALID_RANKS\n ].applymap(taxon_fixer)\n\n def __reconstruct_internal_lineages(self) -> None:\n \"\"\"Reconstruct the internal lineages.\"\"\"\n self.__internal_taxonomy.loc[:, \"lineage\"] = generate_lineages_from_taxa(\n self.__internal_taxonomy, True, self.__avail_ranks, False\n )\n\n def __init_internal_taxonomy(\n self,\n taxonomy_data: Union[pd.Series, pd.DataFrame],\n taxonomy_notation: Optional[str] = \"greengenes\",\n order_ranks: Optional[Sequence[str]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"Main method to initialize taxonomy.\n\n Parameters\n ----------\n taxonomy_data\n Incoming parsed taxonomy data\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of\n :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. 
Default is set to None.\n The 'silva' notation require `order_ranks`.\n kwargs\n Compatibility\n \"\"\"\n if isinstance(taxonomy_data, pd.Series):\n new_taxonomy = self.__init_taxonomy_from_lineages(\n taxonomy_data, taxonomy_notation, order_ranks\n )\n elif isinstance(taxonomy_data, pd.DataFrame):\n if taxonomy_data.shape[1] == 1:\n taxonomy_data_series = pd.Series(\n data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index\n )\n new_taxonomy = self.__init_taxonomy_from_lineages(\n taxonomy_data_series, taxonomy_notation, order_ranks\n )\n else:\n new_taxonomy = self.__init_taxonomy_from_frame(\n taxonomy_data, taxonomy_notation, order_ranks\n )\n else:\n raise RuntimeError(\n \"`taxonomy_data` must be either pd.Series or pd.Dataframe\"\n )\n\n if new_taxonomy is None:\n raise ValueError(\"Provided taxonomy is invalid.\")\n\n # Assign newly constructed taxonomy to the self.__internal_taxonomy\n self.__internal_taxonomy = new_taxonomy\n self.__fix_taxon_names() # Fix incorrect taxa\n tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]\n self.__avail_ranks = [\n rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()\n ]\n # Reconstruct internal lineages for default greengenes notation\n self.__reconstruct_internal_lineages()\n self._init_state = True\n\n def __init_taxonomy_from_lineages(\n self,\n taxonomy_series: pd.Series,\n taxonomy_notation: Optional[str],\n order_ranks: Optional[Sequence[str]],\n ) -> pd.DataFrame: # Done\n \"\"\"Main method that produces taxonomy dataframe from lineages.\n\n Parameters\n ----------\n taxonomy_series\n :class:`pandas.Series` with taxonomy lineages\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n \"\"\"\n # Check if taxonomy is known and is available for parsing. 
Otherwise indentify_taxon_notation() will try to identify notation\n if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:\n notation = taxonomy_notation\n else:\n # Get first lineage _sample for notation testing assuming the rest have the the same notations\n sample_taxon = taxonomy_series.iloc[0]\n # Identify notation of the lineage string\n notation = indentify_taxon_notation(sample_taxon)\n if order_ranks is not None:\n if all([rank in VALID_RANKS for rank in order_ranks]):\n target_order_ranks = order_ranks\n else:\n raise NotImplementedError\n else:\n target_order_ranks = VALID_RANKS\n if notation == \"greengenes\":\n lineages = taxonomy_series.reset_index().values.tolist()\n ordered_taxa_list = []\n ordered_indices_list = [elem[0] for elem in lineages]\n for lineage in lineages:\n tmp_lineage = jRegexGG.findall(lineage[1])\n tmp_taxa_dict = {\n elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS\n }\n for rank in VALID_RANKS:\n if rank not in tmp_taxa_dict.keys():\n tmp_taxa_dict.update({rank: None})\n tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]\n ordered_taxa_list.append([None] + tmp_taxa_ordered)\n taxonomy = pd.DataFrame(\n index=ordered_indices_list,\n data=ordered_taxa_list,\n columns=[\"lineage\"] + VALID_RANKS,\n )\n return taxonomy\n elif notation == \"qiime\":\n lineages = taxonomy_series.reset_index().values.tolist()\n tmp_taxa_dict_list = []\n tmp_ranks = set()\n for lineage in lineages:\n tmp_lineage = jRegexQIIME.findall(lineage[1])\n tmp_lineage.sort(key=lambda x: x[0])\n tmp_taxa_dict = defaultdict(None)\n tmp_taxa_dict[None] = lineage[0]\n for rank, taxon in tmp_lineage:\n tmp_taxa_dict[rank] = taxon\n tmp_ranks.add(rank)\n tmp_taxa_dict_list.append(dict(tmp_taxa_dict))\n tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)\n tmp_taxonomy_df.set_index(None, inplace=True)\n tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]\n tmp_taxonomy_df.columns = [\n rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]\n ][::-1]\n for rank in VALID_RANKS:\n if rank not in tmp_taxonomy_df.columns:\n tmp_taxonomy_df.loc[:, rank] = None\n return tmp_taxonomy_df\n elif notation == \"silva\":\n lineages = taxonomy_series.reset_index().values.tolist()\n tmp_taxa_dict_list = []\n tmp_ranks = set()\n for lineage in lineages:\n tmp_lineage = lineage[1].split(\";\")\n tmp_taxa_dict = defaultdict(None)\n tmp_taxa_dict[None] = lineage[0]\n for rank_i, taxon in enumerate(tmp_lineage):\n rank = target_order_ranks[rank_i]\n tmp_taxa_dict[rank] = taxon\n tmp_ranks.add(rank)\n tmp_taxa_dict_list.append(dict(tmp_taxa_dict))\n tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)\n tmp_taxonomy_df.set_index(None, inplace=True)\n tmp_rank_ordered = [\n rank for rank in target_order_ranks if rank in VALID_RANKS\n ]\n tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]\n tmp_taxonomy_df.columns = [\n rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]\n ][::-1]\n for rank in VALID_RANKS:\n if rank not in tmp_taxonomy_df.columns:\n tmp_taxonomy_df.loc[:, rank] = None\n return tmp_taxonomy_df\n\n else:\n raise NotImplementedError\n\n def __init_taxonomy_from_frame(\n self,\n taxonomy_dataframe: pd.DataFrame,\n taxonomy_notation: Optional[str],\n order_ranks: Optional[Sequence[str]],\n ) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series\n \"\"\"Main method that produces taxonomy sheet from dataframe.\n\n Parameters\n ----------\n taxonomy_dataframe\n :class:`~pandas.DataFrame` with taxa split 
by ranks.\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n \"\"\"\n valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)\n if valid_ranks is not None:\n if len(valid_ranks) > 0:\n return pd.concat(\n [\n taxonomy_dataframe,\n pd.DataFrame(\n data=\"\",\n index=taxonomy_dataframe.index,\n columns=[\n rank for rank in VALID_RANKS if rank not in valid_ranks\n ],\n ),\n ],\n axis=1,\n )\n else:\n taxonomy_series = taxonomy_dataframe.apply(\n lambda taxa: \";\".join(taxa.values.tolist()), axis=1\n )\n return self.__init_taxonomy_from_lineages(\n taxonomy_series, taxonomy_notation, order_ranks\n )\n else:\n valid_ranks = cols2ranks(taxonomy_dataframe.columns)\n taxonomy_dataframe.columns = valid_ranks\n taxonomy_series = taxonomy_dataframe.apply(\n lambda taxa: \";\".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1\n )\n return self.__init_taxonomy_from_lineages(\n taxonomy_series, taxonomy_notation, order_ranks\n )\n\n @property\n def avail_ranks(self) -> Sequence[str]:\n \"\"\"List of available taxonomic ranks.\"\"\"\n return self.__avail_ranks\n\n @property\n def duplicated(self) -> pd.Index:\n \"\"\"List of duplicated feature indices.\"\"\"\n return self.__internal_taxonomy.index[\n self.__internal_taxonomy[\"lineage\"].duplicated(keep=False)\n ]\n\n @property\n def data(self) -> pd.DataFrame:\n \"\"\"Actual data representation as pd.DataFrame.\"\"\"\n return self.__internal_taxonomy\n\n @property\n def xrid(self) -> pd.Index:\n \"\"\"Feature indices as pd.Index.\"\"\"\n return self.__internal_taxonomy.index\n",
"import warnings\n\nwarnings.simplefilter(\"ignore\", category=FutureWarning)\nfrom shutil import copyfile\nfrom pmaf.sequence._metakit import (\n MultiSequenceMetabase,\n MultiSequenceStreamBackboneMetabase,\n)\nfrom pmaf.sequence._multiple._multiple import MultiSequence\nfrom pmaf.sequence import _shared as seq_shared\nfrom random import random\nfrom pmaf.sequence._sequence._nucleotide import Nucleotide\nfrom pmaf.sequence._metakit import NucleotideMetabase\nfrom Bio import SeqIO\nimport tempfile\nimport pandas as pd\nimport tables\nimport pickle\nimport os\n\n\nclass MultiSequenceStream(MultiSequenceStreamBackboneMetabase):\n \"\"\":meta private:\"\"\"\n\n _temp_filename_suffix_maker = lambda self, path_with_preffix: \"{}_pmaf_{}\".format(\n path_with_preffix, str(round(100000000 * random()))\n )\n _supported_compression_libraries = [\"zlib\", \"lzo\", \"bzip2\", \"blosc\"]\n _default_seq_encoding = \"ascii\"\n _default_complevel = 5\n _default_path_list = [\"/seq\", \"/meta\", \"/info\"]\n _default_info_node_path = \"/info/dump\"\n _default_seqs_node_path = \"/seq/seqs\"\n _default_meta_node_path = \"/meta/metas\"\n\n def __init__(\n self,\n filepath=None,\n expected_rows=1000,\n mode=\"DNA\",\n aligned=False,\n name=None,\n compressor=False,\n ):\n self._name = \"\"\n self._mode = None\n self._stream_filepath = None\n self._stream_storer = None\n self._temp_state = True if filepath is None else False\n self._stream_map = pd.Series(dtype=str)\n self._last_seq_length = None\n self._aligned = False\n self._compressor = None\n self._expected_rows = None\n restore_state = False\n if isinstance(aligned, bool):\n self._aligned = aligned\n else:\n raise TypeError(\"`aligned` must have bool type\")\n if isinstance(expected_rows, int):\n if expected_rows > 0:\n self._expected_rows = expected_rows\n else:\n raise ValueError(\"`expected_rows` must be positive number\")\n else:\n raise TypeError(\"`expected_rows` must have int type\")\n if isinstance(compressor, str):\n if compressor in self._supported_compression_libraries:\n self._compressor = compressor\n else:\n raise ValueError(\n \"Compressor is not supported. Please use one of {}\".format(\n \",\".join(self._supported_compression_libraries)\n )\n )\n elif compressor == False:\n self._compressor = False\n else:\n raise TypeError(\"`compressor` must have string type. 
\")\n if isinstance(name, str):\n self._name = name\n elif name is None:\n pass\n else:\n raise TypeError(\"Name can be only string or None.\")\n if isinstance(mode, str):\n if seq_shared.validate_seq_mode(mode):\n self._mode = mode.lower()\n else:\n raise ValueError(\"Sequence mode can only be 'DNA', 'RNA' or 'Protein'\")\n if isinstance(filepath, str):\n self._stream_filepath = os.path.abspath(filepath)\n if os.path.exists(self._stream_filepath):\n restore_state = True\n elif filepath is None:\n tmp_temp_filepath = self._temp_filename_suffix_maker(\n os.path.join(tempfile.gettempdir(), tempfile.gettempprefix())\n )\n while os.path.exists(tmp_temp_filepath):\n tmp_temp_filepath = self._temp_filename_suffix_maker(\n os.path.join(tempfile.gettempdir(), tempfile.gettempprefix())\n )\n self._stream_filepath = tmp_temp_filepath\n else:\n raise ValueError(\"`filepath` is invalid.\")\n if restore_state:\n if not self._restore_init():\n raise RuntimeError(\"Cannot load file.\")\n else:\n if not self._init_seq_stream_storer():\n raise RuntimeError(\"Cannot be initiate file.\")\n\n def __repr__(self):\n class_name = self.__class__.__name__\n name = self._name if self._name is not None else \"N/A\"\n count = len(self._stream_map)\n stream_filepath = self._stream_filepath\n aligned = \"Yes\" if self._aligned else \"No\"\n repr_str = \"<{}: {}, Total Sequences: {}, Filepath: {}, Aligned: {}>\".format(\n class_name, name, count, stream_filepath, aligned\n )\n return repr_str\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self._temp_state:\n os.unlink(self._stream_filepath)\n return\n\n def _init_seq_stream_storer(self):\n \"\"\"\"\"\"\n ret = False\n try:\n tmp_filters = (\n tables.Filters(\n complib=self._compressor, complevel=self._default_complevel\n )\n if self._compressor\n else None\n )\n tmp_stream_store = tables.open_file(\n self._stream_filepath, mode=\"a\", title=self._name, filters=tmp_filters\n )\n tmp_stream_store.create_group(\"/\", \"seq\", \"Raw sequences\")\n tmp_stream_store.create_vlarray(\n \"/seq\",\n \"seqs\",\n atom=tables.VLStringAtom(),\n expectedrows=self._expected_rows,\n )\n tmp_stream_store.create_group(\"/\", \"meta\", \"Sequence metadata\")\n tmp_stream_store.create_vlarray(\n \"/meta\",\n \"metas\",\n atom=tables.ObjectAtom(),\n expectedrows=self._expected_rows,\n )\n tmp_stream_store.create_group(\"/\", \"info\", \"Instance attributes\")\n tmp_stream_store.create_vlarray(\n \"/info\", \"dump\", atom=tables.ObjectAtom(), expectedrows=1\n )\n self._stream_storer = tmp_stream_store\n ret = True\n except:\n pass\n return ret\n\n def _restore_init(self):\n \"\"\"\"\"\"\n ret = False\n try:\n tmp_stream_store_read = tables.open_file(self._stream_filepath, mode=\"r\")\n group_list = []\n for group in tmp_stream_store_read.walk_groups():\n group_list.append(group._v_pathname)\n if all([group in group_list for group in self._default_path_list]):\n tmp_instance_dict_bytes = tmp_stream_store_read.get_node(\n self._default_info_node_path\n ).read()[0]\n tmp_instance_dict = pickle.loads(tmp_instance_dict_bytes)\n self.__dict__.update(tmp_instance_dict)\n tmp_stream_store_read.close()\n tmp_filters = (\n tables.Filters(\n complib=self._compressor, complevel=self._default_complevel\n )\n if self._compressor\n else None\n )\n tmp_stream_store = tables.open_file(\n self._stream_filepath, mode=\"a\", filters=tmp_filters\n )\n self._stream_storer = tmp_stream_store\n ret = True\n except:\n pass\n return ret\n\n def close(self, copy_filepath=None):\n \"\"\"\n\n Parameters\n 
----------\n copy_filepath :\n (Default value = None)\n\n Returns\n -------\n\n \"\"\"\n tmp_instance_dict = {\n k: v\n for k, v in self.__dict__.items()\n if k not in [\"_stream_filepath\", \"_stream_storer\"]\n }\n tmp_instance_dict_bytes = pickle.dumps(tmp_instance_dict)\n self._stream_storer.get_node(self._default_info_node_path).remove()\n self._stream_storer.create_vlarray(\n \"/info\", \"dump\", atom=tables.ObjectAtom(), expectedrows=1\n )\n self._stream_storer.get_node(self._default_info_node_path).append(\n tmp_instance_dict_bytes\n )\n self._stream_storer.close()\n if copy_filepath is not None and isinstance(copy_filepath, str):\n if not os.path.exists(copy_filepath):\n copyfile(self._stream_filepath, copy_filepath)\n else:\n raise FileExistsError()\n if self._temp_state:\n os.unlink(self._stream_filepath)\n return\n\n def get_sequence_by_acc(self, acc_number):\n \"\"\"\n\n Parameters\n ----------\n acc_number :\n\n\n Returns\n -------\n\n \"\"\"\n ret = None\n if acc_number in self._stream_map.index:\n ret = self._get_sequence_by_acc_id(acc_number)\n return ret\n\n def get_multiseq_by_accs(self, acc_numbers):\n \"\"\"\n\n Parameters\n ----------\n acc_numbers :\n\n\n Returns\n -------\n\n \"\"\"\n ret = None\n if isinstance(acc_numbers, list):\n if len(acc_numbers) > 0:\n if self._stream_map.index.isin(acc_numbers).sum() == len(acc_numbers):\n seq_list = []\n for name in acc_numbers:\n seq_list.append(self._get_sequence_by_acc_id(name))\n ret = MultiSequence(\n seq_list,\n name=self._name,\n aligned=self._aligned,\n metadata={\"accession-numbers\": \"; \".join(acc_numbers)},\n )\n return ret\n\n def iter_sequences(self):\n \"\"\"\"\"\"\n for acc_num in self._stream_map.index.values.tolist():\n yield self._get_sequence_by_acc_id(acc_num)\n\n def write_all_to_fasta(self, fasta_fp, write_in_chunks=100):\n \"\"\"\n\n Parameters\n ----------\n fasta_fp :\n\n write_in_chunks :\n (Default value = 100)\n\n Returns\n -------\n\n \"\"\"\n if not os.path.exists(fasta_fp):\n if isinstance(write_in_chunks, int):\n if write_in_chunks >= 0:\n chunks = (\n len(self._stream_map)\n if write_in_chunks == 0\n else write_in_chunks\n )\n from Bio.Seq import Seq\n\n with open(fasta_fp, \"a\") as fasta_handle:\n chunk_counter = chunks\n records_chunk = []\n for sequence in self.iter_sequences():\n tmp_record_metadata = (\n sequence.metadata[\"description\"]\n if \"description\" in sequence.metadata.keys()\n else self._name\n )\n next_record = SeqIO.SeqRecord(\n Seq(sequence.text),\n sequence.name,\n description=tmp_record_metadata,\n )\n if chunk_counter > 1:\n records_chunk.append(next_record)\n chunk_counter = chunk_counter - 1\n else:\n records_chunk.append(next_record)\n chunk_counter = chunks\n SeqIO.write(records_chunk, fasta_handle, \"fasta\")\n records_chunk = []\n if chunk_counter > 0:\n SeqIO.write(records_chunk, fasta_handle, \"fasta\")\n else:\n raise TypeError(\"`write_in_chunks` must be integer.\")\n else:\n raise FileExistsError(\"Target file must not exists.\")\n return\n\n def _get_sequence_by_acc_id(self, accid):\n \"\"\"\n\n Parameters\n ----------\n accid :\n\n\n Returns\n -------\n\n \"\"\"\n seqid = self._accid_to_seqid(accid)\n seq_str = self._retrieve_seq_by_seqid(seqid)\n seq_meta_pack = self._retrieve_meta_by_seqid(seqid)\n tmp_seq = Nucleotide(seq_str, accid, mode=self._mode)\n tmp_seq.restore_buckle(seq_meta_pack)\n return tmp_seq\n\n def _accid_to_seqid(self, accid):\n \"\"\"\n\n Parameters\n ----------\n accid :\n\n\n Returns\n -------\n\n \"\"\"\n return 
self._stream_map[accid]\n\n def _retrieve_seq_by_seqid(self, seqid):\n \"\"\"\n\n Parameters\n ----------\n seqid :\n\n\n Returns\n -------\n\n \"\"\"\n tmp_seq_bytes = self._stream_storer.get_node(self._default_seqs_node_path)[\n seqid\n ]\n return tmp_seq_bytes.decode(self._default_seq_encoding)\n\n def _retrieve_meta_by_seqid(self, seqid):\n \"\"\"\n\n Parameters\n ----------\n seqid :\n\n\n Returns\n -------\n\n \"\"\"\n tmp_meta_bytes = self._stream_storer.get_node(self._default_meta_node_path)[\n seqid\n ]\n return pickle.loads(tmp_meta_bytes)\n\n def append_sequence(self, sequence):\n \"\"\"\n\n Parameters\n ----------\n sequence :\n\n\n Returns\n -------\n\n \"\"\"\n if isinstance(sequence, NucleotideMetabase):\n if isinstance(sequence, Nucleotide):\n if sequence.mode == self._mode:\n if (sequence.name is not None) and (\n sequence.name not in self._stream_map.index\n ):\n if self._verify_sequence(sequence.text):\n self._append_sequence(sequence)\n self._stream_storer.flush()\n else:\n raise ValueError(\"Sequences do not have same length.\")\n else:\n raise ValueError(\n \"Sequence name must be unique and have legnth > 0.\"\n )\n else:\n raise ValueError(\"All sequences must have same mode.\")\n else:\n raise TypeError(\"`sequence` have invalid type.\")\n\n def _append_sequence(self, sequence_instance):\n \"\"\"\n\n Parameters\n ----------\n sequence_instance :\n\n\n Returns\n -------\n\n \"\"\"\n tmp_metadata = sequence_instance.buckle_for_uid(self._name)\n tmp_seq_str = sequence_instance.text\n seqid = self._insert_seq_vlarray(tmp_seq_str)\n metaid = self._insert_meta_vlarray(tmp_metadata)\n if seqid == metaid:\n self._stream_map[str(sequence_instance.name)] = seqid\n else:\n raise RuntimeError(\n \"Impossible condition. Stream file might have been externally modified!\"\n )\n return\n\n def extend_multiseq(self, multiseq):\n \"\"\"\n\n Parameters\n ----------\n multiseq :\n\n\n Returns\n -------\n\n \"\"\"\n if isinstance(multiseq, MultiSequenceMetabase):\n if multiseq.count > 0:\n if multiseq.mode == self._mode:\n for sequence in multiseq.sequences:\n if (sequence.name is None) or (\n sequence.name in self._stream_map.index\n ):\n raise ValueError(\n \"Sequence name must be unique and have legnth > 0.\"\n )\n if not self._verify_sequence(sequence.text):\n raise ValueError(\"Sequences do not have same length.\")\n self._append_multiseq(multiseq)\n self._stream_storer.flush()\n else:\n raise ValueError(\"All sequences must have same mode.\")\n else:\n raise TypeError(\"`multiseq` have invalid type.\")\n\n def _append_multiseq(self, multiseq):\n \"\"\"\n\n Parameters\n ----------\n multiseq :\n\n\n Returns\n -------\n\n \"\"\"\n for sequence in multiseq.sequences:\n self._append_sequence(sequence)\n return\n\n def append_string(self, name, mode, sequence_str, metadata_dict={}):\n \"\"\"\n\n Parameters\n ----------\n name :\n\n mode :\n\n sequence_str :\n\n metadata_dict :\n (Default value = {})\n\n Returns\n -------\n\n \"\"\"\n if (\n isinstance(name, str)\n and isinstance(sequence_str, str)\n and isinstance(metadata_dict, dict)\n and isinstance(mode, str)\n ):\n if mode == self._mode:\n if len(name) > 0 and (name not in self._stream_map.index):\n if self._verify_sequence(sequence_str):\n self._append_sequence_str(name, sequence_str, metadata_dict)\n self._stream_storer.flush()\n else:\n raise ValueError(\"Sequences do not have same length.\")\n else:\n raise ValueError(\n \"Sequence name must be unique and have legnth > 0.\"\n )\n else:\n raise ValueError(\"All sequences must 
have same mode.\")\n else:\n raise TypeError(\"Invalid parameter types.\")\n return\n\n def _append_sequence_str(self, seq_name, sequence_str, metadata_dict):\n \"\"\"\n\n Parameters\n ----------\n seq_name :\n\n sequence_str :\n\n metadata_dict :\n\n\n Returns\n -------\n\n \"\"\"\n seqid = self._insert_seq_vlarray(sequence_str)\n metaid = self._insert_meta_vlarray(metadata_dict)\n if seqid == metaid:\n self._stream_map[seq_name] = seqid\n else:\n raise RuntimeError(\n \"Impossible condition. Stream file might have been externally modified!\"\n )\n return\n\n def _insert_seq_vlarray(self, seq_data):\n \"\"\"\n\n Parameters\n ----------\n seq_data :\n\n\n Returns\n -------\n\n \"\"\"\n self._last_seq_length = len(seq_data)\n seq_data_bytes = seq_data.encode(self._default_seq_encoding)\n self._stream_storer.get_node(self._default_seqs_node_path).append(\n seq_data_bytes\n )\n return self._stream_storer.get_node(self._default_seqs_node_path).nrows - 1\n\n def _insert_meta_vlarray(self, metadata):\n \"\"\"\n\n Parameters\n ----------\n metadata :\n\n\n Returns\n -------\n\n \"\"\"\n metadata_bytes = pickle.dumps(metadata)\n self._stream_storer.get_node(self._default_meta_node_path).append(\n metadata_bytes\n )\n return self._stream_storer.get_node(self._default_meta_node_path).nrows - 1\n\n def _verify_sequence(self, seq_str):\n \"\"\"\n\n Parameters\n ----------\n seq_str :\n\n\n Returns\n -------\n\n \"\"\"\n ret = True\n if self._aligned:\n if self._last_seq_length is not None:\n if self._last_seq_length == len(seq_str):\n ret = False\n return ret\n\n @property\n def name(self):\n \"\"\"\"\"\"\n return self._name\n\n @property\n def mode(self):\n \"\"\"\"\"\"\n return self._mode\n\n @property\n def count(self):\n \"\"\"\"\"\"\n return len(self._stream_map)\n\n @property\n def summarize(self):\n \"\"\"\"\"\"\n return\n\n @property\n def accession_numbers(self):\n \"\"\"\"\"\"\n return self._stream_map.index.tolist()\n"
] |
[
[
"pandas.DataFrame.from_records",
"numpy.asarray",
"pandas.DataFrame",
"pandas.notna",
"pandas.Series",
"pandas.read_csv",
"numpy.unique"
],
[
"pandas.Series"
]
] |
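A minimal, self-contained sketch of the greengenes-style lineage parsing that __init_taxonomy_from_lineages performs in the pmaf taxonomy code above when notation == "greengenes". The regex and the single-letter rank list below are assumptions made for illustration only; the module's own jRegexGG pattern and VALID_RANKS constant are defined elsewhere in pmaf and may differ.

import re
import pandas as pd

RANKS = ["d", "p", "c", "o", "f", "g", "s"]      # assumed rank codes, stand-in for VALID_RANKS
GG_PATTERN = re.compile(r"([a-z])__([^;]*)")     # hypothetical stand-in for jRegexGG

def lineage_to_row(lineage):
    # Split one "d__Bacteria; p__Firmicutes; ..." string into a rank -> taxon mapping,
    # filling ranks that are absent from the lineage with None.
    found = {rank: (taxon.strip() or None) for rank, taxon in GG_PATTERN.findall(lineage)}
    return {rank: found.get(rank) for rank in RANKS}

lineages = pd.Series(
    ["d__Bacteria; p__Firmicutes; c__Bacilli", "d__Bacteria; p__Proteobacteria"],
    index=["feature-1", "feature-2"],
)
taxonomy = pd.DataFrame([lineage_to_row(lin) for lin in lineages], index=lineages.index)
print(taxonomy)   # one column per rank, None where the lineage stops early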
avigna/COMPAS-1
|
[
"63e1aeef4ed37ff661b8118842ef73a4e9bea1fd"
] |
[
"utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py"
] |
[
"import numpy as np\nimport sys\nimport os\nimport re\nimport ntpath\nfrom subprocess import call\n\n#### DISCLAIMER: This script uses the `pythonSubmit.py` format\n#### that has been replaced by the `runSubmit.py` and \n#### `compasConfigDefault.yaml` combo as of v02.25.10.\n#### The `pythonSubmit.py` format will eventually become deprecated.\n\n# Check if we are using python 3\npython_version = sys.version_info[0]\nprint(\"python_version =\", python_version)\n\nclass pythonProgramOptions:\n \"\"\"\n A class to store and access COMPAS program options in python\n \"\"\"\n\n # Do './COMPAS --help' to see all options\n #-- Define variables\n\n # environment variable COMPAS_EXECUTABLE_PATH is used for docker runs\n # if COMPAS_EXECUTABLE_PATH is not set (== None) we assume this is an\n # interactive run with python3\n # if COMPAS_EXECUTABLE_PATH is set (!= None) we assume this is a run\n # inside a docker container - we have different directories inside a \n # docker container (src, obj, bin), and the COMPAS executable resides\n # in the bin directory (rather than the src directory)\n compas_executable_override = os.environ.get('COMPAS_EXECUTABLE_PATH')\n\n if (compas_executable_override is None):\n \n # we should fix this one day - we should not assume that the COMPAS executable\n # is in the 'src' directory. The standard is to put the object files created\n # by the compile into the 'obj' directory, and the executable files created by\n # the link in the 'bin' directory.\n #\n # for now though, because this is how everybody expects it to be, we'll just check\n # that the path to the root directory (the parent directory of the directory in\n # which we expect the executable to reside - for now, 'src') is set to something.\n\n compas_root_dir = os.environ.get('COMPAS_ROOT_DIR')\n assert compas_root_dir is not None, \"Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists.\"\n\n # construct path to executable \n #\n # ideally we wouldn't have the 'src' directory name (or any other directory name)\n # prepended to the executable name - if we just execute the executable name on its\n # own, as long as the user navigates to the directory in which the executable resides\n # they don't need to set the COMPAS_ROOT_DIR environment variable\n\n compas_executable = os.path.join(compas_root_dir, 'src/COMPAS')\n else:\n compas_executable = compas_executable_override\n\n # check that a file with the correct name exists where we expect it to\n assert os.path.isfile(compas_executable), \"Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists.\"\n\n\n enable_warnings = False # option to enable/disable warning messages\n\n number_of_systems = 10 # number of systems per batch\n\n populationPrinting = False\n\n randomSeedFileName = 'randomSeed.txt'\n if os.path.isfile(randomSeedFileName):\n random_seed = int(np.loadtxt(randomSeedFileName))\n else:\n random_seed = 0 # If you want a random seed, use: np.random.randint(2,2**63-1)\n\n # environment variable COMPAS_LOGS_OUTPUT_DIR_PATH is used primarily for docker runs\n # if COMPAS_LOGS_OUTPUT_DIR_PATH is set (!= None) it is used as the value for the\n # --output-path option\n # if COMPAS_LOGS_OUTPUT_DIR_PATH is not set (== None) the current working directory\n # is used as the value for the --output-path option\n compas_logs_output_override = os.environ.get('COMPAS_LOGS_OUTPUT_DIR_PATH')\n\n 
if (compas_logs_output_override is None):\n output = os.getcwd()\n output_container = None # names the directory to be created and in which log files are created. Default in COMPAS is \"COMPAS_Output\"\n else:\n output = compas_logs_output_override\n output_container = None\n\n # environment variable COMPAS_INPUT_DIR_PATH is used primarily for docker runs\n # if COMPAS_INPUT_DIR_PATH is set (!= None) it is prepended to input filenames\n # (such as grid_filename and logfile_definitions)\n # if COMPAS_INPUT_DIR_PATH is not set (== None) the current working directory\n # is prepended to input filenames\n compas_input_path_override = os.environ.get('COMPAS_INPUT_DIR_PATH')\n\n #-- option to make a grid of hyperparameter values at which to produce populations.\n #-- If this is set to true, it will divide the number_of_binaries parameter equally\n #-- amoungst the grid points (as closely as possible). See the hyperparameterGrid method below\n #-- for more details. If this is set to True, some hyperparameter values defined in this method'gridOutputs/'+str(i)\n #-- will be overwritten\n hyperparameterGrid = False\n hyperparameterList = False\n shareSeeds = False\n\n notes_hdrs = None # no annotations header strings (no annotations)\n notes = None # no annotations\n\n mode = 'BSE' # evolving single stars (SSE) or binaries (BSE)?\n\n grid_filename = 'grid.txt' # grid file name (e.g. 'mygrid.txt')\n\n if grid_filename != None:\n # if the grid filename supplied is already fully-qualified, leave it as is\n head, tail = ntpath.split(grid_filename) # split into pathname and base filename\n \n if head == '' or head == '.': # no path (or CWD) - add path as required\n grid_filename = tail or ntpath.basename(head)\n if compas_input_path_override == None:\n grid_filename = os.getcwd() + '/' + grid_filename.strip(\"'\\\"\")\n else:\n grid_filename = compas_input_path_override + '/' + grid_filename.strip(\"'\\\"\")\n\n logfile_definitions = None # logfile record definitions file name (e.g. 'logdefs.txt')\n\n if logfile_definitions != None:\n # if the grid filename supplied is already fully-qualified, leave it as is\n head, tail = ntpath.split(logfile_definitions) # split into pathname and base filename\n \n if head == '' or head == '.': # no path (or CWD) - add path as required\n logfile_definitions = tail or ntpath.basename(head)\n if compas_input_path_override == None:\n logfile_definitions = os.getcwd() + '/' + logfile_definitions.strip(\"'\\\"\")\n else:\n logfile_definitions = compas_input_path_override + '/' + logfile_definitions.strip(\"'\\\"\")\n\n initial_mass = None # initial mass for SSE\n initial_mass_1 = None # primary initial mass for BSE\n initial_mass_2 = None # secondary initial mass for BSE\n\n mass_ratio = None\n\n eccentricity = None # eccentricity for BSE\n semi_major_axis = None # semi-major axis for BSE\n orbital_period = None # orbital period for BSE\n\n\n use_mass_loss = True\n mass_transfer = True\n detailed_output = True # WARNING: this creates a data heavy file\n RLOFPrinting = True\n evolve_unbound_systems = False\n quiet = False\n\n metallicity = 0.0142 # metallicity for both SSE and BSE - Solar metallicity Asplund+2010\n\n allow_rlof_at_birth = True # allow binaries that have one or both stars in RLOF at birth to evolve?\n allow_touching_at_birth = False # record binaries that have stars touching at birth in output files?\n\n chemically_homogeneous_evolution = 'PESSIMISTIC' # chemically homogeneous evolution. 
Options are 'NONE', 'OPTIMISTIC' and 'PESSIMISTIC'\n\n switch_log = False\n\n common_envelope_alpha = 1.0\n common_envelope_lambda = 0.1 # Only if using 'LAMBDA_FIXED'\n common_envelope_lambda_prescription = 'LAMBDA_NANJING' # Xu & Li 2010\n common_envelope_slope_Kruckow = -5.0/6.0\n stellar_zeta_prescription = 'SOBERMAN'\n common_envelope_revised_energy_formalism = False\n common_envelope_maximum_donor_mass_revised_energy_formalism = 2.0\n common_envelope_recombination_energy_density = 1.5E13\n common_envelope_alpha_thermal = 1.0 # lambda = alpha_th*lambda_b + (1-alpha_th)*lambda_g\n common_envelope_lambda_multiplier = 1.0 # Multiply common envelope lambda by some constant\n common_envelope_allow_main_sequence_survive = True # Allow main sequence stars to survive CE. Was previously False by default\n common_envelope_mass_accretion_prescription = 'ZERO'\n common_envelope_mass_accretion_min = 0.04 # For 'MACLEOD+2014' [Msol]\n common_envelope_mass_accretion_max = 0.10 # For 'MACLEOD+2014' [Msol]\n envelope_state_prescription = 'LEGACY'\n common_envelope_allow_radiative_envelope_survive = False\n common_envelope_allow_immediate_RLOF_post_CE_survive = False\n\n mass_loss_prescription = 'VINK'\n luminous_blue_variable_prescription = 'HURLEY_ADD'\n luminous_blue_variable_multiplier = 1.5\n overall_wind_mass_loss_multiplier = 1.0\n wolf_rayet_multiplier = 1.0\n cool_wind_mass_loss_multiplier = 1.0\n check_photon_tiring_limit = False\n\n circularise_binary_during_mass_transfer = True\n angular_momentum_conservation_during_circularisation = False\n mass_transfer_angular_momentum_loss_prescription = 'ISOTROPIC'\n mass_transfer_accretion_efficiency_prescription = 'THERMAL'\n mass_transfer_fa = 0.5 # Only if using mass_transfer_accretion_efficiency_prescription = 'FIXED'\n mass_transfer_jloss = 1.0 # Only if using mass_transfer_angular_momentum_loss_prescription = 'FIXED'\n mass_transfer_rejuvenation_prescription = 'STARTRACK'\n mass_transfer_thermal_limit_accretor= 'CFACTOR'\n mass_transfer_thermal_limit_C= 10.0\n eddington_accretion_factor = 1 # multiplication Factor for eddington accretion onto NS&BH\n\n case_BB_stability_prescription = 'ALWAYS_STABLE'\n zeta_Main_Sequence = 2.0\n zeta_Radiative_Envelope_Giant = 6.5\n\n maximum_evolution_time = 13700.0 # Maximum physical time a system can be evolved [Myrs]\n maximum_number_timesteps = 99999\n timestep_multiplier = 0.1 # Optional multiplier relative to default time step duration\n\n initial_mass_function = 'KROUPA'\n initial_mass_min = 5.0 # Use 1.0 for LRNe, 5.0 for DCOs [Msol]\n initial_mass_max = 150.0 # Stellar tracks extrapolated above 50 Msol (Hurley+2000) [Msol]\n\n initial_mass_power = 0.0\n\n semi_major_axis_distribution = 'FLATINLOG'\n semi_major_axis_min = 0.01 # [AU]\n semi_major_axis_max = 1000.0 # [AU]\n\n orbital_period_distribution = 'FLATINLOG'\n orbital_period_min = 1.1 # [days]\n orbital_period_max = 1000 # [days]\n\n mass_ratio_distribution = 'FLAT'\n mass_ratio_min = 0.01\n mass_ratio_max = 1.0\n\n minimum_secondary_mass = 0.1 # Brown dwarf limit [Msol]\n\n eccentricity_distribution = 'ZERO'\n eccentricity_min = 0.0\n eccentricity_max = 1.0\n\n metallicity_distribution = 'ZSOLAR'\n metallicity_min = 0.0001\n metallicity_max = 0.03\n\n pulsar_birth_magnetic_field_distribution = 'ZERO'\n pulsar_birth_magnetic_field_min = 11.0 # [log10(B/G)]\n pulsar_birth_magnetic_field_max = 13.0 # [log10(B/G)]\n\n pulsar_birth_spin_period_distribution = \"ZERO\"\n pulsar_birth_spin_period_min = 10.0 # [ms]\n pulsar_birth_spin_period_max = 100.0 
# [ms]\n\n pulsar_magnetic_field_decay_timescale = 1000.0 # [Myr]\n pulsar_magnetic_field_decay_massscale = 0.025 # [Msol]\n pulsar_minimum_magnetic_field = 8.0 # [log10(B/G)]\n\n evolvePulsars = False\n\n rotational_velocity_distribution = 'ZERO'\n\n neutron_star_equation_of_state = 'SSE'\n\n neutrino_mass_loss_BH_formation = \"FIXED_MASS\" # \"FIXED_FRACTION\"\n neutrino_mass_loss_BH_formation_value = 0.1 # Either fraction or mass (Msol) to lose\n\n remnant_mass_prescription = 'FRYER2012' #\n fryer_supernova_engine = 'DELAYED'\n black_hole_kicks = 'FALLBACK'\n kick_magnitude_distribution = 'MAXWELLIAN'\n\n kick_magnitude_sigma_CCSN_NS = 265.0 # [km/s]\n kick_magnitude_sigma_CCSN_BH = 265.0 # [km/s]\n kick_magnitude_sigma_ECSN = 30.0 # [km/s]\n kick_magnitude_sigma_USSN = 30.0 # [km/s]\n\n fix_dimensionless_kick_magnitude = -1\n kick_direction = 'ISOTROPIC'\n kick_direction_power = 0.0\n kick_scaling_factor = 1.0\n kick_magnitude_maximum = -1.0\n\n kick_magnitude_random = None # (SSE) used to draw the kick magnitude for the star should it undergo a supernova event\n kick_magnitude = None # (SSE) (drawn) kick magnitude for the star should it undergo a supernova event [km/s]\n\n kick_magnitude_random_1 = None # (BSE) used to draw the kick magnitude for the primary star should it undergo a supernova event\n kick_magnitude_1 = None # (BSE) (drawn) kick magnitude for the primary star should it undergo a supernova event [km/s]\n kick_theta_1 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the primary star should it undergo a supernova event [radians]\n kick_phi_1 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the primary star should it undergo a supernova event [radians]\n kick_mean_anomaly_1 = None # (BSE) mean anomaly at the instant of the supernova for the primary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians]\n\n kick_magnitude_random_2 = None # (BSE) used to draw the kick velocity for the secondary star should it undergo a supernova event\n kick_magnitude_2 = None # (BSE) (drawn) kick magnitude for the secondary star should it undergo a supernova event [km/s]\n kick_theta_2 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the secondary star should it undergo a supernova event [radians]\n kick_phi_2 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the secondary star should it undergo a supernova event [radians]\n kick_mean_anomaly_2 = None # (BSE) mean anomaly at the instant of the supernova for the secondary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians]\n\n muller_mandel_kick_multiplier_BH = 200.0 # scaling prefactor for BH kicks when using the 'MULLERMANDEL' kick magnitude distribution\n muller_mandel_kick_multiplier_NS = 400.0 # scaling prefactor for NS kicks when using the 'MULLERMANDEL' kick magnitude distribution\n\n pair_instability_supernovae = True\n PISN_lower_limit = 60.0 # Minimum core mass for PISN [Msol]\n PISN_upper_limit = 135.0 # Maximum core mass for PISN [Msol]\n\n pulsation_pair_instability = True\n PPI_lower_limit = 35.0 # Minimum core mass for PPI [Msol]\n PPI_upper_limit = 60.0 # Maximum core mass for PPI [Msol]\n\n pulsational_pair_instability_prescription = 'MARCHANT'\n\n maximum_neutron_star_mass = 2.5 # [Msol]\n\n add_options_to_sysparms = 'GRID' # should all option values be added to system parameters files? 
options are 'ALWAYS', 'GRID', and 'NEVER'\n\n log_level = 0\n log_classes = []\n\n debug_level = 0\n debug_classes = []\n\n logfile_name_prefix = None\n logfile_type = 'HDF5'\n\n hdf5_chunk_size = 100000\n hdf5_buffer_size = 1\n\n\n # set the logfile names here\n #\n # set to None (e.g. logfile_BSE_supernovae = None) to use the default filename\n # set to a string (e.g. logfile_BSE_supernovae = 'mySNfilename') to use that string as the filename \n # set to empty string (e.g. logfile_BSE_supernovae = '\"\"') to disable logging for that file (the file will not be created)\n #\n # We don't really need the 'BSE' or 'SSE' prefixes any more - they were put there because\n # prior to the implementation of the containing folder it was too hard to locate the files\n # created by a COMPAS run - especially the detailed output files. Now that the output\n # files are created inside a containing folder for each run there is really no need for\n # the prefixes - and if we don't have the prefixes we can share some of the options\n # (e.g. specifying the supernovae filename doesn't need to have separate options for \n # SSE and BSE - we really just need one (we only ever run in one mode or the other))\n #\n # For now though, I'll leave them as is - we can change this when (if) we decide to\n # drop the prefixes\n\n logfile_common_envelopes = None\n logfile_detailed_output = None\n logfile_double_compact_objects = None\n logfile_rlof_parameters = None\n logfile_pulsar_evolution = None\n logfile_supernovae = None\n logfile_switch_log = None\n logfile_system_parameters = None\n\n debug_to_file = False\n errors_to_file = False\n\n def booleanChoices(self):\n booleanChoices = [\n self.enable_warnings,\n self.use_mass_loss,\n self.mass_transfer,\n self.detailed_output,\n self.evolve_unbound_systems,\n self.populationPrinting,\n self.RLOFPrinting,\n self.circularise_binary_during_mass_transfer,\n self.angular_momentum_conservation_during_circularisation,\n self.pair_instability_supernovae,\n self.pulsation_pair_instability,\n self.quiet,\n self.common_envelope_allow_main_sequence_survive,\n self.common_envelope_allow_radiative_envelope_survive,\n self.common_envelope_allow_immediate_RLOF_post_CE_survive,\n self.evolvePulsars,\n self.debug_to_file,\n self.errors_to_file,\n self.allow_rlof_at_birth,\n self.allow_touching_at_birth,\n self.switch_log,\n self.check_photon_tiring_limit\n ]\n\n return booleanChoices\n\n def booleanCommands(self):\n booleanCommands = [\n '--enable-warnings',\n '--use-mass-loss',\n '--mass-transfer',\n '--detailed-output',\n '--evolve-unbound-systems',\n '--population-data-printing',\n '--rlof-printing',\n '--circularise-binary-during-mass-transfer',\n '--angular-momentum-conservation-during-circularisation',\n '--pair-instability-supernovae',\n '--pulsational-pair-instability',\n '--quiet',\n '--common-envelope-allow-main-sequence-survive',\n '--common-envelope-allow-radiative-envelope-survive',\n '--common-envelope-allow-immediate-rlof-post-ce-survive',\n '--evolve-pulsars',\n '--debug-to-file',\n '--errors-to-file',\n '--allow-rlof-at-birth',\n '--allow-touching-at-birth',\n '--switch-log',\n '--check-photon-tiring-limit'\n ]\n\n return booleanCommands\n\n def numericalChoices(self):\n numericalChoices = [\n self.number_of_systems,\n self.initial_mass,\n self.initial_mass_1,\n self.initial_mass_2,\n self.eccentricity,\n self.semi_major_axis,\n self.orbital_period,\n self.metallicity,\n self.common_envelope_alpha,\n self.common_envelope_lambda,\n self.common_envelope_slope_Kruckow,\n 
self.common_envelope_alpha_thermal,\n self.common_envelope_lambda_multiplier,\n self.luminous_blue_variable_multiplier,\n self.overall_wind_mass_loss_multiplier,\n self.wolf_rayet_multiplier,\n self.cool_wind_mass_loss_multiplier,\n self.mass_transfer_fa,\n self.mass_transfer_jloss,\n self.maximum_evolution_time,\n self.maximum_number_timesteps,\n self.timestep_multiplier,\n self.initial_mass_min,\n self.initial_mass_max,\n self.initial_mass_power,\n self.semi_major_axis_min,\n self.semi_major_axis_max,\n self.mass_ratio,\n self.mass_ratio_min,\n self.mass_ratio_max,\n self.minimum_secondary_mass,\n self.eccentricity_min,\n self.eccentricity_max,\n self.metallicity_min,\n self.metallicity_max,\n self.pulsar_birth_magnetic_field_min,\n self.pulsar_birth_magnetic_field_max,\n self.pulsar_birth_spin_period_min,\n self.pulsar_birth_spin_period_max,\n self.pulsar_magnetic_field_decay_timescale,\n self.pulsar_magnetic_field_decay_massscale,\n self.pulsar_minimum_magnetic_field,\n self.orbital_period_min,\n self.orbital_period_max,\n self.kick_magnitude_sigma_CCSN_NS,\n self.kick_magnitude_sigma_CCSN_BH,\n self.fix_dimensionless_kick_magnitude,\n self.kick_direction_power,\n self.random_seed,\n self.mass_transfer_thermal_limit_C,\n self.eddington_accretion_factor,\n self.PISN_lower_limit,\n self.PISN_upper_limit,\n self.PPI_lower_limit,\n self.PPI_upper_limit,\n self.maximum_neutron_star_mass,\n self.kick_magnitude_sigma_ECSN,\n self.kick_magnitude_sigma_USSN,\n self.kick_scaling_factor,\n self.common_envelope_maximum_donor_mass_revised_energy_formalism,\n self.common_envelope_recombination_energy_density,\n self.common_envelope_mass_accretion_max,\n self.common_envelope_mass_accretion_min,\n self.zeta_Main_Sequence,\n self.zeta_Radiative_Envelope_Giant,\n self.kick_magnitude_maximum,\n self.kick_magnitude_random,\n self.kick_magnitude,\n self.kick_magnitude_random_1,\n self.kick_magnitude_1,\n self.kick_theta_1,\n self.kick_phi_1,\n self.kick_mean_anomaly_1,\n self.kick_magnitude_random_2,\n self.kick_magnitude_2,\n self.kick_theta_2,\n self.kick_phi_2,\n self.kick_mean_anomaly_2,\n self.muller_mandel_kick_multiplier_BH,\n self.muller_mandel_kick_multiplier_NS,\n self.log_level,\n self.debug_level,\n self.hdf5_chunk_size,\n self.hdf5_buffer_size,\n self.neutrino_mass_loss_BH_formation_value\n ]\n\n return numericalChoices\n\n def numericalCommands(self):\n numericalCommands = [\n '--number-of-systems',\n '--initial-mass',\n '--initial-mass-1',\n '--initial-mass-2',\n '--eccentricity',\n '--semi-major-axis',\n '--orbital-period',\n '--metallicity',\n '--common-envelope-alpha',\n '--common-envelope-lambda',\n '--common-envelope-slope-kruckow',\n '--common-envelope-alpha-thermal',\n '--common-envelope-lambda-multiplier',\n '--luminous-blue-variable-multiplier',\n '--overall-wind-mass-loss-multiplier',\n '--wolf-rayet-multiplier',\n '--cool-wind-mass-loss-multiplier',\n '--mass-transfer-fa',\n '--mass-transfer-jloss',\n '--maximum-evolution-time',\n '--maximum-number-timestep-iterations',\n '--timestep-multiplier',\n '--initial-mass-min',\n '--initial-mass-max',\n '--initial-mass-power',\n '--semi-major-axis-min',\n '--semi-major-axis-max',\n '--mass-ratio',\n '--mass-ratio-min',\n '--mass-ratio-max',\n '--minimum-secondary-mass',\n '--eccentricity-min',\n '--eccentricity-max',\n '--metallicity-min',\n '--metallicity-max',\n '--pulsar-birth-magnetic-field-distribution-min',\n '--pulsar-birth-magnetic-field-distribution-max',\n '--pulsar-birth-spin-period-distribution-min',\n 
'--pulsar-birth-spin-period-distribution-max',\n '--pulsar-magnetic-field-decay-timescale',\n '--pulsar-magnetic-field-decay-massscale',\n '--pulsar-minimum-magnetic-field',\n '--orbital-period-min',\n '--orbital-period-max',\n '--kick-magnitude-sigma-CCSN-NS',\n '--kick-magnitude-sigma-CCSN-BH',\n '--fix-dimensionless-kick-magnitude',\n '--kick-direction-power',\n '--random-seed',\n '--mass-transfer-thermal-limit-C',\n '--eddington-accretion-factor',\n '--pisn-lower-limit',\n '--pisn-upper-limit',\n '--ppi-lower-limit',\n '--ppi-upper-limit',\n '--maximum-neutron-star-mass',\n '--kick-magnitude-sigma-ECSN',\n '--kick-magnitude-sigma-USSN',\n '--kick-scaling-factor',\n '--maximum-mass-donor-nandez-ivanova',\n '--common-envelope-recombination-energy-density',\n '--common-envelope-mass-accretion-max',\n '--common-envelope-mass-accretion-min',\n '--zeta-main-sequence',\n '--zeta-radiative-envelope-giant',\n '--kick-magnitude-max',\n '--kick-magnitude-random',\n '--kick-magnitude',\n '--kick-magnitude-random-1',\n '--kick-magnitude-1',\n '--kick-theta-1',\n '--kick-phi-1',\n '--kick-mean-anomaly-1',\n '--kick-magnitude-random-2',\n '--kick-magnitude-2',\n '--kick-theta-2',\n '--kick-phi-2',\n '--kick-mean-anomaly-2',\n '--muller-mandel-kick-multiplier-BH',\n '--muller-mandel-kick-multiplier-NS',\n '--log-level',\n '--debug-level',\n '--hdf5-chunk-size',\n '--hdf5-buffer-size',\n '--neutrino-mass-loss-BH-formation-value'\n ]\n\n return numericalCommands\n\n def stringChoices(self):\n stringChoices = [\n self.notes_hdrs,\n self.notes,\n self.mode,\n self.case_BB_stability_prescription,\n self.chemically_homogeneous_evolution,\n self.luminous_blue_variable_prescription,\n self.mass_loss_prescription,\n self.mass_transfer_angular_momentum_loss_prescription,\n self.mass_transfer_accretion_efficiency_prescription,\n self.mass_transfer_rejuvenation_prescription,\n self.initial_mass_function,\n self.semi_major_axis_distribution,\n self.orbital_period_distribution,\n self.mass_ratio_distribution,\n self.eccentricity_distribution,\n self.metallicity_distribution,\n self.rotational_velocity_distribution,\n self.remnant_mass_prescription,\n self.fryer_supernova_engine,\n self.black_hole_kicks,\n self.kick_magnitude_distribution,\n self.kick_direction,\n self.output,\n self.output_container,\n self.common_envelope_lambda_prescription,\n self.stellar_zeta_prescription,\n self.mass_transfer_thermal_limit_accretor,\n self.pulsational_pair_instability_prescription,\n self.neutron_star_equation_of_state,\n self.pulsar_birth_magnetic_field_distribution,\n self.pulsar_birth_spin_period_distribution,\n self.common_envelope_mass_accretion_prescription,\n self.envelope_state_prescription,\n self.logfile_name_prefix,\n self.logfile_type,\n self.logfile_definitions,\n self.grid_filename,\n self.logfile_common_envelopes,\n self.logfile_detailed_output,\n self.logfile_double_compact_objects,\n self.logfile_pulsar_evolution,\n self.logfile_rlof_parameters,\n self.logfile_supernovae,\n self.logfile_switch_log,\n self.logfile_system_parameters,\n self.neutrino_mass_loss_BH_formation,\n self.add_options_to_sysparms\n ]\n\n return stringChoices\n\n def stringCommands(self):\n stringCommands = [\n '--notes-hdrs',\n '--notes',\n '--mode',\n '--case-BB-stability-prescription',\n '--chemically-homogeneous-evolution',\n '--luminous-blue-variable-prescription',\n '--mass-loss-prescription',\n '--mass-transfer-angular-momentum-loss-prescription',\n '--mass-transfer-accretion-efficiency-prescription',\n 
'--mass-transfer-rejuvenation-prescription',\n '--initial-mass-function',\n '--semi-major-axis-distribution',\n '--orbital-period-distribution',\n '--mass-ratio-distribution',\n '--eccentricity-distribution',\n '--metallicity-distribution',\n '--rotational-velocity-distribution',\n '--remnant-mass-prescription',\n '--fryer-supernova-engine',\n '--black-hole-kicks',\n '--kick-magnitude-distribution',\n '--kick-direction',\n '--output-path',\n '--output-container',\n '--common-envelope-lambda-prescription',\n '--stellar-zeta-prescription',\n '--mass-transfer-thermal-limit-accretor',\n '--pulsational-pair-instability-prescription',\n '--neutron-star-equation-of-state',\n '--pulsar-birth-magnetic-field-distribution',\n '--pulsar-birth-spin-period-distribution',\n '--common-envelope-mass-accretion-prescription',\n '--envelope-state-prescription',\n '--logfile-name-prefix',\n '--logfile-type',\n '--logfile-definitions',\n '--grid',\n '--logfile-common-envelopes',\n '--logfile-detailed-output',\n '--logfile-double-compact-objects',\n '--logfile-pulsar-evolution',\n '--logfile-rlof-parameters',\n '--logfile-supernovae',\n '--logfile-switch-log',\n '--logfile-system-parameters',\n '--neutrino-mass-loss-BH-formation',\n '--add-options-to-sysparms'\n ]\n\n return stringCommands\n\n def listChoices(self):\n listChoices = [\n self.log_classes,\n self.debug_classes\n ]\n\n return listChoices\n\n def listCommands(self):\n listCommands = [\n '--log-classes',\n '--debug-classes'\n ]\n\n return listCommands\n\n\n def generateCommandLineOptionsDict(self):\n \"\"\"\n This function generates a dictionary mapping COMPAS options to their specified \n values (or empty strings for boolean options). These can be combined into a string\n and run directly as a terminal command, or passed to the stroopwafel interface\n where some of them may be overwritten. 
Options not to be included in the command \n line should be set to pythons None (except booleans, which should be set to False)\n\n Parameters\n -----------\n self : pythonProgramOptions\n Contains program options\n\n Returns\n --------\n commands : str or list of strs\n \"\"\"\n booleanChoices = self.booleanChoices()\n booleanCommands = self.booleanCommands()\n nBoolean = len(booleanChoices)\n assert len(booleanCommands) == nBoolean\n\n numericalChoices = self.numericalChoices()\n numericalCommands = self.numericalCommands()\n nNumerical = len(numericalChoices)\n assert len(numericalCommands) == nNumerical\n\n stringChoices = self.stringChoices()\n stringCommands = self.stringCommands()\n nString = len(stringChoices)\n assert len(stringCommands) == nString\n\n listChoices = self.listChoices()\n listCommands = self.listCommands()\n nList = len(listChoices)\n assert len(listCommands) == nList\n\n\n ### Collect all options into a dictionary mapping option name to option value\n\n command = {'compas_executable' : self.compas_executable}\n\n for i in range(nBoolean):\n if booleanChoices[i] == True:\n command.update({booleanCommands[i] : ''})\n elif booleanChoices[i] == False:\n command.update({booleanCommands[i] : 'False'})\n\n for i in range(nNumerical):\n if not numericalChoices[i] == None:\n command.update({numericalCommands[i] : str(numericalChoices[i])})\n\n for i in range(nString):\n if not stringChoices[i] == None:\n command.update({stringCommands[i] : cleanStringParameter(stringChoices[i])})\n\n for i in range(nList):\n if listChoices[i]:\n command.update({listCommands[i] : ' '.join(map(str,listChoices[i]))})\n\n return command\n\n\ndef combineCommandLineOptionsDictIntoShellCommand(commandOptions):\n \"\"\"\n Write out the compas input parameters into a shell string.\n Ensure the Compas executable is first, and not repeated.\n Options are non-ordered.\n \"\"\"\n\n shellCommand = commandOptions['compas_executable']\n del commandOptions['compas_executable']\n for key, val in commandOptions.items():\n shellCommand += ' ' + key + ' ' + val\n\n return shellCommand\n\n\ndef cleanStringParameter(str_param):\n \"\"\" clean up string parameters to avoid confusing Boost \"\"\"\n if str_param is not None:\n # strip any quotes from the ends of the string\n str_param = str_param.strip(\"'\\\"\")\n\n # escape any unescaped spaces or quotes within the string\n escapes = [\" \", \"'\", \"\\\"\"]\n for escape in escapes:\n str_param = re.sub(r\"(?<!\\\\){}\".format(escape), r\"\\{}\".format(escape), str_param)\n return str_param\n\n\nif __name__ == \"__main__\":\n\n #-- Get the program options\n programOptions = pythonProgramOptions()\n commandOptions = programOptions.generateCommandLineOptionsDict()\n\n #-- Convert options into a shell string\n shellCommand = combineCommandLineOptionsDictIntoShellCommand(commandOptions)\n\n #-- Run exectute COMPAS shell string\n print(shellCommand)\n call(shellCommand,shell=True)\n\n"
] |
[
[
"numpy.loadtxt"
]
] |
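A short stand-alone sketch of how the options dictionary produced by generateCommandLineOptionsDict() in the pythonSubmit.py above is flattened into a shell string by combineCommandLineOptionsDictIntoShellCommand(), including the convention that enabled boolean options carry an empty value so they appear as bare switches. The executable path and option values here are illustrative, not COMPAS defaults.

def combine(command_options):
    # Mirrors combineCommandLineOptionsDictIntoShellCommand(): executable first, then
    # every remaining "option value" pair in dictionary insertion order.
    shell_command = command_options.pop('compas_executable')
    for key, val in command_options.items():
        shell_command += ' ' + key + ' ' + val
    return shell_command

opts = {
    'compas_executable': './COMPAS',     # illustrative path
    '--number-of-systems': '10',
    '--mode': 'BSE',
    '--detailed-output': '',             # a True boolean choice maps to an empty value
}
print(combine(dict(opts)))
# -> "./COMPAS --number-of-systems 10 --mode BSE --detailed-output " (the empty value leaves a trailing space)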
brianjstroh/BrawlStars
|
[
"1d4700ee9e1a7f595bf76bb205cbe1f65f6b635f"
] |
[
"Capstone_Tables&Figures_Results_Graphs.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 25 21:37:26 2021\n\n@author: brian\n\"\"\"\nimport os\nos.chdir('C:/Users/brian/Desktop/All/UWEC/DS785_Capstone/Project')\nimport brawl_data as bd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom statsmodels.stats.proportion import proportion_confint\n\nall_win_rates = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', \n custom_query = \"SELECT mode, map, brawler, wins, matches_played FROM population_aggs_high;\")\n\nall_win_rates['win_rate'] = all_win_rates['wins']/all_win_rates['matches_played']\nall_win_rates = all_win_rates.loc[all_win_rates['matches_played']>10,:]\nwin_rate_extremes = all_win_rates.groupby(['mode', 'map']).win_rate.agg(['min', 'max'])\nwin_rate_extremes = win_rate_extremes.reset_index()\nwin_rate_extremes['win_rate_differential'] = win_rate_extremes['max'] - win_rate_extremes['min']\nwin_rate_extremes = win_rate_extremes.sort_values(by = 'win_rate_differential')\nwin_rate_extremes.columns = ['Mode', 'Map', 'Minimum Brawler Win Rate', 'Maximum Brawler Win Rate', 'Win Rate Differential']\nsns.set_style(\"darkgrid\")\nsns.scatterplot(data=win_rate_extremes, \n x='Minimum Brawler Win Rate', \n y='Maximum Brawler Win Rate', \n hue='Win Rate Differential', \n palette=sns.cubehelix_palette(start=2, rot=0, dark=.2, light=.8, as_cmap=True))\nplt.title('Win Rates Differences for Brawlers Across Each Map-Mode')\n\nsns.violinplot(x=win_rate_extremes['Win Rate Differential'])\nplt.title('Differences Between Maximum and Minimum Win Rates for Brawlers Across Each Map-Mode')\n\n\nfor_example = all_win_rates.loc[all_win_rates['map'] == 'Split', :].sort_values('win_rate', ascending = False)\nfor_example = for_example.loc[:,['map', 'mode', 'brawler', 'win_rate']]\nfor_example = pd.concat([for_example.head(5),for_example.tail(5)])\n\nfor_example_2 = pd.concat([win_rate_extremes.head(5),win_rate_extremes.tail(5)])\nfor_example_2 = for_example_2.sort_values('Win Rate Differential', ascending=False)\n\n\nexample = bd.get_recommendation('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#2G080980', 'brawlBall', 'Sneaky Fields', 0, 4)\nexample = pd.concat([example.head(5),example.tail(5)])\n\nmy_recs = bd.get_all_recommendations('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#8VUPQ2PP', my_trophy_min = 500)\n\nmap_weaknesses = bd.get_map_weaknesses('dbname=BrawlStars user=postgres password=PG!3%7(', 'records')\nmap_weaknesses.head(10)\n\nall_individual_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', \n custom_query = \"SELECT * FROM individual_aggs_high UNION ALL SELECT * FROM individual_aggs_mid UNION ALL SELECT * FROM individual_aggs_low;\")\nall_population_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', \n custom_query = \"SELECT * FROM population_aggs_high UNION ALL SELECT * FROM population_aggs_mid UNION ALL SELECT * FROM population_aggs_low;\")\n\n#Calculate win rate confidence intervals\nall_individual_history['win_rate'] = all_individual_history['wins'] / all_individual_history['matches_played']\nall_individual_history['ci.lower'],all_individual_history['ci.upper'] = zip(*all_individual_history.apply(lambda row : proportion_confint(count = row['wins'], nobs = row['matches_played'], alpha = .1, method = 'agresti_coull'), axis = 1))\n\nall_population_history['win_rate'] = 
all_population_history['wins'] / all_population_history['matches_played']\nall_individual_history = all_population_history.merge(all_individual_history, how = 'left', left_on = ['mode', 'map', 'brawler'], right_on = ['mode', 'map', 'brawler'])\n\n#Compare population to individual history and inform recommendations\nbetter = (all_individual_history['win_rate_x'] < all_individual_history['ci.lower']) & (all_individual_history['matches_played_y'] >= 5)\nworse = (all_individual_history['win_rate_x'] > all_individual_history['ci.upper']) & (all_individual_history['matches_played_y'] >= 5)\nsum(better) + sum(worse)"
] |
[
[
"matplotlib.pyplot.title"
]
] |
STASYA00/CityMorph
|
[
"18b8273a1ae0a4e8f234850d0c98640a2483ab04"
] |
[
"scripts/writer.py"
] |
[
"import json\nimport os\nimport pandas as pd\nimport sys\n\nsys.path.append('scripts/')\nfrom polygon import Collection, Footprint\n\n\nclass Writer:\n\t\"\"\"\n\tClass that stores smart label values per instance\n\t\"\"\"\n\tdef __init__(self, filename):\n\t\t\"\"\"\n\t\tClass initialization.\n\t\t:param filename: name of the file to store the data, str\n\t\t\"\"\"\n\t\tself.filename = filename\n\t\tself.content = {}\n\n\tdef add(self, instance, result):\n\t\t\"\"\"\n\t\tFunction that adds an instance with its smart labels to the collection\n\t\t:param instance: name of instance, str\n\t\t:param result: smart labels, dict {label_name: label_value}\n\t\t:return:\n\t\t\"\"\"\n\t\tself.content[instance] = result\n\n\tdef get_instances(self) -> list:\n\t\t\"\"\"\n\t\tFunction that gets the instances that already exist in the file\n\t\t:return: existing instances, list\n\t\t\"\"\"\n\t\treturn list(self.content.keys())\n\n\tdef reset(self):\n\t\t\"\"\"\n\t\tFunction that resets the file to an empty state.\n\t\t:return:\n\t\t\"\"\"\n\t\tdel self.content\n\t\tself.content = {}\n\n\tdef save(self):\n\t\t\"\"\"\n\t\tFunction that saves all the smart labels in the class to a local file\n\t\tTODO: add saving to AWS based on AWS_SAVE in config\n\t\t:return:\n\t\t\"\"\"\n\t\twith open(self.filename, \"w\") as f:\n\t\t\tjson.dump(self.content, f)\n\n\nclass JsonWriter(Writer):\n\t\"\"\"\n\tClass that saves results in json format.\n\t\"\"\"\n\tdef __init__(self, filename='test'):\n\t\tWriter.__init__(self, filename)\n\t\tif not self.filename.endswith('.json'):\n\t\t\tself.filename += '.json'\n\t\t# with open(self.filename, 'r') as f:\n\t\t# \tself.content = json.load(f)\n\t\tself.content = {}\n\n\tdef save(self):\n\t\t\"\"\"\n\t\tFunction that saves the writer's content to local system in json format.\n\t\t:return:\n\t\t\"\"\"\n\t\twith open(self.filename, 'a') as json_file:\n\t\t\tjson.dump(self.content, json_file)\n\n\nclass CsvWriter:\n\tdef __init__(self, filename='result', features=[]):\n\t\tassert isinstance(filename, str), \"Expected name to be str, got {}\".format(filename)\n\n\t\tself.filename = filename\n\t\tself.features = features\n\t\tself.content = {}\n\t\tif self.filename + '.csv' in os.listdir():\n\t\t\tself.csv = pd.read_csv(self.filename + '.csv', index_col=0)\n\t\t\t# self.csv = self.csv.to_dict(orient='list')\n\t\telse:\n\t\t\tself.csv = {}\n\t\t\tself.reset()\n\t\t\tself.csv = pd.DataFrame(self.csv)\n\t\t\tself.csv.to_csv(self.filename + '.csv', mode='w')\n\t\t\tprint('csv saved as {}.csv'.format(self.filename))\n\n\tdef add(self, instance, result):\n\t\tif self._check(result):\n\t\t\tfor _feature in list(result.keys()):\n\t\t\t\tif _feature not in list(self.csv.keys()):\n\t\t\t\t\treturn ValueError\n\t\t\tself.content[instance] = result\n\n\t\t\tresult = {key: [value] for key, value in result.items()}\n\n\t\t\t_df = pd.DataFrame.from_dict(result)\n\t\t\tself.csv = self.csv.append(_df, ignore_index=True)\n\n\tdef _check(self, result):\n\t\treturn len(list(result.keys())) == len(self.features)\n\n\tdef save(self):\n\t\tdf = pd.DataFrame(self.csv)\n\t\tdf.to_csv(self.filename + '.csv', mode='a', header=False)\n\n\tdef reset(self):\n\t\tself.csv = {}\n\t\t# self.csv['iter'] = []\n\t\tfor feature in self.features:\n\t\t\tself.csv[feature] = []\n\n\nclass ShpWriter:\n\tdef __init__(self, name='result'):\n\t\tself.name = name\n\n\tdef save(self, collection):\n\t\tif not isinstance(collection, Collection):\n\t\t\tprint('Expected Collection, got {}'.format(collection))\n\t\t\traise 
TypeError\n\t\tif not isinstance(collection.class_type, Footprint.__class__):\n\t\t\tprint('Collection should be made of Footprints, got {}'.format(collection.class_type))\n\t\t\traise AttributeError\n\t\tr = []\n\t\tfor f in collection:\n\t\t\tr.append(f.polygon)\n\t\tdict = {'name': [0 for x in r], 'geometry': r}\n\t\tdf = gpd.GeoDataFrame(dict)\n\t\tdf.to_file('{}.shp'.format(self.name))"
] |
[
[
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.read_csv"
]
] |
MengwenHe-CMU/17S_10701_MachineLearning
|
[
"613a3087a57a206b83d79855cec359e04cb440f7"
] |
[
"Homeworks/HW2/Python/hw2_B.py"
] |
[
"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom matplotlib import pyplot\nfrom matplotlib import gridspec\nimport numpy\n\nclass IonosphereData:\n g_features = list()\n b_features = list()\n def __init__(self):\n self.g_features=list()\n self.b_features=list()\n\ndef load_data(filename):\n with open(filename, 'rt') as stopWordsFile:\n data = IonosphereData()\n for line in stopWordsFile:\n feature = line.strip().split(',')\n if(feature[34]=='g'):\n featurevalues = list()\n for id in range(34):\n featurevalues.append(float(feature[id]))\n data.g_features.append(featurevalues)\n else:\n featurevalues = list()\n for id in range(34):\n featurevalues.append(float(feature[id]))\n data.b_features.append(featurevalues)\n return data\n\nclass IonosphereDataCV:\n train = IonosphereData()\n test = IonosphereData()\n def __init__(self):\n self.train.g_features=list()\n self.train.b_features=list()\n self.test.g_features=list()\n self.test.b_features=list()\n\ndef split_data(data, rate):\n datacv = IonosphereDataCV()\n\n gn = len(data.g_features)\n grand = numpy.random.rand(gn)\n gsortedids = sorted(range(gn), key=lambda i: grand[i])\n for id in gsortedids[:int(gn*rate)]:\n datacv.train.g_features.append(data.g_features[id])\n for id in gsortedids[-(gn-int(gn*rate)):]:\n datacv.test.g_features.append(data.g_features[id])\n\n bn = len(data.b_features)\n brand = numpy.random.rand(bn)\n bsortedids = sorted(range(bn), key=lambda i: brand[i])\n for id in bsortedids[:int(bn * rate)]:\n datacv.train.b_features.append(data.b_features[id])\n for id in bsortedids[-(bn - int(bn * rate)):]:\n datacv.test.b_features.append(data.b_features[id])\n return datacv\n\n\nif __name__ == '__main__':\n data = load_data('./Problem2/ionosphere.txt')\n datacv = split_data(data,0.8)\n\n adaboost = list()\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=DecisionTreeClassifier(max_depth=4, min_samples_leaf=1)))\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)))\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=BernoulliNB()))\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=LogisticRegression()))\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=svm.SVC(probability=True, kernel='linear')))\n adaboost.append(AdaBoostClassifier(n_estimators=100, base_estimator=svm.SVC(probability=True, kernel='rbf')))\n\n weakestimatornames = [\"DecisionTreeClassifier (max_depth=4)\", \"DecisionStumpClassifier\", \"BernoulliNB\", \"LogisticRegression\", \"Linear SVM\", \"RBF SVM\"]\n\n trainfeatures = datacv.train.g_features + datacv.train.b_features\n gtrainlen = len(datacv.train.g_features)\n btrainlen = len(datacv.train.b_features)\n trainlabel = numpy.ones(gtrainlen).tolist() + numpy.zeros(btrainlen).tolist()\n\n testfeatures = datacv.test.g_features + datacv.test.b_features\n gtestlen = len(datacv.test.g_features)\n btestlen = len(datacv.test.b_features)\n testlabel = numpy.ones(gtestlen).tolist() + numpy.zeros(btestlen).tolist()\n\n fig = pyplot.figure()\n ax = list()\n ax.append(fig.add_subplot(2, 3, 1))\n ax.append(fig.add_subplot(2, 3, 2))\n ax.append(fig.add_subplot(2, 3, 3))\n ax.append(fig.add_subplot(2, 3, 4))\n ax.append(fig.add_subplot(2, 3, 5))\n ax.append(fig.add_subplot(2, 
3, 6))\n\n for id in range(6):\n print(id)\n\n adaboost[id].fit(trainfeatures, trainlabel)\n train_err = list()\n for i,y_pred in enumerate(adaboost[id].staged_predict(trainfeatures)):\n train_err.append(metrics.zero_one_loss(trainlabel,y_pred))\n print(1-min(train_err))\n test_err = list()\n for i, y_pred in enumerate(adaboost[id].staged_predict(testfeatures)):\n test_err.append(metrics.zero_one_loss(testlabel, y_pred))\n print(1-min(test_err))\n\n ax[id].set_title(weakestimatornames[id])\n ax[id].set_xlabel(\"n_estimators\")\n ax[id].set_ylabel(\"error\")\n ax[id].plot(numpy.arange(100) + 1, train_err,\n label='Train Error',\n color='blue')\n ax[id].plot(numpy.arange(100) + 1, test_err,\n label='Test Error',\n color='red')\n\n ax[id].legend()\n\n pyplot.show()"
] |
[
[
"numpy.random.rand",
"numpy.zeros",
"sklearn.metrics.zero_one_loss",
"numpy.ones",
"matplotlib.pyplot.figure",
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.naive_bayes.BernoulliNB",
"numpy.arange",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.show"
]
] |
fabianrost84/pymc
|
[
"f514a8a42475ca12f2b0c2c9592c8c2f890d9f02"
] |
[
"pymc/StepMethods.py"
] |
[
"from __future__ import division\n\nimport numpy as np\nfrom .utils import msqrt, check_type, round_array, float_dtypes, integer_dtypes, bool_dtypes, safe_len, find_generations, logp_of_set, symmetrize, logp_gradient_of_set\nfrom numpy import ndim, ones, zeros, log, shape, cov, ndarray, inner, reshape, sqrt, any, array, all, abs, exp, where, isscalar, iterable, multiply, transpose, tri, pi\nfrom numpy.linalg.linalg import LinAlgError\nfrom numpy.linalg import pinv, cholesky\nfrom numpy.random import randint, random\nfrom numpy.random import normal as rnormal\nfrom numpy.random import poisson as rpoisson\nfrom numpy.random import uniform as runiform\nfrom numpy.random import exponential as rexponential\nfrom .PyMCObjects import Stochastic, Potential, Deterministic\nfrom .Container import Container\nfrom .Node import ZeroProbability, Node, Variable, StochasticBase\nfrom .decorators import prop\nfrom . import distributions\nfrom copy import copy\nfrom .InstantiationDecorators import deterministic\nimport pdb\nimport warnings\nimport sys\nimport inspect\n\nfrom . import six\nfrom .six import print_\n\n__docformat__ = 'reStructuredText'\n\nconjugate_Gibbs_competence = 0\nnonconjugate_Gibbs_competence = 0\n\n\nclass AdaptationError(ValueError):\n pass\n\n\n__all__ = [\n 'DiscreteMetropolis',\n 'Metropolis',\n 'PDMatrixMetropolis',\n 'StepMethod',\n 'assign_method',\n 'pick_best_methods',\n 'StepMethodRegistry',\n 'NoStepper',\n 'BinaryMetropolis',\n 'AdaptiveMetropolis',\n 'Gibbs',\n 'conjugate_Gibbs_competence',\n 'nonconjugate_Gibbs_competence',\n 'DrawFromPrior',\n 'Slicer']\n\n\nStepMethodRegistry = []\n\n\ndef pick_best_methods(stochastic):\n \"\"\"\n Picks the StepMethods best suited to handle\n a stochastic variable.\n \"\"\"\n\n # Keep track of most competent methohd\n max_competence = 0\n # Empty set of appropriate StepMethods\n best_candidates = set([])\n\n # Loop over StepMethodRegistry\n for method in StepMethodRegistry:\n\n # Parse method and its associated competence\n try:\n competence = method.competence(stochastic)\n except:\n competence = 0\n\n # If better than current best method, promote it\n if competence > max_competence:\n best_candidates = set([method])\n max_competence = competence\n\n # If same competence, add it to the set of best methods\n elif competence == max_competence:\n best_candidates.add(method)\n\n if max_competence <= 0:\n raise ValueError(\n 'Maximum competence reported for stochastic %s is <= 0... you may need to write a custom step method class.' %\n stochastic.__name__)\n\n # print_(s.__name__ + ': ', best_candidates, ' ', max_competence)\n return best_candidates\n\n\ndef assign_method(stochastic, scale=None, verbose=-1):\n \"\"\"\n Returns a step method instance to handle a\n variable. If several methods have the same competence,\n it picks one arbitrarily (using set.pop()).\n \"\"\"\n\n # Retrieve set of best candidates\n best_candidates = pick_best_methods(stochastic)\n\n # Randomly grab and appropriate method\n method = best_candidates.pop()\n\n failure_header = \"\"\"Failed attempting to automatically assign step method class %s\nto stochastic variable %s. Try setting %s's competence method to return 0\nand manually assigning it when appropriate. 
See the user guide.\n\nError message: \"\"\" % (method.__name__, stochastic.__name__, method.__name__)\n\n try:\n if scale:\n out = method(stochastic, scale=scale, verbose=verbose)\n else:\n out = method(stochastic, verbose=verbose)\n except:\n a, b, c = sys.exc_info()\n try:\n args = list(b.args)\n except AttributeError:\n args = []\n args.append(failure_header)\n b.args = args\n six.reraise(a, b, c)\n return out\n\n\nclass StepMethodMeta(type):\n\n \"\"\"\n Automatically registers new step methods if they can be automatically assigned:\n if their init method has one and only one required argument.\n \"\"\"\n def __init__(cls, name, bases, dict):\n type.__init__(cls, name, bases, dict)\n args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)\n auto_assignment_OK = False\n if len(args) == 2:\n auto_assignment_OK = True\n elif len(args) > 2:\n if defaults is not None:\n if len(defaults) == len(args) - 2:\n auto_assignment_OK = True\n elif len(args) == 1 and varargs is not None:\n auto_assignment_OK = True\n\n if auto_assignment_OK:\n StepMethodRegistry.append(cls)\n\n\nclass StepMethod(object):\n\n \"\"\"\n This object knows how to make Stochastics take single MCMC steps.\n Its step() method will be called by Model at every MCMC iteration.\n\n :Parameters:\n -variables : list, array or set\n Collection of PyMCObjects\n\n - verbose (optional) : integer\n Level of output verbosity: 0=none, 1=low, 2=medium, 3=high. Setting to -1 (Default) allows verbosity to be set by sampler.\n\n Externally-accessible attributes:\n stochastics: The Stochastics over which self has jurisdiction which have observed = False.\n children: The combined children of all Variables over which self has jurisdiction.\n parents: The combined parents of all Nodes over which self has jurisdiction, as a set.\n loglike: The summed log-probability of self's children conditional on all of self's\n Variables' current values. These will be recomputed only as necessary.\n This descriptor should eventually be written in C.\n\n Externally accesible methods:\n sample(): A single MCMC step for all the Stochastics over which self has\n jurisdiction. 
Must be overridden in subclasses.\n tune(): Tunes proposal distribution widths for all self's Stochastics.\n competence(s): Examines Stochastic instance s and returns self's\n competence to handle it, on a scale of 0 to 3.\n\n To instantiate a StepMethod called S with jurisdiction over a\n sequence/set N of Nodes:\n\n >>> S = StepMethod(N)\n\n :SeeAlso: Metropolis, Sampler.\n \"\"\"\n\n def __init__(self, variables, verbose=-1, tally=False):\n # StepMethod initialization\n\n if not iterable(variables) or isinstance(variables, Node):\n variables = [variables]\n\n self.stochastics = set()\n self.children = set()\n self.parents = set()\n self.tally = tally\n\n self._state = []\n self._tuning_info = []\n self.verbose = verbose\n\n # File away the variables\n for variable in variables:\n # Sort.\n\n if isinstance(variable, Stochastic) and not variable.observed:\n self.stochastics.add(variable)\n\n if len(self.stochastics) == 0:\n raise ValueError('No stochastics provided.')\n\n # Find children, no need to find parents; each variable takes care of\n # those.\n for variable in variables:\n self.children |= variable.children\n for parent in six.itervalues(variable.parents):\n if isinstance(parent, Variable):\n self.parents.add(parent)\n\n self.children = set([])\n self.parents = set([])\n for s in self.stochastics:\n self.children |= s.extended_children\n self.parents |= s.extended_parents\n\n # Remove own stochastics from children and parents.\n self.children -= self.stochastics\n self.parents -= self.stochastics\n\n # self.markov_blanket is a list, because we want self.stochastics to have the chance to\n # raise ZeroProbability exceptions before self.children.\n self.markov_blanket = list(self.stochastics) + list(self.children)\n\n # ID string for verbose feedback\n self._id = self.__class__.__name__ + '_' + \\\n '_'.join([s.__name__ for s in self.stochastics])\n\n def step(self):\n \"\"\"\n Specifies single step of step method.\n Must be overridden in subclasses.\n \"\"\"\n pass\n\n @staticmethod\n def competence(s):\n \"\"\"\n This function is used by Sampler to determine which step method class\n should be used to handle stochastic variables.\n\n Return value should be a competence\n score from 0 to 3, assigned as follows:\n\n 0: I can't handle that variable.\n 1: I can handle that variable, but I'm a generalist and\n probably shouldn't be your top choice (Metropolis\n and friends fall into this category).\n 2: I'm designed for this type of situation, but I could be\n more specialized.\n 3: I was made for this situation, let me handle the variable.\n\n In order to be eligible for inclusion in the registry, a sampling\n method's init method must work with just a single argument, a\n Stochastic object.\n\n If you want to exclude a particular step method from\n consideration for handling a variable, do this:\n\n Competence functions MUST be called 'competence' and be decorated by the\n '@staticmethod' decorator. 
Example:\n\n @staticmethod\n def competence(s):\n if isinstance(s, MyStochasticSubclass):\n return 2\n else:\n return 0\n\n :SeeAlso: pick_best_methods, assign_method\n \"\"\"\n return 0\n\n def tune(self, *args, **kwargs):\n return False\n\n @property\n def loglike(self):\n '''\n The summed log-probability of all stochastic variables that depend on\n self.stochastics, with self.stochastics removed.\n '''\n sum = logp_of_set(self.children)\n if self.verbose > 2:\n print_('\\t' + self._id + ' Current log-likelihood ', sum)\n return sum\n\n @property\n def logp_plus_loglike(self):\n '''\n The summed log-probability of all stochastic variables that depend on\n self.stochastics, and self.stochastics.\n '''\n sum = logp_of_set(self.markov_blanket)\n if self.verbose > 2:\n print_('\\t' + self._id +\n ' Current log-likelihood plus current log-probability', sum)\n return sum\n\n @property\n def logp_gradient(self):\n return logp_gradient_of_set(self.stochastics, self.markov_blanket)\n\n def current_state(self):\n \"\"\"Return a dictionary with the current value of the variables defining\n the state of the step method.\"\"\"\n state = {}\n for s in self._state:\n state[s] = getattr(self, s)\n return state\n\n @prop\n def ratio():\n \"\"\"Acceptance ratio\"\"\"\n\n def fget(self):\n return self.accepted / (self.accepted + self.rejected)\n return locals()\n\nStepMethod = six.with_metaclass(StepMethodMeta, StepMethod)\n\n\nclass NoStepper(StepMethod):\n\n \"\"\"\n Step and tune methods do nothing.\n\n Useful for holding stochastics constant without setting observed=True.\n \"\"\"\n\n def step(self):\n pass\n\n def tune(self, *args, **kwargs):\n pass\n\n# The default StepMethod, which Model uses to handle singleton stochastics.\n\n\nclass Metropolis(StepMethod):\n\n \"\"\"\n The default StepMethod, which Model uses to handle singleton, continuous variables.\n\n Applies the one-at-a-time Metropolis-Hastings algorithm to the Stochastic over which self has jurisdiction.\n\n To instantiate a Metropolis called M with jurisdiction over a Stochastic P:\n\n >>> M = Metropolis(P, scale=1, proposal_sd=None, dist=None)\n\n :Arguments:\n - s : Stochastic\n The variable over which self has jurisdiction.\n\n - scale (optional) : number\n The proposal jump width is set to scale * variable.value.\n\n - proposal_sd (optional) : number or vector\n The proposal jump width is set to proposal_sd.\n\n - proposal_distribution (optional) : string\n The proposal distribution. May be 'Normal',\n 'Prior' or None. If None is provided, a proposal distribution is chosen\n by examining P.value's type.\n\n - verbose (optional) : integer\n Level of output verbosity: 0=none, 1=low, 2=medium, 3=high. 
Setting to -1 (default) allows verbosity to be turned on by sampler.\n\n :SeeAlso: StepMethod, Sampler.\n \"\"\"\n\n def __init__(self, stochastic, scale=1., proposal_sd=None,\n proposal_distribution=None, verbose=-1, tally=True, check_before_accepting=True):\n # Metropolis class initialization\n\n # Initialize superclass\n StepMethod.__init__(self, [stochastic], tally=tally)\n\n # Initialize hidden attributes\n self.proposal_sd = proposal_sd\n\n self.adaptive_scale_factor = 1.\n self.accepted = 0.\n self.rejected = 0.\n self._state = [\n 'rejected',\n 'accepted',\n 'adaptive_scale_factor',\n 'proposal_sd',\n 'proposal_distribution',\n 'check_before_accepting']\n self._tuning_info = ['adaptive_scale_factor']\n self.check_before_accepting = check_before_accepting\n self.proposal_sd = proposal_sd\n\n # Set public attributes\n self.stochastic = stochastic\n if verbose > -1:\n self.verbose = verbose\n else:\n self.verbose = stochastic.verbose\n\n if proposal_distribution != \"Prior\":\n # Avoid zeros when setting proposal variance\n if proposal_sd is None:\n if all(self.stochastic.value != 0.):\n self.proposal_sd = ones(\n shape(\n self.stochastic.value)) * abs(\n self.stochastic.value) * scale\n else:\n self.proposal_sd = ones(shape(\n self.stochastic.value)) * scale\n\n # Initialize proposal deviate with array of zeros\n self.proposal_deviate = zeros(\n shape(self.stochastic.value),\n dtype=float)\n\n # Determine size of stochastic\n if isinstance(self.stochastic.value, ndarray):\n self._len = len(self.stochastic.value.ravel())\n else:\n self._len = 1\n\n # else: self.proposal_sd = None # Probably unnecessary\n # If no dist argument is provided, assign a proposal distribution\n # automatically.\n if not proposal_distribution:\n\n # Pick Gaussian by default\n self.proposal_distribution = \"Normal\"\n\n else:\n\n if proposal_distribution.capitalize() in self._valid_proposals:\n self.proposal_distribution = proposal_distribution\n else:\n raise ValueError(\n \"Invalid proposal distribution '%s' specified for Metropolis sampler.\" %\n proposal_distribution)\n\n _valid_proposals = ['Normal', 'Prior']\n\n @staticmethod\n def competence(s):\n \"\"\"\n The competence function for Metropolis\n \"\"\"\n if s.dtype is None:\n return .5\n\n if not s.dtype in float_dtypes:\n # If the stochastic's binary or discrete, I can't do it.\n return 0\n else:\n return 2\n\n def hastings_factor(self):\n \"\"\"\n If this is a Metropolis-Hastings method (proposal is not symmetric random walk),\n this method should return log(back_proposal) - log(forward_proposal).\n \"\"\"\n return 0.\n\n def step(self):\n \"\"\"\n The default step method applies if the variable is floating-point\n valued, and is not being proposed from its prior.\n \"\"\"\n\n # Probability and likelihood for s's current value:\n\n if self.verbose > 2:\n print_()\n print_(self._id + ' getting initial logp.')\n\n if self.proposal_distribution == \"Prior\":\n logp = self.loglike\n else:\n logp = self.logp_plus_loglike\n\n if self.verbose > 2:\n print_(self._id + ' proposing.')\n\n # Sample a candidate value\n self.propose()\n\n # Probability and likelihood for s's proposed value:\n try:\n if self.proposal_distribution == \"Prior\":\n logp_p = self.loglike\n # Check for weirdness before accepting jump\n if self.check_before_accepting:\n self.stochastic.logp\n else:\n logp_p = self.logp_plus_loglike\n\n except ZeroProbability:\n\n # Reject proposal\n if self.verbose > 2:\n print_(self._id + ' rejecting due to ZeroProbability.')\n self.reject()\n\n # 
Increment rejected count\n self.rejected += 1\n\n if self.verbose > 2:\n print_(self._id + ' returning.')\n return\n\n if self.verbose > 2:\n print_('logp_p - logp: ', logp_p - logp)\n\n HF = self.hastings_factor()\n\n # Evaluate acceptance ratio\n if log(random()) > logp_p - logp + HF:\n\n # Revert s if fail\n self.reject()\n\n # Increment rejected count\n self.rejected += 1\n if self.verbose > 2:\n print_(self._id + ' rejecting')\n else:\n # Increment accepted count\n self.accepted += 1\n if self.verbose > 2:\n print_(self._id + ' accepting')\n\n if self.verbose > 2:\n print_(self._id + ' returning.')\n\n def reject(self):\n # Sets current s value to the last accepted value\n # self.stochastic.value = self.stochastic.last_value\n self.stochastic.revert()\n\n def propose(self):\n \"\"\"\n This method is called by step() to generate proposed values\n if self.proposal_distribution is \"Normal\" (i.e. no proposal specified).\n \"\"\"\n \n if self.proposal_distribution == \"Normal\":\n self.stochastic.value = rnormal(\n self.stochastic.value,\n self.adaptive_scale_factor *\n self.proposal_sd,\n size=self.stochastic.value.shape)\n elif self.proposal_distribution == \"Prior\":\n self.stochastic.random()\n\n def tune(self, divergence_threshold=1e10, verbose=0):\n \"\"\"\n Tunes the scaling parameter for the proposal distribution\n according to the acceptance rate of the last k proposals:\n\n Rate Variance adaptation\n ---- -------------------\n <0.001 x 0.1\n <0.05 x 0.5\n <0.2 x 0.9\n >0.5 x 1.1\n >0.75 x 2\n >0.95 x 10\n\n This method is called exclusively during the burn-in period of the\n sampling algorithm.\n\n May be overridden in subclasses.\n \"\"\"\n\n if self.verbose > -1:\n verbose = self.verbose\n\n # Verbose feedback\n if verbose > 0:\n print_('\\t%s tuning:' % self._id)\n\n # Flag for tuning state\n tuning = True\n\n # Calculate recent acceptance rate\n if not (self.accepted + self.rejected):\n return tuning\n acc_rate = self.accepted / (self.accepted + self.rejected)\n \n current_factor = self.adaptive_scale_factor\n\n # Switch statement\n if acc_rate < 0.001:\n # reduce by 90 percent\n self.adaptive_scale_factor *= 0.1\n elif acc_rate < 0.05:\n # reduce by 50 percent\n self.adaptive_scale_factor *= 0.5\n elif acc_rate < 0.2:\n # reduce by ten percent\n self.adaptive_scale_factor *= 0.9\n elif acc_rate > 0.95:\n # increase by factor of ten\n self.adaptive_scale_factor *= 10.0\n elif acc_rate > 0.75:\n # increase by double\n self.adaptive_scale_factor *= 2.0\n elif acc_rate > 0.5:\n # increase by ten percent\n self.adaptive_scale_factor *= 1.1\n else:\n tuning = False\n\n # Re-initialize rejection count\n self.rejected = 0.\n self.accepted = 0.\n \n # Prevent from tuning to zero\n if not self.adaptive_scale_factor:\n self.adaptive_scale_factor = current_factor\n return False\n\n # More verbose feedback, if requested\n if verbose > 0:\n if hasattr(self, 'stochastic'):\n print_('\\t\\tvalue:', self.stochastic.value)\n print_('\\t\\tacceptance rate:', acc_rate)\n print_('\\t\\tadaptive scale factor:', self.adaptive_scale_factor)\n print_()\n\n return tuning\n\n\nclass PDMatrixMetropolis(Metropolis):\n\n \"\"\"Metropolis sampler with proposals customised for symmetric positive definite matrices\"\"\"\n\n def __init__(self, stochastic, scale=1.,\n proposal_sd=None, verbose=-1, tally=True):\n Metropolis.__init__(\n self,\n stochastic,\n scale=scale,\n proposal_sd=proposal_sd,\n proposal_distribution=\"Normal\",\n verbose=verbose,\n tally=tally)\n\n @staticmethod\n def competence(s):\n 
\"\"\"\n The competence function for MatrixMetropolis\n \"\"\"\n # MatrixMetropolis handles the Wishart family, which are valued as\n # _symmetric_ matrices.\n if any([isinstance(s, cls)\n for cls in [distributions.Wishart, distributions.WishartCov]]):\n return 2\n else:\n return 0\n\n def propose(self):\n \"\"\"\n Proposals for positive definite matrix using random walk deviations on the Cholesky\n factor of the current value.\n \"\"\"\n\n # Locally store size of matrix\n dims = self.stochastic.value.shape\n\n # Add normal deviate to value and symmetrize\n dev = rnormal(\n 0,\n self.adaptive_scale_factor *\n self.proposal_sd,\n size=dims)\n symmetrize(dev)\n\n # Replace\n self.stochastic.value = dev + self.stochastic.value\n\n\nclass Gibbs(Metropolis):\n\n \"\"\"\n Base class for the Gibbs step methods\n \"\"\"\n\n def __init__(self, stochastic, verbose=-1):\n Metropolis.__init__(self, stochastic, verbose=verbose, tally=False)\n\n # Override Metropolis's competence.\n competence = classmethod(StepMethod.competence)\n\n def step(self):\n if not self.conjugate:\n logp = self.stochastic.logp\n\n self.propose()\n\n if not self.conjugate:\n\n try:\n logp_p = self.stochastic.logp\n except ZeroProbability:\n self.reject()\n\n if log(random()) > logp_p - logp:\n self.reject()\n\n def tune(self, *args, **kwargs):\n return False\n\n def propose(self):\n raise NotImplementedError(\n 'The Gibbs class has to be subclassed, it is not usable directly.')\n\n\nclass DrawFromPrior(StepMethod):\n\n \"\"\"\n Handles dataless submodels.\n \"\"\"\n\n def __init__(self, variables, generations, verbose=-1):\n StepMethod.__init__(self, variables, verbose, tally=False)\n self.generations = generations\n\n # Some variables (eg GP) may not have logp attributes, so don't try to\n # evaluate their logps.\n self.variables_with_logp = set([])\n for s in self.markov_blanket:\n try:\n s.logp\n self.variables_with_logp.add(s)\n except:\n pass\n\n def get_logp_plus_loglike(self):\n return logp_of_set(self.variables_with_logp)\n logp_plus_loglike = property(get_logp_plus_loglike)\n\n def step(self):\n jumped = []\n try:\n for generation in self.generations:\n for s in generation:\n s.rand()\n jumped.append(s)\n self.logp_plus_loglike\n except ZeroProbability:\n if self.verbose > 2:\n forbidden = []\n for generation in self.generations:\n for s in self.stochastics:\n try:\n s.logp\n except ZeroProbability:\n forbidden.append(s.__name__)\n print_(\n 'DrawFromPrior jumped stochastics %s to value forbidden by objects %s, rejecting.' 
% (\n ', '.join(\n s.__name__ for s in jumped),\n ', '.join(\n forbidden)))\n warnings.warn('DrawFromPrior jumped to forbidden value')\n for s in jumped:\n s.revert()\n\n @classmethod\n def competence(s):\n # Dataless gets assigned specially before other step methods.\n return 0\n\n\nclass DiscreteMetropolis(Metropolis):\n\n \"\"\"\n Just like Metropolis, but rounds the variable's value.\n Good for discrete stochastics.\n \"\"\"\n\n def __init__(self, stochastic, scale=1., proposal_sd=None,\n proposal_distribution=\"Poisson\", positive=True, verbose=-1, tally=True):\n # DiscreteMetropolis class initialization\n\n # Initialize superclass\n Metropolis.__init__(\n self,\n stochastic,\n scale=scale,\n proposal_sd=proposal_sd,\n proposal_distribution=proposal_distribution,\n verbose=verbose,\n tally=tally)\n\n # Flag for positive-only values\n self._positive = positive\n\n _valid_proposals = ['Poisson', 'Normal', 'Prior']\n\n @staticmethod\n def competence(stochastic):\n \"\"\"\n The competence function for DiscreteMetropolis.\n \"\"\"\n if stochastic.dtype in integer_dtypes:\n return 1\n else:\n return 0\n\n def propose(self):\n # Propose new values using normal distribution\n\n if self.proposal_distribution == \"Normal\":\n\n # New normal deviate, centred on current value\n new_val = rnormal(\n self.stochastic.value,\n self.adaptive_scale_factor *\n self.proposal_sd)\n\n # Round before setting proposed value\n self.stochastic.value = round_array(new_val)\n\n elif self.proposal_distribution == \"Poisson\":\n\n k = shape(self.stochastic.value)\n # Add or subtract (equal probability) Poisson sample\n new_val = self.stochastic.value + rpoisson(\n self.adaptive_scale_factor * self.proposal_sd) * (\n -ones(\n k)) ** (\n random(\n k) > 0.5)\n\n if self._positive:\n # Enforce positive values\n self.stochastic.value = abs(new_val)\n else:\n self.stochastic.value = new_val\n\n elif self.proposal_distribution == \"Prior\":\n self.stochastic.random()\n\n# TODO Implement independence sampler for BinaryMetropolis\n\n\nclass BinaryMetropolis(Metropolis):\n\n \"\"\"\n Like Metropolis, but with a modified step() method.\n Good for binary variables.\n\n \"\"\"\n\n def __init__(self, stochastic, p_jump=.1,\n proposal_distribution=None, verbose=-1, tally=True):\n # BinaryMetropolis class initialization\n\n # Initialize superclass\n Metropolis.__init__(\n self,\n stochastic,\n proposal_distribution=proposal_distribution,\n verbose=verbose,\n tally=tally)\n\n self._state.remove('proposal_sd')\n\n # adaptive_scale_factor controls the jump probability\n self.adaptive_scale_factor = log(1. 
- p_jump) / log(.5)\n\n @staticmethod\n def competence(stochastic):\n \"\"\"\n The competence function for Binary One-At-A-Time Metropolis\n \"\"\"\n if stochastic.dtype in bool_dtypes:\n return 2\n\n elif isinstance(stochastic, distributions.Bernoulli):\n return 2\n \n elif (isinstance(stochastic, distributions.Categorical) and \n (len(stochastic.parents['p'])==2)):\n return 2\n\n else:\n return 0\n\n def step(self):\n if ndim(self.stochastic.value):\n Metropolis.step(self)\n else:\n\n # See what log-probability of True is.\n self.stochastic.value = True\n\n try:\n logp_true = self.logp_plus_loglike\n except ZeroProbability:\n self.stochastic.value = False\n return\n\n # See what log-probability of False is.\n self.stochastic.value = False\n\n try:\n logp_false = self.logp_plus_loglike\n except ZeroProbability:\n self.stochastic.value = True\n return\n\n # Test\n p_true = exp(logp_true)\n p_false = exp(logp_false)\n\n if self.verbose > 2:\n print_(\"\"\"%s step information:\n - logp_true: %f\n - logp_false: %f\n - p_true: %f\n - p_false: %f\n \"\"\" % (self._id, logp_true, logp_false, p_true, p_false))\n\n # Stochastically set value according to relative\n # probabilities of True and False\n if random() > p_false / (p_true + p_false):\n if self.verbose > 2:\n print_(\n \"%s setting %s's value to True.\" %\n (self._id, self.stochastic))\n self.stochastic.value = True\n elif self.verbose > 2:\n print_(\n \"%s setting %s's value to False.\" %\n (self._id, self.stochastic))\n\n def propose(self):\n\n if self.proposal_distribution == 'Prior':\n self.stochastic.random()\n else:\n # Convert adaptive_scale_factor to a jump probability\n p_jump = 1. - .5 ** self.adaptive_scale_factor\n\n rand_array = random(size=shape(self.stochastic.value))\n new_value = copy(self.stochastic.value)\n # Locations where switches occur, according to p_jump\n switch_locs = where(rand_array < p_jump)\n if shape(new_value):\n new_value[switch_locs] = True - new_value[switch_locs]\n else:\n new_value = True - new_value\n self.stochastic.value = new_value\n\n\nclass AdaptiveMetropolis(StepMethod):\n\n \"\"\"\n The AdaptativeMetropolis (AM) sampling algorithm works like a regular\n Metropolis, with the exception that stochastic parameters are block-updated\n using a multivariate jump distribution whose covariance is tuned during\n sampling. Although the chain is non-Markovian, i.e. the proposal\n distribution is asymmetric, it has correct ergodic properties. See\n (Haario et al., 2001) for details.\n\n :Parameters:\n - stochastic : PyMC objects\n Stochastic objects to be handled by the AM algorith,\n\n - cov : array\n Initial guess for the covariance matrix C. If it is None, the\n covariance will be estimated using the scales dictionary if provided,\n the existing trace if available, or the current stochastics value.\n It is suggested to provide a sensible guess for the covariance, and\n not rely on the automatic assignment from stochastics value.\n\n - delay : int\n Number of steps before the empirical covariance is computed. If greedy\n is True, the algorithm waits for delay *accepted* steps before computing\n the covariance.\n\n - interval : int\n Interval between covariance updates. Higher dimensional spaces require\n more samples to obtain reliable estimates for the covariance updates.\n\n - greedy : bool\n If True, only the accepted jumps are tallied in the internal trace\n until delay is reached. 
This is useful to make sure that the empirical\n covariance has a sensible structure.\n\n - shrink_if_necessary : bool\n If True, the acceptance rate is checked when the step method tunes. If\n the acceptance rate is small, the proposal covariance is shrunk according\n to the following rule:\n\n if acc_rate < .001:\n self.C *= .01\n elif acc_rate < .01:\n self.C *= .25\n\n - scales : dict\n Dictionary containing the scale for each stochastic keyed by name.\n If cov is None, those scales are used to define an initial covariance\n matrix. If neither cov nor scale is given, the initial covariance is\n guessed from the trace (it if exists) or the objects value, alt\n\n - verbose : int\n Controls the verbosity level.\n\n\n :Notes:\n Use the methods: `cov_from_scales`, `cov_from_trace` and `cov_from_values` for\n more control on the creation of an initial covariance matrix. A lot of problems\n can be avoided with a good initial covariance and long enough intervals between\n covariance updates. That is, do not compensate for a bad covariance guess by\n reducing the interval between updates thinking the covariance matrix will\n converge more rapidly.\n\n\n :Reference:\n Haario, H., E. Saksman and J. Tamminen, An adaptive Metropolis algorithm,\n Bernouilli, vol. 7 (2), pp. 223-242, 2001.\n \"\"\"\n\n def __init__(self, stochastic, cov=None, delay=1000, interval=200,\n greedy=True, shrink_if_necessary=False, scales=None, verbose=-1, tally=False):\n\n # Verbosity flag\n self.verbose = verbose\n\n self.accepted = 0\n self.rejected = 0\n\n if not np.iterable(stochastic) or isinstance(stochastic, Variable):\n stochastic = [stochastic]\n\n # Initialize superclass\n StepMethod.__init__(self, stochastic, verbose, tally)\n\n self._id = 'AdaptiveMetropolis_' + '_'.join(\n [p.__name__ for p in self.stochastics])\n # State variables used to restore the state in a latter session.\n self._state += [\n 'accepted', 'rejected', '_trace_count', '_current_iter', 'C', 'proposal_sd',\n '_proposal_deviate', '_trace', 'shrink_if_necessary']\n self._tuning_info = ['C']\n\n self.proposal_sd = None\n self.shrink_if_necessary = shrink_if_necessary\n\n # Number of successful steps before the empirical covariance is\n # computed\n self.delay = delay\n # Interval between covariance updates\n self.interval = interval\n # Flag for tallying only accepted jumps until delay reached\n self.greedy = greedy\n\n # Initialization methods\n self.check_type()\n self.dimension()\n\n # Set the initial covariance using cov, or the following fallback mechanisms:\n # 1. If scales is provided, use it.\n # 2. If a trace is present, compute the covariance matrix empirically from it.\n # 3. 
Use the stochastics value as a guess of the variance.\n if cov is not None:\n self.C = cov\n elif scales:\n self.C = self.cov_from_scales(scales)\n else:\n try:\n self.C = self.cov_from_trace()\n except AttributeError:\n self.C = self.cov_from_value(100.)\n\n self.updateproposal_sd()\n\n # Keep track of the internal trace length\n # It may be different from the iteration count since greedy\n # sampling can be done during warm-up period.\n self._trace_count = 0\n self._current_iter = 0\n\n self._proposal_deviate = np.zeros(self.dim)\n self.chain_mean = np.asmatrix(np.zeros(self.dim))\n self._trace = []\n\n if self.verbose >= 2:\n print_(\"Initialization...\")\n print_('Dimension: ', self.dim)\n print_(\"C_0: \", self.C)\n print_(\"Sigma: \", self.proposal_sd)\n\n @staticmethod\n def competence(stochastic):\n \"\"\"\n The competence function for AdaptiveMetropolis.\n The AM algorithm is well suited to deal with multivariate\n parameters, particularly those which are correlated with one\n another. However, it does not work reliably with all multivariate\n stochastics, so it must be applied manually via MCMC.use_step_method().\n \"\"\"\n return 0\n\n def cov_from_value(self, scaling):\n \"\"\"Return a covariance matrix for the jump distribution using\n the actual value of the stochastic as a guess of their variance,\n divided by the `scaling` argument.\n\n Note that this is likely to return a poor guess.\n \"\"\"\n rv = []\n for s in self.stochastics:\n rv.extend(np.ravel(s.value).copy())\n\n # Remove 0 values since this would lead to quite small jumps...\n arv = np.array(rv)\n arv[arv == 0] = 1.\n\n # Create a diagonal covariance matrix using the scaling factor.\n return np.eye(self.dim) * np.abs(arv) / scaling\n\n def cov_from_scales(self, scales):\n \"\"\"Return a covariance matrix built from a dictionary of scales.\n\n `scales` is a dictionary keyed by stochastic instances, and the\n values refer are the variance of the jump distribution for each\n stochastic. 
If a stochastic is a sequence, the variance must\n have the same length.\n \"\"\"\n\n # Get array of scales\n ord_sc = []\n for stochastic in self.stochastics:\n ord_sc.append(np.ravel(scales[stochastic]))\n ord_sc = np.concatenate(ord_sc)\n\n if np.squeeze(ord_sc).shape[0] != self.dim:\n raise ValueError(\"Improper initial scales, dimension don't match\",\n (np.squeeze(ord_sc), self.dim))\n\n # Scale identity matrix\n return np.eye(self.dim) * ord_sc\n\n def cov_from_trace(self, trace=slice(None)):\n \"\"\"Define the jump distribution covariance matrix from the object's\n stored trace.\n\n :Parameters:\n - `trace` : slice or int\n A slice for the stochastic object's trace in the last chain, or a\n an integer indicating the how many of the last samples will be used.\n\n \"\"\"\n n = []\n for s in self.stochastics:\n n.append(s.trace.length())\n n = set(n)\n if len(n) > 1:\n raise ValueError('Traces do not have the same length.')\n elif n == 0:\n raise AttributeError(\n 'Stochastic has no trace to compute covariance.')\n else:\n n = n.pop()\n\n if not isinstance(trace, slice):\n trace = slice(trace, n)\n\n a = self.trace2array(trace)\n\n return np.cov(a, rowvar=0)\n\n def check_type(self):\n \"\"\"Make sure each stochastic has a correct type, and identify discrete stochastics.\"\"\"\n self.isdiscrete = {}\n for stochastic in self.stochastics:\n if stochastic.dtype in integer_dtypes:\n self.isdiscrete[stochastic] = True\n elif stochastic.dtype in bool_dtypes:\n raise ValueError(\n 'Binary stochastics not supported by AdaptativeMetropolis.')\n else:\n self.isdiscrete[stochastic] = False\n\n def dimension(self):\n \"\"\"Compute the dimension of the sampling space and identify the slices\n belonging to each stochastic.\n \"\"\"\n self.dim = 0\n self._slices = {}\n for stochastic in self.stochastics:\n if isinstance(stochastic.value, np.matrix):\n p_len = len(stochastic.value.A.ravel())\n elif isinstance(stochastic.value, np.ndarray):\n p_len = len(stochastic.value.ravel())\n else:\n p_len = 1\n self._slices[stochastic] = slice(self.dim, self.dim + p_len)\n self.dim += p_len\n\n def update_cov(self):\n \"\"\"Recursively compute the covariance matrix for the multivariate normal\n proposal distribution.\n\n This method is called every self.interval once self.delay iterations\n have been performed.\n \"\"\"\n\n scaling = (2.4) ** 2 / self.dim # Gelman et al. 1996.\n epsilon = 1.0e-5\n chain = np.asarray(self._trace)\n\n # Recursively compute the chain mean\n self.C, self.chain_mean = self.recursive_cov(self.C, self._trace_count,\n self.chain_mean, chain, scaling=scaling, epsilon=epsilon)\n\n # Shrink covariance if acceptance rate is too small\n acc_rate = self.accepted / (self.accepted + self.rejected)\n if self.shrink_if_necessary:\n if acc_rate < .001:\n self.C *= .01\n elif acc_rate < .01:\n self.C *= .25\n if self.verbose > 1:\n if acc_rate < .01:\n print_(\n '\\tAcceptance rate was',\n acc_rate,\n 'shrinking covariance')\n self.accepted = 0.\n self.rejected = 0.\n\n if self.verbose > 1:\n print_(\"\\tUpdating covariance ...\\n\", self.C)\n print_(\"\\tUpdating mean ... \", self.chain_mean)\n\n # Update state\n adjustmentwarning = '\\n' +\\\n 'Covariance was not positive definite and proposal_sd cannot be computed by \\n' + \\\n 'Cholesky decomposition. The next jumps will be based on the last \\n' + \\\n 'valid covariance matrix. This situation may have arisen because no \\n' + \\\n 'jumps were accepted during the last `interval`. 
One solution is to \\n' + \\\n 'increase the interval, or specify an initial covariance matrix with \\n' + \\\n 'a smaller variance. For this simulation, each time a similar error \\n' + \\\n 'occurs, proposal_sd will be reduced by a factor .9 to reduce the \\n' + \\\n 'jumps and increase the likelihood of accepted jumps.'\n\n try:\n self.updateproposal_sd()\n except np.linalg.LinAlgError:\n warnings.warn(adjustmentwarning)\n self.covariance_adjustment(.9)\n\n self._trace_count += len(self._trace)\n self._trace = []\n\n def covariance_adjustment(self, f=.9):\n \"\"\"Multiply self.proposal_sd by a factor f. This is useful when the current proposal_sd is too large and all jumps are rejected.\n \"\"\"\n self.proposal_sd *= f\n\n def updateproposal_sd(self):\n \"\"\"Compute the Cholesky decomposition of self.C.\"\"\"\n self.proposal_sd = np.linalg.cholesky(self.C)\n\n def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):\n r\"\"\"Compute the covariance recursively.\n\n Return the new covariance and the new mean.\n\n .. math::\n C_k & = \\frac{1}{k-1} (\\sum_{i=1}^k x_i x_i^T - k\\bar{x_k}\\bar{x_k}^T)\n C_n & = \\frac{1}{n-1} (\\sum_{i=1}^k x_i x_i^T + \\sum_{i=k+1}^n x_i x_i^T - n\\bar{x_n}\\bar{x_n}^T)\n & = \\frac{1}{n-1} ((k-1)C_k + k\\bar{x_k}\\bar{x_k}^T + \\sum_{i=k+1}^n x_i x_i^T - n\\bar{x_n}\\bar{x_n}^T)\n\n :Parameters:\n - cov : matrix\n Previous covariance matrix.\n - length : int\n Length of chain used to compute the previous covariance.\n - mean : array\n Previous mean.\n - chain : array\n Sample used to update covariance.\n - scaling : float\n Scaling parameter\n - epsilon : float\n Set to a small value to avoid singular matrices.\n \"\"\"\n n = length + len(chain)\n k = length\n new_mean = self.recursive_mean(mean, length, chain)\n\n t0 = k * np.outer(mean, mean)\n t1 = np.dot(chain.T, chain)\n t2 = n * np.outer(new_mean, new_mean)\n t3 = epsilon * np.eye(cov.shape[0])\n\n new_cov = (\n k - 1) / (\n n - 1.) * cov + scaling / (\n n - 1.) * (\n t0 + t1 - t2 + t3)\n return new_cov, new_mean\n\n def recursive_mean(self, mean, length, chain):\n r\"\"\"Compute the chain mean recursively.\n\n Instead of computing the mean :math:`\\bar{x_n}` of the entire chain,\n use the last computed mean :math:`bar{x_j}` and the tail of the chain\n to recursively estimate the mean.\n\n .. 
math::\n \\bar{x_n} & = \\frac{1}{n} \\sum_{i=1}^n x_i\n & = \\frac{1}{n} (\\sum_{i=1}^j x_i + \\sum_{i=j+1}^n x_i)\n & = \\frac{j\\bar{x_j}}{n} + \\frac{\\sum_{i=j+1}^n x_i}{n}\n\n :Parameters:\n - mean : array\n Previous mean.\n - length : int\n Length of chain used to compute the previous mean.\n - chain : array\n Sample used to update mean.\n \"\"\"\n n = length + len(chain)\n return length * mean / n + chain.sum(0) / n\n\n def propose(self):\n \"\"\"\n This method proposes values for stochastics based on the empirical\n covariance of the values sampled so far.\n\n The proposal jumps are drawn from a multivariate normal distribution.\n \"\"\"\n\n arrayjump = np.dot(\n self.proposal_sd,\n np.random.normal(\n size=self.proposal_sd.shape[\n 0]))\n if self.verbose > 2:\n print_('Jump :', arrayjump)\n\n # Update each stochastic individually.\n for stochastic in self.stochastics:\n jump = arrayjump[self._slices[stochastic]].squeeze()\n if np.iterable(stochastic.value):\n jump = np.reshape(\n arrayjump[\n self._slices[\n stochastic]],\n np.shape(\n stochastic.value))\n if self.isdiscrete[stochastic]:\n jump = round_array(jump)\n stochastic.value = stochastic.value + jump\n\n def step(self):\n \"\"\"\n Perform a Metropolis step.\n\n Stochastic parameters are block-updated using a multivariate normal\n distribution whose covariance is updated every self.interval once\n self.delay steps have been performed.\n\n The AM instance keeps a local copy of the stochastic parameter's trace.\n This trace is used to computed the empirical covariance, and is\n completely independent from the Database backend.\n\n If self.greedy is True and the number of iterations is smaller than\n self.delay, only accepted jumps are stored in the internal\n trace to avoid computing singular covariance matrices.\n \"\"\"\n\n # Probability and likelihood for stochastic's current value:\n logp = self.logp_plus_loglike\n if self.verbose > 1:\n print_('Current value: ', self.stoch2array())\n print_('Current likelihood: ', logp)\n\n # Sample a candidate value\n self.propose()\n\n # Metropolis acception/rejection test\n accept = False\n try:\n # Probability and likelihood for stochastic's proposed value:\n logp_p = self.logp_plus_loglike\n if self.verbose > 2:\n print_('Current value: ', self.stoch2array())\n print_('Current likelihood: ', logp_p)\n\n if np.log(random()) < logp_p - logp:\n accept = True\n self.accepted += 1\n if self.verbose > 2:\n print_('Accepted')\n else:\n self.rejected += 1\n if self.verbose > 2:\n print_('Rejected')\n except ZeroProbability:\n self.rejected += 1\n logp_p = None\n if self.verbose > 2:\n print_('Rejected with ZeroProbability Error.')\n\n if (not self._current_iter % self.interval) and self.verbose > 1:\n print_(\"Step \", self._current_iter)\n print_(\"\\tLogprobability (current, proposed): \", logp, logp_p)\n for stochastic in self.stochastics:\n print_(\n \"\\t\",\n stochastic.__name__,\n stochastic.last_value,\n stochastic.value)\n if accept:\n print_(\"\\tAccepted\\t*******\\n\")\n else:\n print_(\"\\tRejected\\n\")\n print_(\n \"\\tAcceptance ratio: \",\n self.accepted / (\n self.accepted + self.rejected))\n\n if self._current_iter == self.delay:\n self.greedy = False\n\n if not accept:\n self.reject()\n\n if accept or not self.greedy:\n self.internal_tally()\n\n if self._current_iter > self.delay and self._current_iter % self.interval == 0:\n self.update_cov()\n\n self._current_iter += 1\n\n # Please keep reject() factored out- helps RandomRealizations figure out\n # what to do.\n def 
reject(self):\n for stochastic in self.stochastics:\n # stochastic.value = stochastic.last_value\n stochastic.revert()\n\n def internal_tally(self):\n \"\"\"Store the trace of stochastics for the computation of the covariance.\n This trace is completely independent from the backend used by the\n sampler to store the samples.\"\"\"\n chain = []\n for stochastic in self.stochastics:\n chain.append(np.ravel(stochastic.value))\n self._trace.append(np.concatenate(chain))\n\n def trace2array(self, sl):\n \"\"\"Return an array with the trace of all stochastics, sliced by sl.\"\"\"\n chain = []\n for stochastic in self.stochastics:\n tr = stochastic.trace.gettrace(slicing=sl)\n if tr is None:\n raise AttributeError\n chain.append(tr)\n return np.hstack(chain)\n\n def stoch2array(self):\n \"\"\"Return the stochastic objects as an array.\"\"\"\n a = np.empty(self.dim)\n for stochastic in self.stochastics:\n a[self._slices[stochastic]] = stochastic.value\n return a\n\n def tune(self, verbose=0):\n \"\"\"Tuning is done during the entire run, independently from the Sampler\n tuning specifications. \"\"\"\n return False\n\n\nclass TWalk(StepMethod):\n\n \"\"\"\n The t-walk is a scale-independent, adaptive MCMC algorithm for arbitrary\n continuous distributions and correltation structures. The t-walk maintains\n two independent points in the sample space, and moves are based on\n proposals that are accepted or rejected with a standard M-H acceptance\n probability on the product space. The t-walk is strictly non-adaptive on\n the product space, but displays adaptive behaviour on the original state\n space. There are four proposal distributions (walk, blow, hop, traverse)\n that together offer an algorithm that is effective in sampling\n distributions of arbitrary scale.\n\n The t-walk was devised by J.A. Christen and C. Fox (2010).\n\n :Parameters:\n - stochastic : Stochastic\n The variable over which self has jurisdiction.\n - kernel_probs (optional) : iterable\n The probabilities of choosing each kernel.\n - walk_theta (optional) : float\n Parameter for the walk move. Christen and Fox recommend\n values in [0.3, 2] (Defaults to 1.5).\n - traverse_theta (optional) : float\n Parameter for the traverse move. 
Christen and Fox recommend\n values in [2, 10] (Defaults to 6.0).\n - n1 (optional) : integer\n The number of elements to be moved at each iteration.\n Christen and Fox recommend values in [2, 20] (Defaults to 4).\n - support (optional) : function\n Function defining the support of the stochastic\n (Defaults to real line).\n - verbose (optional) : integer\n Level of output verbosity: 0=none, 1=low, 2=medium, 3=high\n - tally (optional) : bool\n Flag for recording values for trace (Defaults to True).\n \"\"\"\n\n def __init__(self, stochastic, inits=None, kernel_probs=[\n 0.4918, 0.4918, 0.0082, 0.0082], walk_theta=1.5, traverse_theta=6.0, n1=4, support=lambda x: True, verbose=-1, tally=True):\n\n # Initialize superclass\n StepMethod.__init__(self, [stochastic], verbose=verbose, tally=tally)\n\n # Ordered list of proposal kernels\n self.kernels = [self.walk, self.traverse, self.blow, self.hop]\n\n # Kernel for current iteration\n self.current_kernel = None\n\n self.accepted = zeros(len(kernel_probs))\n self.rejected = zeros(len(kernel_probs))\n\n # Cumulative kernel probabilities\n self.cum_probs = np.cumsum(kernel_probs)\n\n self.walk_theta = walk_theta\n self.traverse_theta = traverse_theta\n\n # Set public attributes\n self.stochastic = stochastic\n if verbose > -1:\n self.verbose = verbose\n else:\n self.verbose = stochastic.verbose\n\n # Determine size of stochastic\n if isinstance(self.stochastic.value, ndarray):\n self._len = len(self.stochastic.value.ravel())\n else:\n self._len = 1\n\n # Create attribute for holding value and secondary value\n self.values = [self.stochastic.value]\n\n # Initialize to different value from stochastic or supplied values\n if inits is None:\n self.values.append(self.stochastic.random())\n # Reset original value\n self.stochastic.value = self.values[0]\n else:\n self.values.append(inits)\n\n # Flag for using second point in log-likelihood calculations\n self._prime = False\n\n # Proposal adjustment factor for current iteration\n self.hastings_factor = 0.0\n\n # Set probability of selecting any parameter\n self.p = 1. 
* min(self._len, n1) / self._len\n\n # Support function\n self._support = support\n\n self._state = ['accepted', 'rejected', 'p']\n\n def n1():\n doc = \"Mean number of parameters to be selected for updating\"\n\n def fget(self):\n return self._n1\n\n def fset(self, value):\n self._n1 = value\n self._calc_p()\n return locals()\n n1 = property(**n1())\n\n @staticmethod\n def competence(stochastic):\n \"\"\"\n The competence function for TWalk.\n \"\"\"\n # if stochastic.dtype in float_dtypes and np.alen(stochastic.value) > 4:\n # if np.alen(stochastic.value) >=10:\n # return 2\n # return 1\n return 0\n\n def walk(self):\n \"\"\"Walk proposal kernel\"\"\"\n\n if self.verbose > 1:\n print_('\\t' + self._id + ' Running Walk proposal kernel')\n\n # Mask for values to move\n phi = self.phi\n\n theta = self.walk_theta\n\n u = random(len(phi))\n z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)\n\n if self._prime:\n xp, x = self.values\n else:\n x, xp = self.values\n\n if self.verbose > 1:\n print_('\\t' + 'Current value = ' + str(x))\n\n x = x + phi * (x - xp) * z\n\n if self.verbose > 1:\n print_('\\t' + 'Proposed value = ' + str(x))\n\n self.stochastic.value = x\n\n # Set proposal adjustment factor\n self.hastings_factor = 0.0\n\n def traverse(self):\n \"\"\"Traverse proposal kernel\"\"\"\n\n if self.verbose > 1:\n print_('\\t' + self._id + ' Running Traverse proposal kernel')\n\n # Mask for values to move\n phi = self.phi\n\n theta = self.traverse_theta\n\n # Calculate beta\n if (random() < (theta - 1) / (2 * theta)):\n beta = exp(1 / (theta + 1) * log(random()))\n else:\n beta = exp(1 / (1 - theta) * log(random()))\n\n if self._prime:\n xp, x = self.values\n else:\n x, xp = self.values\n\n if self.verbose > 1:\n print_('\\t' + 'Current value = ' + str(x))\n\n x = (xp + beta * (xp - x)) * phi + x * (phi == False)\n\n if self.verbose > 1:\n print_('\\t' + 'Proposed value = ' + str(x))\n\n self.stochastic.value = x\n\n # Set proposal adjustment factor\n self.hastings_factor = (sum(phi) - 2) * log(beta)\n\n def blow(self):\n \"\"\"Blow proposal kernel\"\"\"\n\n if self.verbose > 1:\n print_('\\t' + self._id + ' Running Blow proposal kernel')\n\n # Mask for values to move\n phi = self.phi\n\n if self._prime:\n xp, x = self.values\n else:\n x, xp = self.values\n\n if self.verbose > 1:\n print_('\\t' + 'Current value ' + str(x))\n\n sigma = max(phi * abs(xp - x))\n\n x = x + phi * sigma * rnormal()\n\n if self.verbose > 1:\n print_('\\t' + 'Proposed value = ' + str(x))\n\n self.hastings_factor = self._g(\n x,\n xp,\n sigma) - self._g(\n self.stochastic.value,\n xp,\n sigma)\n\n self.stochastic.value = x\n\n def _g(self, h, xp, s):\n \"\"\"Density function for blow and hop moves\"\"\"\n\n nphi = sum(self.phi)\n\n return (nphi / 2.0) * log(2 * pi) + nphi * \\\n log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2)\n\n def hop(self):\n \"\"\"Hop proposal kernel\"\"\"\n\n if self.verbose > 1:\n print_('\\t' + self._id + ' Running Hop proposal kernel')\n\n # Mask for values to move\n phi = self.phi\n\n if self._prime:\n xp, x = self.values\n else:\n x, xp = self.values\n\n if self.verbose > 1:\n print_('\\t' + 'Current value of x = ' + str(x))\n\n sigma = max(phi * abs(xp - x)) / 3.0\n\n x = (xp + sigma * rnormal()) * phi + x * (phi == False)\n\n if self.verbose > 1:\n print_('\\t' + 'Proposed value = ' + str(x))\n\n self.hastings_factor = self._g(\n x,\n xp,\n sigma) - self._g(\n self.stochastic.value,\n xp,\n sigma)\n\n self.stochastic.value = x\n\n def reject(self):\n \"\"\"Sets current s value to the 
last accepted value\"\"\"\n self.stochastic.revert()\n\n # Increment rejected count\n self.rejected[self.current_kernel] += 1\n\n if self.verbose > 1:\n print_(\n self._id,\n \"rejected, reverting to value =\",\n self.stochastic.value)\n\n def propose(self):\n \"\"\"This method is called by step() to generate proposed values\"\"\"\n\n # Generate uniform variate to choose kernel\n self.current_kernel = sum(self.cum_probs < random())\n kernel = self.kernels[self.current_kernel]\n\n # Parameters to move\n self.phi = (random(self._len) < self.p)\n\n # Propose new value\n kernel()\n\n def step(self):\n \"\"\"Single iteration of t-walk algorithm\"\"\"\n\n valid_proposal = False\n\n # Use x or xprime as pivot\n self._prime = (random() < 0.5)\n\n if self.verbose > 1:\n print_(\"\\n\\nUsing x%s as pivot\" % (\" prime\" * self._prime or \"\"))\n\n if self._prime:\n # Set the value of the stochastic to the auxiliary\n self.stochastic.value = self.values[1]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"setting value to auxiliary\",\n self.stochastic.value)\n\n # Current log-probability\n logp = self.logp_plus_loglike\n if self.verbose > 1:\n print_(\"Current logp\", logp)\n\n try:\n # Propose new value\n while not valid_proposal:\n self.propose()\n # Check that proposed value lies in support\n valid_proposal = self._support(self.stochastic.value)\n\n if not sum(self.phi):\n raise ZeroProbability\n\n # Proposed log-probability\n logp_p = self.logp_plus_loglike\n if self.verbose > 1:\n print_(\"Proposed logp\", logp_p)\n\n except ZeroProbability:\n\n # Reject proposal\n if self.verbose > 1:\n print_(self._id + ' rejecting due to ZeroProbability.')\n self.reject()\n\n if self._prime:\n # Update value list\n self.values[1] = self.stochastic.value\n # Revert to stochastic's value for next iteration\n self.stochastic.value = self.values[0]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"reverting stochastic to primary value\",\n self.stochastic.value)\n else:\n # Update value list\n self.values[0] = self.stochastic.value\n\n if self.verbose > 1:\n print_(self._id + ' returning.')\n return\n\n if self.verbose > 1:\n print_('logp_p - logp: ', logp_p - logp)\n\n # Evaluate acceptance ratio\n if log(random()) > (logp_p - logp + self.hastings_factor):\n\n # Revert s if fail\n self.reject()\n\n else:\n # Increment accepted count\n self.accepted[self.current_kernel] += 1\n if self.verbose > 1:\n print_(self._id + ' accepting')\n\n if self._prime:\n # Update value list\n self.values[1] = self.stochastic.value\n # Revert to stochastic's value for next iteration\n self.stochastic.value = self.values[0]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"reverting stochastic to primary value\",\n self.stochastic.value)\n\n else:\n # Update value list\n self.values[0] = self.stochastic.value\n\n# Slice sampler implementation contributed by Dominik Wabersich\n\n\nclass Slicer(StepMethod):\n\n \"\"\"\n Univariate slice sampler step method\n\n :Parameters:\n - stochastic : Stochastic\n The variable over which self has jurisdiction.\n - w (optional): float\n Initial width of slice (Defaults to 1)\n - m (optional): int\n Multiplier defining maximum slice size to :math:`mw` (Defaults to 1000)\n - tune (optional): bool\n Tune initial slice width (defaults to True)\n - doubling (optional): bool\n Flag for using doubling procedure instead of stepping out (Defaults to False)\n - tally (optional) : bool\n Flag for recording values for trace (Defaults to True).\n - verbose(optional) : int\n Set verbosity level (Defaults to 
-1)\n \"\"\"\n\n def __init__(self, stochastic, w=1, m=1000, tune=True,\n doubling=False, tally=False, verbose=-1):\n \"\"\"\n Slice sampler class initialization\n \"\"\"\n # Initialize superclass\n StepMethod.__init__(self, [stochastic], tally=tally)\n\n # id string\n self._id = \"Slicer\"\n\n # Set public attributes\n self.stochastic = stochastic\n if verbose > -1:\n self.verbose = verbose\n else:\n self.verbose = stochastic.verbose\n\n self._tune = tune\n if tune:\n self.w_tune = []\n self.w = w\n self.m = m\n self.doubling = doubling\n\n @staticmethod\n def competence(s):\n \"\"\"\n The competence function for Slice\n\n Works best for continuous scalar variables.\n \"\"\"\n if (s.dtype in float_dtypes) and not s.shape:\n return 1\n else:\n return 0\n\n def step(self):\n \"\"\"\n Slice step method\n\n From Neal 2003 (doi:10.1214/aos/1056562461)\n \"\"\"\n logy = self.loglike - rexponential(1)\n\n L = self.stochastic.value - runiform(0, self.w)\n R = L + self.w\n\n if self.doubling:\n # Doubling procedure\n K = self.m\n while (K and (logy < self.fll(L) or logy < self.fll(R))):\n if random() < 0.5:\n L -= R - L\n else:\n R += R - L\n K -= 1\n else:\n # Stepping out procedure\n J = np.floor(runiform(0, self.m))\n K = (self.m - 1) - J\n while(J > 0 and logy < self.fll(L)):\n L -= self.w\n J -= 1\n while(K > 0 and logy < self.fll(R)):\n R += self.w\n K -= 1\n\n # Shrinkage procedure\n self.stochastic.value = runiform(L, R)\n try:\n logy_new = self.loglike\n except ZeroProbability:\n logy_new = -np.infty\n while(logy_new < logy):\n if (self.stochastic.value < self.stochastic.last_value):\n L = float(self.stochastic.value)\n else:\n R = float(self.stochastic.value)\n self.stochastic.revert()\n self.stochastic.value = runiform(L, R)\n try:\n logy_new = self.loglike\n except ZeroProbability:\n logy_new = -np.infty\n\n def fll(self, value):\n \"\"\"\n Returns loglike of value\n \"\"\"\n self.stochastic.value = value\n try:\n ll = self.loglike\n except ZeroProbability:\n ll = -np.infty\n self.stochastic.revert()\n return ll\n\n def tune(self, verbose=None):\n \"\"\"\n Tuning initial slice width parameter\n \"\"\"\n if not self._tune:\n return False\n else:\n self.w_tune.append(\n abs(self.stochastic.last_value - self.stochastic.value))\n self.w = 2 * (sum(self.w_tune) / len(self.w_tune))\n return True\n"
] |
[
[
"numpy.dot",
"numpy.random.exponential",
"numpy.exp",
"numpy.where",
"numpy.cumsum",
"numpy.outer",
"numpy.random.random",
"numpy.concatenate",
"numpy.random.normal",
"numpy.empty",
"numpy.log",
"numpy.random.poisson",
"numpy.eye",
"numpy.ndim",
"numpy.linalg.cholesky",
"numpy.array",
"numpy.zeros",
"numpy.shape",
"numpy.hstack",
"numpy.squeeze",
"numpy.cov",
"numpy.asarray",
"numpy.iterable",
"numpy.ones",
"numpy.random.uniform",
"numpy.ravel",
"numpy.abs",
"numpy.all"
]
] |
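The Slicer class in the file above follows Neal's (2003) stepping-out and shrinkage procedure. As a rough, standalone illustration of that idea (outside PyMC, with an assumed target density, width w, and sample count that are not part of the original file), a minimal univariate slice sampler might look like this:

    import numpy as np

    def slice_sample(log_density, x0, n_samples=1000, w=1.0, m=50, rng=None):
        """Univariate slice sampler using stepping-out and shrinkage (Neal 2003)."""
        rng = np.random.default_rng() if rng is None else rng
        samples = np.empty(n_samples)
        x = float(x0)
        for i in range(n_samples):
            # Auxiliary level: log(y) = log f(x) - Exponential(1)
            logy = log_density(x) - rng.exponential(1.0)

            # Stepping out: expand [L, R] until both ends fall below the slice
            L = x - rng.uniform(0.0, w)
            R = L + w
            j = int(np.floor(rng.uniform(0, m)))
            k = (m - 1) - j
            while j > 0 and logy < log_density(L):
                L -= w
                j -= 1
            while k > 0 and logy < log_density(R):
                R += w
                k -= 1

            # Shrinkage: sample uniformly inside [L, R], shrink toward x on rejection
            while True:
                x_new = rng.uniform(L, R)
                if log_density(x_new) >= logy:
                    x = x_new
                    break
                if x_new < x:
                    L = x_new
                else:
                    R = x_new
            samples[i] = x
        return samples

    # Example target (illustrative): unnormalized standard normal log density
    draws = slice_sample(lambda x: -0.5 * x * x, x0=0.0, n_samples=2000)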
haamoon/finding_common_object
|
[
"2b670facce4e22d669101b0628724d994255dd61"
] |
[
"utils.py"
] |
[
"import tensorflow as tf\nfrom dataflow.generator_cls import GeneratorCLS\nimport json\nimport logging\nfrom easydict import EasyDict\nfrom dataflow.utils import fix_rng_seed\nfrom model.util import batched_gather\nimport os\nimport pickle\n\ndef get_lr_schedule(lr_schedule):\n boundaries = lr_schedule.boundaries\n values = lr_schedule.values\n return tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)\n\ndef corloc(top_subproblem, labels, corloc_list):\n ntotal = tf.cast(tf.shape(top_subproblem)[-1], tf.float32)\n for i in range(tf.shape(labels)[0]):\n res = batched_gather(top_subproblem[i, ..., tf.newaxis],\n labels[i])\n corloc_list.append(tf.reduce_sum(res)/ntotal)\n\ndef get_best_acc(dir_path):\n top_pkl_file = os.path.join(dir_path, 'top.pkl')\n if os.path.isfile(top_pkl_file):\n with open(top_pkl_file, 'rb') as f:\n top = pickle.load(f)\n return top['best']\n return 0.0\n\ndef save_best_acc(dir_path, best_acc, iteration):\n top_pkl_file = os.path.join(dir_path, 'top.pkl')\n top = {'best': best_acc, 'iteration': iteration}\n with open(top_pkl_file, 'wb') as f:\n pickle.dump(top, f)\n\ndef get_dataset(config, training):\n if config.shuffle is False:\n fix_rng_seed(config.seed)\n num_negative_bags = config.negative_bag_size // config.bag_size\n gen = GeneratorCLS(is_training=config.is_training, shuffle=config.shuffle,\n add_gt_list=False, k_shot=config.k_shot, #TODO now cannot return the list now\n bag_size=config.bag_size, num_negative_bags=num_negative_bags,\n split=config.split, num_sample_classes=config.num_sample_classes,\n num_sample_classes_min=config.num_sample_classes_min, use_features=config.use_features,\n dataset_name=config.dataset_name, one_example_per_class=config.one_example_per_class,\n has_single_target=config.has_single_target)\n gen.reset_state()\n dataset = tf.data.Dataset.from_generator(gen.get_data,\n (tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.int32)).prefetch(\n config.prefetch_buffer_size)\n\n if training:\n return dataset.batch(config.meta_batch_size).repeat()\n else:\n return dataset.batch(1) #NOTE: we can repeat since new problems will be different\n\ndef parse_dt(dt, config):\n fea, _, _, classes, _, target_class = dt\n pos_fea = fea[:, :config.k_shot, :, 0, 0]\n neg_fea = fea[:, config.k_shot:, :, 0, 0]\n neg_shape = tf.shape(neg_fea)\n ## [MBS, N_NEG_BAGS, BAG_SIZE, D] ==> [MBS, N_NEG_BAGS*BAG_SIZE, D]\n neg_fea = tf.reshape(neg_fea, [neg_shape[0], -1, neg_shape[-1]])\n\n pos_classes = classes[:, :config.k_shot]\n neg_classes = classes[:, config.k_shot:]\n ## [MBS, N_NEG_BAGS, BAG_SIZE] ==> [MBS, N_NEG_BAGS*BAG_SIZE]\n neg_classes = tf.reshape(neg_classes, [neg_shape[0], -1])\n return pos_fea, neg_fea, pos_classes, neg_classes, target_class\n\ndef get_config(path):\n with open(path,'r') as f:\n return EasyDict(json.load(f))\n\ndef set_logger(log_path):\n \"\"\"Set the logger to log info in terminal and file `log_path`.\n\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. 
Here we save it to `model_dir/train.log`.\n\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n"
] |
[
[
"tensorflow.shape",
"tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay",
"tensorflow.reshape",
"tensorflow.data.Dataset.from_generator",
"tensorflow.reduce_sum"
]
] |
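utils.py above wraps a Python generator in tf.data.Dataset.from_generator, then prefetches and batches it differently for training and evaluation. A minimal sketch of that pattern with a toy generator (the generator, shapes, and batch size below are illustrative assumptions, not taken from the repository):

    import numpy as np
    import tensorflow as tf

    def toy_generator():
        # Yield (features, label) pairs; the real generator yields several
        # tensors describing positive and negative bags.
        for _ in range(100):
            yield np.random.rand(8).astype(np.float32), np.int32(np.random.randint(5))

    dataset = tf.data.Dataset.from_generator(
        toy_generator,
        output_signature=(
            tf.TensorSpec(shape=(8,), dtype=tf.float32),
            tf.TensorSpec(shape=(), dtype=tf.int32),
        ),
    ).prefetch(tf.data.AUTOTUNE)

    train_ds = dataset.batch(16).repeat()   # training: batched and repeated
    eval_ds = dataset.batch(1)              # evaluation: one problem at a time

    for features, labels in train_ds.take(1):
        print(features.shape, labels.shape)  # (16, 8) (16,)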
floscha/keras-io
|
[
"fb064c551eda7aea631ceaa548c4411b9a1193cb"
] |
[
"examples/rl/ppo_cartpole.py"
] |
[
"\"\"\"\nTitle: Proximal Policy Optimization\nAuthor: [Ilias Chrysovergis](https://twitter.com/iliachry)\nDate created: 2021/06/24\nLast modified: 2021/06/24\nDescription: Implementation of a Proximal Policy Optimization agent for the CartPole-v0 environment.\n\"\"\"\n\n\"\"\"\n## Introduction\n\nThis code example solves the CartPole-v0 environment using a Proximal Policy Optimization (PPO) agent.\n\n### CartPole-v0\n\nA pole is attached by an un-actuated joint to a cart, which moves along a frictionless track.\nThe system is controlled by applying a force of +1 or -1 to the cart.\nThe pendulum starts upright, and the goal is to prevent it from falling over.\nA reward of +1 is provided for every timestep that the pole remains upright.\nThe episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center.\nAfter 200 steps the episode ends. Thus, the highest return we can get is equal to 200.\n\n[CartPole-v0](https://gym.openai.com/envs/CartPole-v0/)\n\n### Proximal Policy Optimization\n\nPPO is a policy gradient method and can be used for environments with either discrete or continuous action spaces.\nIt trains a stochastic policy in an on-policy way. Also, it utilizes the actor critic method. The actor maps the\nobservation to an action and the critic gives an expectation of the rewards of the agent for the observation given.\nFirstly, it collects a set of trajectories for each epoch by sampling from the latest version of the stochastic policy.\nThen, the rewards-to-go and the advantage estimates are computed in order to update the policy and fit the value function.\nThe policy is updated via a stochastic gradient ascent optimizer, while the value function is fitted via some gradient descent algorithm.\nThis procedure is applied for many epochs until the environment is solved.\n\n\n\n- [PPO Original Paper](https://arxiv.org/pdf/1707.06347.pdf)\n- [OpenAI Spinning Up docs - PPO](https://spinningup.openai.com/en/latest/algorithms/ppo.html)\n\n### Note\n\nThis code example uses Keras and Tensorflow v2. It is based on the PPO Original Paper,\nthe OpenAI's Spinning Up docs for PPO, and the OpenAI's Spinning Up implementation of PPO using Tensorflow v1.\n\n[OpenAI Spinning Up Github - PPO](https://github.com/openai/spinningup/blob/master/spinup/algos/tf1/ppo/ppo.py)\n\"\"\"\n\n\"\"\"\n## Libraries\n\nFor this example the following libraries are used:\n\n1. `numpy` for n-dimensional arrays\n2. `tensorflow` and `keras` for building the deep RL PPO agent\n3. `gym` for getting everything we need about the environment\n4. 
`scipy.signal` for calculating the discounted cumulative sums of vectors\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport gym\nimport scipy.signal\nimport time\n\n\"\"\"\n## Functions and class\n\"\"\"\n\n\ndef discounted_cumulative_sums(x, discount):\n # Discounted cumulative sums of vectors for computing rewards-to-go and advantage estimates\n return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]\n\n\nclass Buffer:\n # Buffer for storing trajectories\n def __init__(self, observation_dimensions, size, gamma=0.99, lam=0.95):\n # Buffer initialization\n self.observation_buffer = np.zeros(\n (size, observation_dimensions), dtype=np.float32\n )\n self.action_buffer = np.zeros(size, dtype=np.int32)\n self.advantage_buffer = np.zeros(size, dtype=np.float32)\n self.reward_buffer = np.zeros(size, dtype=np.float32)\n self.return_buffer = np.zeros(size, dtype=np.float32)\n self.value_buffer = np.zeros(size, dtype=np.float32)\n self.logprobability_buffer = np.zeros(size, dtype=np.float32)\n self.gamma, self.lam = gamma, lam\n self.pointer, self.trajectory_start_index = 0, 0\n\n def store(self, observation, action, reward, value, logprobability):\n # Append one step of agent-environment interaction\n self.observation_buffer[self.pointer] = observation\n self.action_buffer[self.pointer] = action\n self.reward_buffer[self.pointer] = reward\n self.value_buffer[self.pointer] = value\n self.logprobability_buffer[self.pointer] = logprobability\n self.pointer += 1\n\n def finish_trajectory(self, last_value=0):\n # Finish the trajectory by computing advantage estimates and rewards-to-go\n path_slice = slice(self.trajectory_start_index, self.pointer)\n rewards = np.append(self.reward_buffer[path_slice], last_value)\n values = np.append(self.value_buffer[path_slice], last_value)\n\n deltas = rewards[:-1] + self.gamma * values[1:] - values[:-1]\n\n self.advantage_buffer[path_slice] = discounted_cumulative_sums(\n deltas, self.gamma * self.lam\n )\n self.return_buffer[path_slice] = discounted_cumulative_sums(\n rewards, self.gamma\n )[:-1]\n\n self.trajectory_start_index = self.pointer\n\n def get(self):\n # Get all data of the buffer and normalize the advantages\n self.pointer, self.trajectory_start_index = 0, 0\n advantage_mean, advantage_std = (\n np.mean(self.advantage_buffer),\n np.std(self.advantage_buffer),\n )\n self.advantage_buffer = (self.advantage_buffer - advantage_mean) / advantage_std\n return (\n self.observation_buffer,\n self.action_buffer,\n self.advantage_buffer,\n self.return_buffer,\n self.logprobability_buffer,\n )\n\n\ndef mlp(x, sizes, activation=tf.tanh, output_activation=None):\n # Build a feedforward neural network\n for size in sizes[:-1]:\n x = layers.Dense(units=size, activation=activation)(x)\n return layers.Dense(units=sizes[-1], activation=output_activation)(x)\n\n\ndef logprobabilities(logits, a):\n # Compute the log-probabilities of taking actions a by using the logits (i.e. 
the output of the actor)\n logprobabilities_all = tf.nn.log_softmax(logits)\n logprobability = tf.reduce_sum(\n tf.one_hot(a, num_actions) * logprobabilities_all, axis=1\n )\n return logprobability\n\n\n# Sample action from actor\n@tf.function\ndef sample_action(observation):\n logits = actor(observation)\n action = tf.squeeze(tf.random.categorical(logits, 1), axis=1)\n return logits, action\n\n\n# Train the policy by maxizing the PPO-Clip objective\n@tf.function\ndef train_policy(\n observation_buffer, action_buffer, logprobability_buffer, advantage_buffer\n):\n\n with tf.GradientTape() as tape: # Record operations for automatic differentiation.\n ratio = tf.exp(\n logprobabilities(actor(observation_buffer), action_buffer)\n - logprobability_buffer\n )\n min_advantage = tf.where(\n advantage_buffer > 0,\n (1 + clip_ratio) * advantage_buffer,\n (1 - clip_ratio) * advantage_buffer,\n )\n\n policy_loss = -tf.reduce_mean(\n tf.minimum(ratio * advantage_buffer, min_advantage)\n )\n policy_grads = tape.gradient(policy_loss, actor.trainable_variables)\n policy_optimizer.apply_gradients(zip(policy_grads, actor.trainable_variables))\n\n kl = tf.reduce_mean(\n logprobability_buffer\n - logprobabilities(actor(observation_buffer), action_buffer)\n )\n kl = tf.reduce_sum(kl)\n return kl\n\n\n# Train the value function by regression on mean-squared error\n@tf.function\ndef train_value_function(observation_buffer, return_buffer):\n with tf.GradientTape() as tape: # Record operations for automatic differentiation.\n value_loss = tf.reduce_mean((return_buffer - critic(observation_buffer)) ** 2)\n value_grads = tape.gradient(value_loss, critic.trainable_variables)\n value_optimizer.apply_gradients(zip(value_grads, critic.trainable_variables))\n\n\n\"\"\"\n## Hyperparameters\n\"\"\"\n\n# Hyperparameters of the PPO algorithm\nsteps_per_epoch = 4000\nepochs = 30\ngamma = 0.99\nclip_ratio = 0.2\npolicy_learning_rate = 3e-4\nvalue_function_learning_rate = 1e-3\ntrain_policy_iterations = 80\ntrain_value_iterations = 80\nlam = 0.97\ntarget_kl = 0.01\nhidden_sizes = (64, 64)\n\n# True if you want to render the environment\nrender = False\n\n\"\"\"\n## Initializations\n\"\"\"\n\n# Initialize the environment and get the dimensionality of the\n# observation space and the number of possible actions\nenv = gym.make(\"CartPole-v0\")\nobservation_dimensions = env.observation_space.shape[0]\nnum_actions = env.action_space.n\n\n# Initialize the buffer\nbuffer = Buffer(observation_dimensions, steps_per_epoch)\n\n# Initialize the actor and the critic as keras models\nobservation_input = keras.Input(shape=(observation_dimensions,), dtype=tf.float32)\nlogits = mlp(observation_input, list(hidden_sizes) + [num_actions], tf.tanh, None)\nactor = keras.Model(inputs=observation_input, outputs=logits)\nvalue = tf.squeeze(\n mlp(observation_input, list(hidden_sizes) + [1], tf.tanh, None), axis=1\n)\ncritic = keras.Model(inputs=observation_input, outputs=value)\n\n# Initialize the policy and the value function optimizers\npolicy_optimizer = keras.optimizers.Adam(learning_rate=policy_learning_rate)\nvalue_optimizer = keras.optimizers.Adam(learning_rate=value_function_learning_rate)\n\n# Initialize the observation, episode return and episode length\nobservation, episode_return, episode_length = env.reset(), 0, 0\n\n\"\"\"\n## Train\n\"\"\"\n# Iterate over the number of epochs\nfor epoch in range(epochs):\n # Initialize the sum of the returns, lengths and number of episodes for each epoch\n sum_return = 0\n sum_length = 0\n num_episodes = 
0\n\n # Iterate over the steps of each epoch\n for t in range(steps_per_epoch):\n if render:\n env.render()\n\n # Get the logits, action, and take one step in the environment\n observation = observation.reshape(1, -1)\n logits, action = sample_action(observation)\n observation_new, reward, done, _ = env.step(action[0].numpy())\n episode_return += reward\n episode_length += 1\n\n # Get the value and log-probability of the action\n value_t = critic(observation)\n logprobability_t = logprobabilities(logits, action)\n\n # Store obs, act, rew, v_t, logp_pi_t\n buffer.store(observation, action, reward, value_t, logprobability_t)\n\n # Update the observation\n observation = observation_new\n\n # Finish trajectory if reached to a terminal state\n terminal = done\n if terminal or (t == steps_per_epoch - 1):\n last_value = 0 if done else critic(observation.reshape(1, -1))\n buffer.finish_trajectory(last_value)\n sum_return += episode_return\n sum_length += episode_length\n num_episodes += 1\n observation, episode_return, episode_length = env.reset(), 0, 0\n\n # Get values from the buffer\n (\n observation_buffer,\n action_buffer,\n advantage_buffer,\n return_buffer,\n logprobability_buffer,\n ) = buffer.get()\n\n # Update the policy and implement early stopping using KL divergence\n for _ in range(train_policy_iterations):\n kl = train_policy(\n observation_buffer, action_buffer, logprobability_buffer, advantage_buffer\n )\n if kl > 1.5 * target_kl:\n # Early Stopping\n break\n\n # Update the value function\n for _ in range(train_value_iterations):\n train_value_function(observation_buffer, return_buffer)\n\n # Print mean return and length for each epoch\n print(\n f\" Epoch: {epoch + 1}. Mean Return: {sum_return / num_episodes}. Mean Length: {sum_length / num_episodes}\"\n )\n\n\"\"\"\n## Visualizations\n\nBefore training:\n\n\n\nAfter 8 epochs of training:\n\n\n\nAfter 20 epochs of training:\n\n\n\"\"\"\n"
] |
[
[
"tensorflow.random.categorical",
"tensorflow.minimum",
"tensorflow.GradientTape",
"numpy.zeros",
"tensorflow.where",
"tensorflow.one_hot",
"numpy.mean",
"tensorflow.keras.layers.Dense",
"numpy.std",
"tensorflow.keras.Model",
"tensorflow.nn.log_softmax",
"tensorflow.reduce_sum",
"numpy.append",
"tensorflow.keras.Input",
"tensorflow.keras.optimizers.Adam"
]
] |
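The discounted_cumulative_sums helper in the PPO example above leans on an lfilter identity that is easy to miss. A small check (the reward vector and gamma below are illustrative) showing it matches the explicit backward recursion used for rewards-to-go, and with discount gamma * lam for the GAE advantages:

    import numpy as np
    import scipy.signal

    def discounted_cumulative_sums(x, discount):
        # Same one-liner as in the example above
        return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

    def explicit_rewards_to_go(rewards, discount):
        # Backward recursion: G_t = r_t + discount * G_{t+1}
        out = np.zeros_like(rewards, dtype=np.float64)
        running = 0.0
        for t in reversed(range(len(rewards))):
            running = rewards[t] + discount * running
            out[t] = running
        return out

    rewards = np.array([1.0, 1.0, 1.0, 0.0, 2.0])
    gamma = 0.99
    print(discounted_cumulative_sums(rewards, gamma))
    print(explicit_rewards_to_go(rewards, gamma))
    # The two outputs agree; the buffer applies the same identity to the
    # TD residuals (deltas) with discount gamma * lam to get advantages.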
mikobski/Robot-Inspekcyjny
|
[
"925491fc43b71bdaa54dccf60d38da59d244181d"
] |
[
"robot/src/vision_to_mavros/scripts/calibrate_extrinsics.py"
] |
[
"#!/usr/bin/env python3\n######################################################\n## Calibrating the extrinsics between T265 and D4xx ##\n## Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355\n## with changes and modifications.\n######################################################\n\n######################################################\n#\n# General steps:\n# 1. Mount the two cameras rigidly\n# 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection\n# - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf\n# - Measure the actual printed grid size of the squares and modify size.\n# 3. Modify the script:\n# - Change grid_H, grid_W and size according to the actual printed checkerboard.\n# - Change the path and file_name if necessary (ex: use this script as standalone).\n# 4. Run the script online:\n# - python calibrate_extrinsics.py\n# 5. The results include intrinsics (save file) and extrinsics (terminal output)\n# \n######################################################\n\nfrom __future__ import print_function\n\nimport pyrealsense2 as rs\nimport numpy as np\nnp.set_printoptions(suppress=True,precision=5)\nimport cv2\nassert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'\nimport os\nimport shutil\nimport json\nimport argparse\nimport glob\nfrom collections import OrderedDict\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--SN_T265', help='serial number of T265')\nparser.add_argument('--SN_D4xx', help='serial number of D4xx')\nparser.add_argument('--path', default=\"calibration_results\", help='image path')\nparser.add_argument('--file_name', default=\"/intrinsics.json\", help='intrinsics calibration file name')\nparser.add_argument('--save_tmp', default=False, help='save the temporary files of this program, useful for debugging purposes')\nparser.add_argument('--grid_H', default=8, help='grid height (inner corners)')\nparser.add_argument('--grid_W', default=6, help='grid width (inner corners)')\nparser.add_argument('--size', default=0.0282, help='grid side length')\nparser.add_argument('--calibrate', default=False, help='run calibration (only)', action='store_true')\nparser.add_argument('--visualize', default=True, help='with GUI', action='store_true')\nargs = parser.parse_args()\nCHECKERBOARD = (args.grid_H, args.grid_W)\nSIDE_LENGTH = args.size\n\ntmp_folder = args.path + \"/tmp\"\n\ndef add_camera_calibration(intrinsics, streams = None):\n cam = {}\n cam['center_px'] = [intrinsics.ppx, intrinsics.ppy]\n cam['focal_length_px'] = [intrinsics.fx, intrinsics.fy]\n cam['distortion'] = {}\n cam['distortion']['type'] = 'kannalabrandt4'\n cam['distortion']['k'] = intrinsics.coeffs[:4]\n if streams:\n ext = streams[\"cam1\"].get_extrinsics_to(streams[\"pose\"]) # w.r.t.\n #print(ext)\n cam[\"extrinsics\"] = {}\n cam[\"extrinsics\"][\"T\"] = ext.translation\n #print(ext.rotation)\n cam[\"extrinsics\"][\"R\"] = ext.rotation\n return cam\n\ndef save_intrinsics(directory, file_name, intrinsics, streams):\n D = OrderedDict() # in order (cam1,cam2)\n D['cameras'] = []\n D['cameras'].append(add_camera_calibration(intrinsics[\"cam1\"], streams))\n D['cameras'].append(add_camera_calibration(intrinsics[\"cam2\"]))\n\n if not os.path.exists(directory):\n os.mkdir(directory)\n with open(directory + file_name, 'w') as f:\n json.dump(D, f, indent=4)\n print(\"Intrinsics output written to \" + 
directory + file_name)\n\n\ndef read_calibration(cam, extrinsics = False):\n #print(\"read_calibration\")\n # intrinsics\n K = np.array([[cam['focal_length_px'][0], 0, cam['center_px'][0]],\n [ 0, cam['focal_length_px'][1], cam['center_px'][1]],\n [ 0, 0, 1]])\n D = np.array(cam['distortion']['k'])\n\n if extrinsics:\n H = np.eye(4)\n H[:3,:3] = np.reshape(cam[\"extrinsics\"][\"R\"],(3,3))\n H[:3,3] = cam[\"extrinsics\"][\"T\"]\n #print(H)\n return (K, D, H)\n return (K, D)\n\ndef load_calibration(directory, file_name):\n with open(directory + file_name, 'r') as f:\n D = json.load(f)\n\n (K1, D1, H1) = read_calibration(D['cameras'][0], True)\n (K2, D2) = read_calibration(D['cameras'][1])\n return (K1, D1, K2, D2, H1)\n\ndef find_realsense_serial_no(type):\n\n camera_name = ['Intel RealSense T265', 'Intel RealSense D435']\n\n # Get realsense pipeline handle\n pipe = rs.pipeline()\n\n # Find the T265\n devices = rs.context().devices\n for i in range(len(devices)):\n if (devices[i].get_info(rs.camera_info.name) == camera_name[type]):\n print('Found one connected ' + camera_name[type] + ' with serial no:', devices[i].get_info(rs.camera_info.serial_number))\n return devices[i].get_info(rs.camera_info.serial_number)\n\n print('No ' + camera_name[type] + ' found, please check connection or input serial manually')\n return None\n\nif not args.calibrate:\n # Obtain the serial number of the cameras, either automatically or from user's input\n print(\"Trying to connect devices...\")\n serial_t265 = None\n serial_d4xx = None\n\n if (not args.SN_T265):\n serial_t265 = find_realsense_serial_no(0)\n else:\n serial_t265 = args.SN_T265\n\n if (not args.SN_D4xx):\n serial_d4xx = find_realsense_serial_no(1)\n else:\n serial_d4xx = args.SN_D4xx\n\n if (not serial_t265) or (not serial_d4xx):\n print(\"Specify serial numbers --SN_T265 and --SN_D4xx (for online calibration, or --calibrate for prerecorded images with --path path to folder)\")\n exit()\n\n # cam 1\n pipe1 = rs.pipeline()\n cfg1 = rs.config()\n cfg1.enable_device(serial_t265)\n pipe1.start(cfg1)\n\n # cam 2\n pipe2 = rs.pipeline()\n cfg2 = rs.config()\n cfg2.enable_device(serial_d4xx)\n cfg2.enable_all_streams()\n pipe2_profile = pipe2.start(cfg2)\n sensor_depth = pipe2_profile.get_device().first_depth_sensor()\n sensor_depth.set_option(rs.option.emitter_enabled, 0) # turn OFF projector\n\n try:\n # Retreive the stream and intrinsic properties for both cameras\n profile1 = pipe1.get_active_profile()\n profile2 = pipe2.get_active_profile()\n # future improvements: make both stream configureable\n streams = {\"cam1\" : profile1.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),\n \"pose\" : profile1.get_stream(rs.stream.pose),\n \"cam2\" : profile2.get_stream(rs.stream.infrared, 1).as_video_stream_profile()} # IR1\n #\"cam2\" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} # test\n intrinsics = {\"cam1\" : streams[\"cam1\"].get_intrinsics(),\n \"cam2\" : streams[\"cam2\"].get_intrinsics()}\n #print(\"cam1:\", intrinsics[\"cam1\"])\n #print(\"cam2:\", intrinsics[\"right\"])\n\n save_intrinsics(args.path, args.file_name, intrinsics, streams)\n\n # capture images\n i = 0\n print(\"Press 's' to save image.\\nPress 'q' or 'c' to quit recording and start the calibration.\")\n while True:\n # cam 1\n frames1 = pipe1.wait_for_frames()\n f_fe1 = frames1.get_fisheye_frame(1) # left fisheye\n f_fe2 = frames1.get_fisheye_frame(2) # right fisheye\n if not f_fe1 or not f_fe2:\n continue\n img_fe1 = np.asanyarray(f_fe1.get_data())\n 
img_fe2 = np.asanyarray(f_fe2.get_data())\n\n # cam 2\n frames2 = pipe2.wait_for_frames()\n f_ir1 = frames2.get_infrared_frame(1) # left infrared\n f_ir2 = frames2.get_infrared_frame(2) # right infrared\n f_color = frames2.get_color_frame()\n if not f_ir1 or not f_ir2 or not f_color:\n continue\n img_ir1 = np.asanyarray(f_ir1.get_data())\n img_ir2 = np.asanyarray(f_ir2.get_data())\n img_color = np.asanyarray(f_color.get_data())\n\n # TODO: configure streams\n img1 = img_fe1\n img2 = img_ir1\n\n # display\n cv2.imshow('cam1', img1)\n cv2.imshow('cam2', img2)\n\n # save or quit\n k = cv2.waitKey(1)\n if k == ord('s'):\n print(\"'s' key pressed. Saving temp images..\")\n if not os.path.exists(tmp_folder):\n os.mkdir(tmp_folder)\n cv2.imwrite(tmp_folder + '/fe1_' + str(i) + '.png', img_fe1)\n cv2.imwrite(tmp_folder + '/fe2_' + str(i) + '.png', img_fe2)\n cv2.imwrite(tmp_folder + '/ir1_' + str(i) + '.png', img_ir1)\n # cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2)\n cv2.imwrite(tmp_folder + '/color_' + str(i) + '.png', img_color)\n print(\"Saved temp images in temp folder \" + tmp_folder)\n i = i+1\n\n if k == ord('q') or k == ord('c'):\n break\n\n finally:\n pipe1.stop()\n pipe2.stop()\n\n\n# calibrate\nprint(\"Calibrate extrinsics now...\")\n\n# arrays to store detections\nP3 = [] # w.r.t. target frame\nP2_1 = [] # in image #1\nP2_2 = [] # in image #2\n\n# TODO: configure streams\nimages1 = glob.glob(tmp_folder + '/fe1_*')\n#images2 = glob.glob(tmp_folder + '/fe2_*') # test\nimages2 = glob.glob(tmp_folder + '/ir1_*')\nimages1.sort()\nimages2.sort()\n#print(images1)\n#print(images2)\n\nif len(images1) == len(images2) == 0:\n print(\"No images found. Exit.\")\n exit(0)\n\n\ntry:\n for i, fname in enumerate(images1):\n img1 = cv2.imread(images1[i])\n img2 = cv2.imread(images2[i])\n\n gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n # detect\n ret1, corners1 = cv2.findChessboardCorners(gray1, CHECKERBOARD, None)\n ret2, corners2 = cv2.findChessboardCorners(gray2, CHECKERBOARD, None)\n\n if ret1 and ret2:\n # subpixel refinement\n criteria_sub = (cv2.TermCriteria_COUNT + cv2.TERM_CRITERIA_EPS, 10, 1e-1)\n rt = cv2.cornerSubPix(gray1, corners1, (7, 7), (-1, -1), criteria_sub)\n P2_1.append(corners1)\n if args.visualize:\n ret1 = cv2.drawChessboardCorners(img1, CHECKERBOARD, corners1, ret1)\n cv2.imshow(\"img1\", img1)\n cv2.waitKey(200)\n\n rt = cv2.cornerSubPix(gray2, corners2, (7, 7), (-1, -1), criteria_sub)\n P2_2.append(corners2)\n if args.visualize:\n ret2 = cv2.drawChessboardCorners(img2, CHECKERBOARD, corners2, ret2)\n cv2.imshow(\"img2\", img2)\n cv2.waitKey(200)\nexcept cv2.error as e:\n print(\"Error: \", e)\n\n# calibration (stereo extrinsics)\nR = np.zeros((1, 1, 3), dtype=np.float64)\nT = np.zeros((1, 1, 3), dtype=np.float64)\n\nN = len(P2_1) # number of successful detections\n\np3d = np.zeros( (CHECKERBOARD[0]*CHECKERBOARD[1], 1, 3) , np.float64)\np3d[:,0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)\n\n# fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. 
(N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)!\nP3 = np.array([p3d]*N, dtype=np.float64)\nP2_1 = np.asarray(P2_1, dtype=np.float64)\nP2_2 = np.asarray(P2_2, dtype=np.float64)\n\nP3 = np.reshape(P3, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 3))*SIDE_LENGTH\nP2_1 = np.reshape(P2_1, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))\nP2_2 = np.reshape(P2_2, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))\n\n(K1, D1, K2, D2, H1) = load_calibration(args.path, args.file_name)\n\ntry:\n (rms, _, _, _, _, R, T) = \\\n cv2.fisheye.stereoCalibrate(\n P3,\n P2_1,\n P2_2,\n K1,\n D1,\n K2,\n D2,\n (0,0), # only used to initialize intrinsics when no intrinsics provided\n R,\n T,\n cv2.fisheye.CALIB_FIX_INTRINSIC # extrinsics only\n )\nexcept cv2.error as e:\n print(\"Error: \", e)\n print(\"Please make sure that the checkerboard exists in the images. See tmp images in \" + tmp_folder + \" to debug.\")\n exit()\n\nprint(\"RMS:\", rms)\n\nH_cam2_cam1 = np.eye(4)\nH_cam2_cam1[:3,:3] = R\nH_cam2_cam1[:3,3] = T.flatten()\n\n# w.r.t. pose\nH_ir1_fe1 = H_cam2_cam1 # TODO: configure\nH_pose_fe1 = H1\n\nH_pose_ir1 = H_pose_fe1.dot( np.linalg.inv(H_ir1_fe1) )\nprint(\"H (ir1 wrt pose) =\", H_pose_ir1)\n\nfn = args.path + \"/H.txt\"\nnp.savetxt(fn, H_pose_ir1, fmt='%.9f')\nprint(\"Extrinsic output written to\", fn)\n\nif not args.save_tmp:\n if os.path.isdir(tmp_folder):\n shutil.rmtree(tmp_folder, ignore_errors=True)\n print(\"Temporary files deleted. If you wish to keep the tmp files, use --save_tmp True.\")"
] |
[
[
"numpy.array",
"numpy.savetxt",
"numpy.reshape",
"numpy.asarray",
"numpy.zeros",
"numpy.set_printoptions",
"numpy.eye",
"numpy.linalg.inv"
]
] |
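The last step of calibrate_extrinsics.py packs the stereo result (R, T) into a 4x4 homogeneous transform and chains it with the fisheye-to-pose extrinsic. A small sketch of that bookkeeping, using made-up rotation and translation values rather than real calibration output:

    import numpy as np

    def make_homogeneous(R, T):
        """Pack a 3x3 rotation and a 3-vector translation into a 4x4 transform."""
        H = np.eye(4)
        H[:3, :3] = np.asarray(R).reshape(3, 3)
        H[:3, 3] = np.asarray(T).ravel()
        return H

    # Illustrative values only: identity rotations and small baseline offsets
    H_ir1_fe1 = make_homogeneous(np.eye(3), [0.05, 0.0, 0.0])   # cam2 w.r.t. cam1
    H_pose_fe1 = make_homogeneous(np.eye(3), [0.0, 0.01, 0.0])  # cam1 w.r.t. pose

    # Same composition as in the script: ir1 expressed in the pose frame
    H_pose_ir1 = H_pose_fe1.dot(np.linalg.inv(H_ir1_fe1))
    print(H_pose_ir1)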
Hammer7/Flowers-TF-Lite
|
[
"e98f1ce1c354ce4e09a2c045364fa518702619c5"
] |
[
"flowers_tf_lite.py"
] |
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n#Original code can be found at\r\n#https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/flowers_tf_lite.ipynb#scrollTo=aCLb_yV5JfF3\r\n\r\nimport tensorflow as tf\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nIMAGE_SIZE = 224\r\nBATCH_SIZE = 64\r\n\r\ndef download_flower_dataset():\r\n _URL = \"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz\"\r\n zip_file = tf.keras.utils.get_file(origin=_URL,\r\n fname=\"flower_photos.tgz\",\r\n extract=True)\r\n return os.path.join(os.path.dirname(zip_file), 'flower_photos')\r\n\r\n\r\ndef create_image_batch_generator(base_dir):\r\n datagen = tf.keras.preprocessing.image.ImageDataGenerator(\r\n rescale=1./255,\r\n validation_split=0.2)\r\n\r\n train_generator = datagen.flow_from_directory(\r\n base_dir,\r\n target_size=(IMAGE_SIZE, IMAGE_SIZE),\r\n batch_size=BATCH_SIZE,\r\n subset='training')\r\n\r\n val_generator = datagen.flow_from_directory(\r\n base_dir,\r\n target_size=(IMAGE_SIZE, IMAGE_SIZE),\r\n batch_size=BATCH_SIZE,\r\n subset='validation')\r\n \r\n return train_generator, val_generator\r\n\r\n \r\ndef save_labels(train_generator):\r\n for image_batch, label_batch in train_generator:\r\n break\r\n\r\n print(image_batch.shape, label_batch.shape)\r\n print (train_generator.class_indices)\r\n\r\n labels = '\\n'.join(sorted(train_generator.class_indices.keys()))\r\n\r\n with open('labels.txt', 'w') as f:\r\n f.write(labels)\r\n\r\n\r\ndef download_mobilenet_v2_model():\r\n # Create the base model from the pre-trained model MobileNet V2\r\n IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)\r\n base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\r\n include_top=False,\r\n weights='imagenet')\r\n\r\n model = tf.keras.Sequential([\r\n base_model,\r\n tf.keras.layers.Conv2D(32, 3, activation='relu'),\r\n tf.keras.layers.Dropout(0.2), \r\n tf.keras.layers.GlobalAveragePooling2D(),\r\n tf.keras.layers.Dense(5, activation='softmax')\r\n ])\r\n\r\n # Let's take a look to see how many layers are in the base model\r\n print(\"Number of layers in the base model: \", len(base_model.layers))\r\n\r\n return base_model, model\r\n\r\ndef run_transfer_learning(base_model, model, train_generator, val_generator):\r\n base_model.trainable = False\r\n model.compile(optimizer=tf.keras.optimizers.Adam(),\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n model.summary()\r\n print('Number of trainable variables = {}'.format(len(model.trainable_variables)))\r\n\r\n epochs = 10\r\n history = model.fit(train_generator,\r\n epochs=epochs,\r\n validation_data=val_generator)\r\n return history\r\n\r\ndef run_fine_tuning(base_model, model, train_generator, val_generator):\r\n base_model.trainable = True\r\n # Fine tune from this layer onwards\r\n fine_tune_at = 100\r\n\r\n # Freeze all the layers before the `fine_tune_at` layer\r\n for layer in 
base_model.layers[:fine_tune_at]:\r\n layer.trainable = False\r\n\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer = tf.keras.optimizers.Adam(1e-5),\r\n metrics=['accuracy'])\r\n model.summary()\r\n\r\n print('Number of trainable variables = {}'.format(len(model.trainable_variables)))\r\n\r\n history = model.fit(train_generator,\r\n epochs=5,\r\n validation_data=val_generator)\r\n return history\r\n\r\ndef save_model_as_tflite(model):\r\n saved_model_dir = 'fine_tuning'\r\n\r\n tf.saved_model.save(model, saved_model_dir)\r\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\r\n tflite_model = converter.convert()\r\n\r\n with open('model.tflite', 'wb') as f:\r\n f.write(tflite_model)\r\n\r\ndef plot_figure(history, fig_name):\r\n acc = history.history['accuracy']\r\n val_acc = history.history['val_accuracy']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n\r\n plt.figure(figsize=(8, 8))\r\n plt.subplot(2, 1, 1)\r\n plt.plot(acc, label='Training Accuracy')\r\n plt.plot(val_acc, label='Validation Accuracy')\r\n plt.legend(loc='lower right')\r\n plt.ylabel('Accuracy')\r\n plt.ylim([min(plt.ylim()),1])\r\n plt.title('Training and Validation Accuracy')\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.plot(loss, label='Training Loss')\r\n plt.plot(val_loss, label='Validation Loss')\r\n plt.legend(loc='upper right')\r\n plt.ylabel('Cross Entropy')\r\n plt.ylim([0,1.0])\r\n plt.title('Training and Validation Loss')\r\n plt.xlabel('epoch')\r\n plt.show()\r\n plt.savefig(fig_name)\r\n \r\nif __name__ == '__main__':\r\n print(tf.__version__)\r\n base_dir = download_flower_dataset()\r\n train_generator, val_generator = create_image_batch_generator(base_dir)\r\n save_labels(train_generator)\r\n\r\n base_model, model = download_mobilenet_v2_model() #download without top layer and add top layer \r\n\r\n history = run_transfer_learning(base_model, model, train_generator, val_generator)\r\n plot_figure(history, 'transfer_learning.png')\r\n\r\n history_fine = run_fine_tuning(base_model, model, train_generator, val_generator)\r\n save_model_as_tflite(model)\r\n plot_figure(history_fine, 'fine_tuning.png')\r\n\r\n"
] |
[
[
"tensorflow.keras.utils.get_file",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.saved_model.save",
"matplotlib.pyplot.savefig",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.subplot",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel"
]
] |
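run_fine_tuning above unfreezes the MobileNetV2 base but keeps its first 100 layers frozen, then recompiles with a smaller learning rate. A minimal sketch of that partial-freezing pattern on a toy base model (the layer count, cut-off index, learning rate, and loss below are illustrative assumptions):

    import tensorflow as tf

    # Toy stand-in for a pretrained base: a few stacked Dense layers
    inputs = tf.keras.Input(shape=(32,))
    x = inputs
    for _ in range(6):
        x = tf.keras.layers.Dense(32, activation='relu')(x)
    base_model = tf.keras.Model(inputs, x)

    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.Dense(5, activation='softmax'),
    ])

    # Transfer learning phase: the whole base is frozen
    base_model.trainable = False
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='categorical_crossentropy', metrics=['accuracy'])

    # Fine-tuning phase: unfreeze the base, then re-freeze the earliest layers
    fine_tune_at = 3
    base_model.trainable = True
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False

    # Recompile so the new trainable set takes effect, with a lower learning rate
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    print('Trainable variables after fine-tune setup:', len(model.trainable_variables))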
FightingSrain/tianshou
|
[
"bd9c3c7f8d144448c44a350828b2c5222298bd8e"
] |
[
"tianshou/utils/net/discrete.py"
] |
[
"import torch\nimport numpy as np\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass Actor(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, preprocess_net, action_shape, hidden_layer_size=128):\n super().__init__()\n self.preprocess = preprocess_net\n self.last = nn.Linear(hidden_layer_size, np.prod(action_shape))\n\n def forward(self, s, state=None, info={}):\n r\"\"\"s -> Q(s, \\*)\"\"\"\n logits, h = self.preprocess(s, state)\n logits = F.softmax(self.last(logits), dim=-1)\n return logits, h\n\n\nclass Critic(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, preprocess_net, hidden_layer_size=128):\n super().__init__()\n self.preprocess = preprocess_net\n self.last = nn.Linear(hidden_layer_size, 1)\n\n def forward(self, s, **kwargs):\n \"\"\"s -> V(s)\"\"\"\n logits, h = self.preprocess(s, state=kwargs.get('state', None))\n logits = self.last(logits)\n return logits\n\n\nclass DQN(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, h, w, action_shape, device='cpu'):\n super(DQN, self).__init__()\n self.device = device\n\n self.conv1 = nn.Conv2d(4, 16, kernel_size=5, stride=2)\n self.bn1 = nn.BatchNorm2d(16)\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)\n self.bn2 = nn.BatchNorm2d(32)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)\n self.bn3 = nn.BatchNorm2d(32)\n\n def conv2d_size_out(size, kernel_size=5, stride=2):\n return (size - (kernel_size - 1) - 1) // stride + 1\n\n convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))\n convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))\n linear_input_size = convw * convh * 32\n self.fc = nn.Linear(linear_input_size, 512)\n self.head = nn.Linear(512, action_shape)\n\n def forward(self, x, state=None, info={}):\n r\"\"\"x -> Q(x, \\*)\"\"\"\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, device=self.device, dtype=torch.float32)\n x = x.permute(0, 3, 1, 2)\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = self.fc(x.reshape(x.size(0), -1))\n return self.head(x), state\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"numpy.prod",
"torch.tensor"
]
] |
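The DQN class above sizes its fully connected layer by applying conv2d_size_out once per convolution. A quick worked computation of that formula (an 84x84 input frame is assumed here purely for illustration):

    def conv2d_size_out(size, kernel_size=5, stride=2):
        # Output width/height of an unpadded Conv2d layer
        return (size - (kernel_size - 1) - 1) // stride + 1

    h = w = 84  # illustrative frame size
    for _ in range(3):  # three conv layers, each kernel 5 / stride 2
        h, w = conv2d_size_out(h), conv2d_size_out(w)

    print(h, w)        # 7 7
    print(h * w * 32)  # 1568 input features for the first Linear layer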
pjamesjoyce/brightway2-io
|
[
"142fc26e2ffc47d8ec474386ee93ab2737a089ce"
] |
[
"bw2io/strategies/generic.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\nfrom eight import *\n\nfrom bw2data import mapping, Database, databases\nfrom ..units import normalize_units as normalize_units_function\nfrom ..errors import StrategyError\nfrom ..utils import activity_hash, DEFAULT_FIELDS\nfrom copy import deepcopy\nimport numbers\nimport numpy as np\nimport pprint\n\n\ndef format_nonunique_key_error(obj, fields, others):\n template = \"\"\"Object in source database can't be uniquely linked to target database.\\nProblematic dataset is:\\n{ds}\\nPossible targets include (at least one not shown):\\n{targets}\"\"\"\n fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']\n _ = lambda x: {field: x.get(field, \"(missing)\") for field in fields_to_print}\n return template.format(\n ds=pprint.pformat(_(obj)),\n targets=pprint.pformat([_(x) for x in others])\n )\n\n\ndef link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,\n internal=False, relink=False):\n \"\"\"Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.\n\n The database to be linked must have uniqueness for each object for the given ``fields``.\n\n If ``kind``, limit objects in ``unlinked`` of type ``kind``.\n\n If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.\n\n If ``internal``, linked ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.\"\"\"\n if kind:\n kind = {kind} if isinstance(kind, str) else kind\n if relink:\n filter_func = lambda x: x.get('type') in kind\n else:\n filter_func = lambda x: x.get('type') in kind and not x.get('input')\n else:\n if relink:\n filter_func = lambda x: True\n else:\n filter_func = lambda x: not x.get('input')\n\n if internal:\n other = unlinked\n\n duplicates, candidates = {}, {}\n try:\n # Other can be a generator, so a bit convoluted\n for ds in other:\n key = activity_hash(ds, fields)\n if key in candidates:\n duplicates.setdefault(key, []).append(ds)\n else:\n candidates[key] = (ds['database'], ds['code'])\n except KeyError:\n raise StrategyError(\"Not all datasets in database to be linked have \"\n \"``database`` or ``code`` attributes\")\n\n for container in unlinked:\n for obj in filter(filter_func, container.get('exchanges', [])):\n key = activity_hash(obj, fields)\n if key in duplicates:\n raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))\n elif key in candidates:\n obj['input'] = candidates[key]\n return unlinked\n\n\ndef assign_only_product_as_production(db):\n \"\"\"Assign only product as reference product.\n\n Skips datasets that already have a reference product or no production exchanges. 
Production exchanges must have a ``name`` and an amount.\n\n Will replace the following activity fields, if not already specified:\n\n * 'name' - name of reference product\n * 'unit' - unit of reference product\n * 'production amount' - amount of reference product\n\n \"\"\"\n for ds in db:\n if ds.get(\"reference product\"):\n continue\n products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']\n if len(products) == 1:\n product = products[0]\n assert product['name']\n ds['reference product'] = product['name']\n ds['production amount'] = product['amount']\n ds['name'] = ds.get('name') or product['name']\n ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'\n return db\n\n\ndef link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):\n \"\"\"Link technosphere exchanges using ``activity_hash`` function.\n\n If ``external_db_name``, link against a different database; otherwise link internally.\n\n If ``fields``, link using only certain fields.\"\"\"\n TECHNOSPHERE_TYPES = {\"technosphere\", \"substitution\", \"production\"}\n if external_db_name is not None:\n if external_db_name not in databases:\n raise StrategyError(\"Can't find external database {}\".format(\n external_db_name))\n other = (obj for obj in Database(external_db_name)\n if obj.get('type', 'process') == 'process')\n internal = False\n else:\n other = None\n internal = True\n return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)\n\n\ndef set_code_by_activity_hash(db, overwrite=False):\n \"\"\"Use ``activity_hash`` to set dataset code.\n\n By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``.\"\"\"\n for ds in db:\n if 'code' not in ds or overwrite:\n ds['code'] = activity_hash(ds)\n return db\n\n\ndef tupleize_categories(db):\n for ds in db:\n if ds.get('categories'):\n ds['categories'] = tuple(ds['categories'])\n for exc in ds.get('exchanges', []):\n if exc.get('categories'):\n exc['categories'] = tuple(exc['categories'])\n return db\n\n\ndef drop_unlinked(db):\n \"\"\"This is the nuclear option - use at your own risk!\"\"\"\n for ds in db:\n ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]\n return db\n\n\ndef normalize_units(db):\n \"\"\"Normalize units in datasets and their exchanges\"\"\"\n for ds in db:\n if 'unit' in ds:\n ds['unit'] = normalize_units_function(ds['unit'])\n for exc in ds.get('exchanges', []):\n if 'unit' in exc:\n exc['unit'] = normalize_units_function(exc['unit'])\n for param in ds.get('parameters', {}).values():\n if 'unit' in param:\n param['unit'] = normalize_units_function(param['unit'])\n return db\n\n\ndef add_database_name(db, name):\n \"\"\"Add database name to datasets\"\"\"\n for ds in db:\n ds['database'] = name\n return db\n\n\ndef convert_uncertainty_types_to_integers(db):\n \"\"\"Generic number conversion function convert to floats. 
Return to integers.\"\"\"\n for ds in db:\n for exc in ds['exchanges']:\n try:\n exc['uncertainty type'] = int(exc['uncertainty type'])\n except:\n pass\n return db\n\n\ndef drop_falsey_uncertainty_fields_but_keep_zeros(db):\n \"\"\"Drop fields like '' but keep zero and NaN.\n\n Note that this doesn't strip `False`, which behaves *exactly* like 0.\n\n \"\"\"\n uncertainty_fields = [\n 'minimum',\n 'maximum',\n 'scale',\n 'shape',\n 'loc',\n ]\n\n def drop_if_appropriate(exc):\n for field in uncertainty_fields:\n if field not in exc or exc[field] == 0:\n continue\n elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):\n continue\n elif not exc[field]:\n del exc[field]\n\n for ds in db:\n for exc in ds['exchanges']:\n drop_if_appropriate(exc)\n return db\n\ndef convert_activity_parameters_to_list(data):\n \"\"\"Convert activity parameters from dictionary to list of dictionaries\"\"\"\n def _(key, value):\n dct = deepcopy(value)\n dct['name'] = key\n return dct\n\n for ds in data:\n if 'parameters' in ds:\n ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]\n\n return data\n"
] |
[
[
"numpy.isnan"
]
] |
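link_iterable_by_fields above links exchanges by hashing a subset of dataset fields and refuses to link when that hash is ambiguous. A simplified sketch of the uniqueness check with a toy hash function (bw2io's activity_hash does more normalisation than this; the field names and datasets below are illustrative):

    from hashlib import md5

    def toy_hash(ds, fields=('name', 'unit', 'location')):
        # Stand-in for bw2io's activity_hash: hash selected fields only
        key = ''.join(str(ds.get(f, '')) for f in fields)
        return md5(key.encode('utf-8')).hexdigest()

    targets = [
        {'database': 'db', 'code': 'a', 'name': 'steel', 'unit': 'kg', 'location': 'DE'},
        {'database': 'db', 'code': 'b', 'name': 'steel', 'unit': 'kg', 'location': 'FR'},
        {'database': 'db', 'code': 'c', 'name': 'steel', 'unit': 'kg', 'location': 'DE'},  # collides with 'a'
    ]

    candidates, duplicates = {}, {}
    for ds in targets:
        key = toy_hash(ds)
        if key in candidates:
            duplicates.setdefault(key, []).append(ds)
        else:
            candidates[key] = (ds['database'], ds['code'])

    exchange = {'name': 'steel', 'unit': 'kg', 'location': 'FR'}
    key = toy_hash(exchange)
    if key in duplicates:
        raise ValueError('ambiguous link: more than one target matches these fields')
    print(candidates[key])  # ('db', 'b'), a unique match, so it is safe to link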
zabop/astropy
|
[
"9f77b9a0ffe18e4c767e36f00e2e8728135c0e11"
] |
[
"astropy/modeling/powerlaws.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nPower law model variants\n\"\"\"\n# pylint: disable=invalid-name\nimport numpy as np\n\nfrom astropy.units import Quantity\n\nfrom .core import Fittable1DModel\nfrom .parameters import InputParameterError, Parameter\n\n__all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D',\n 'ExponentialCutoffPowerLaw1D', 'LogParabola1D', 'Schechter1D']\n\n\nclass PowerLaw1D(Fittable1DModel):\n \"\"\"\n One dimensional power law model.\n\n Parameters\n ----------\n amplitude : float\n Model amplitude at the reference point\n x_0 : float\n Reference point\n alpha : float\n Power law index\n\n See Also\n --------\n BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D\n\n Notes\n -----\n Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha``):\n\n .. math:: f(x) = A (x / x_0) ^ {-\\\\alpha}\n\n \"\"\"\n\n amplitude = Parameter(default=1, description=\"Peak value at the reference point\")\n x_0 = Parameter(default=1, description=\"Reference point\")\n alpha = Parameter(default=1, description=\"Power law index\")\n\n @staticmethod\n def evaluate(x, amplitude, x_0, alpha):\n \"\"\"One dimensional power law model function\"\"\"\n xx = x / x_0\n return amplitude * xx ** (-alpha)\n\n @staticmethod\n def fit_deriv(x, amplitude, x_0, alpha):\n \"\"\"One dimensional power law derivative with respect to parameters\"\"\"\n\n xx = x / x_0\n\n d_amplitude = xx ** (-alpha)\n d_x_0 = amplitude * alpha * d_amplitude / x_0\n d_alpha = -amplitude * d_amplitude * np.log(xx)\n\n return [d_amplitude, d_x_0, d_alpha]\n\n @property\n def input_units(self):\n if self.x_0.unit is None:\n return None\n return {self.inputs[0]: self.x_0.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'x_0': inputs_unit[self.inputs[0]],\n 'amplitude': outputs_unit[self.outputs[0]]}\n\n\nclass BrokenPowerLaw1D(Fittable1DModel):\n \"\"\"\n One dimensional power law model with a break.\n\n Parameters\n ----------\n amplitude : float\n Model amplitude at the break point.\n x_break : float\n Break point.\n alpha_1 : float\n Power law index for x < x_break.\n alpha_2 : float\n Power law index for x > x_break.\n\n See Also\n --------\n PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D\n\n Notes\n -----\n Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha_1`\n for ``alpha_1`` and :math:`\\\\alpha_2` for ``alpha_2``):\n\n .. 
math::\n\n f(x) = \\\\left \\\\{\n \\\\begin{array}{ll}\n A (x / x_{break}) ^ {-\\\\alpha_1} & : x < x_{break} \\\\\\\\\n A (x / x_{break}) ^ {-\\\\alpha_2} & : x > x_{break} \\\\\\\\\n \\\\end{array}\n \\\\right.\n \"\"\"\n\n amplitude = Parameter(default=1, description=\"Peak value at break point\")\n x_break = Parameter(default=1, description=\"Break point\")\n alpha_1 = Parameter(default=1, description=\"Power law index before break point\")\n alpha_2 = Parameter(default=1, description=\"Power law index after break point\")\n\n @staticmethod\n def evaluate(x, amplitude, x_break, alpha_1, alpha_2):\n \"\"\"One dimensional broken power law model function\"\"\"\n\n alpha = np.where(x < x_break, alpha_1, alpha_2)\n xx = x / x_break\n return amplitude * xx ** (-alpha)\n\n @staticmethod\n def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):\n \"\"\"One dimensional broken power law derivative with respect to parameters\"\"\"\n\n alpha = np.where(x < x_break, alpha_1, alpha_2)\n xx = x / x_break\n\n d_amplitude = xx ** (-alpha)\n d_x_break = amplitude * alpha * d_amplitude / x_break\n d_alpha = -amplitude * d_amplitude * np.log(xx)\n d_alpha_1 = np.where(x < x_break, d_alpha, 0)\n d_alpha_2 = np.where(x >= x_break, d_alpha, 0)\n\n return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]\n\n @property\n def input_units(self):\n if self.x_break.unit is None:\n return None\n return {self.inputs[0]: self.x_break.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'x_break': inputs_unit[self.inputs[0]],\n 'amplitude': outputs_unit[self.outputs[0]]}\n\n\nclass SmoothlyBrokenPowerLaw1D(Fittable1DModel):\n \"\"\"One dimensional smoothly broken power law model.\n\n Parameters\n ----------\n amplitude : float\n Model amplitude at the break point.\n x_break : float\n Break point.\n alpha_1 : float\n Power law index for ``x << x_break``.\n alpha_2 : float\n Power law index for ``x >> x_break``.\n delta : float\n Smoothness parameter.\n\n See Also\n --------\n BrokenPowerLaw1D\n\n Notes\n -----\n Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for\n ``x_break``, :math:`\\\\alpha_1` for ``alpha_1``,\n :math:`\\\\alpha_2` for ``alpha_2`` and :math:`\\\\Delta` for\n ``delta``):\n\n .. math::\n\n f(x) = A \\\\left( \\\\frac{x}{x_b} \\\\right) ^ {-\\\\alpha_1}\n \\\\left\\\\{\n \\\\frac{1}{2}\n \\\\left[\n 1 + \\\\left( \\\\frac{x}{x_b}\\\\right)^{1 / \\\\Delta}\n \\\\right]\n \\\\right\\\\}^{(\\\\alpha_1 - \\\\alpha_2) \\\\Delta}\n\n\n The change of slope occurs between the values :math:`x_1`\n and :math:`x_2` such that:\n\n .. math::\n \\\\log_{10} \\\\frac{x_2}{x_b} = \\\\log_{10} \\\\frac{x_b}{x_1}\n \\\\sim \\\\Delta\n\n\n At values :math:`x \\\\lesssim x_1` and :math:`x \\\\gtrsim x_2` the\n model is approximately a simple power law with index\n :math:`\\\\alpha_1` and :math:`\\\\alpha_2` respectively. The two\n power laws are smoothly joined at values :math:`x_1 < x < x_2`,\n hence the :math:`\\\\Delta` parameter sets the \"smoothness\" of the\n slope change.\n\n The ``delta`` parameter is bounded to values greater than 1e-3\n (corresponding to :math:`x_2 / x_1 \\\\gtrsim 1.002`) to avoid\n overflow errors.\n\n The ``amplitude`` parameter is bounded to positive values since\n this model is typically used to represent positive quantities.\n\n\n Examples\n --------\n .. 
plot::\n :include-source:\n\n import numpy as np\n import matplotlib.pyplot as plt\n from astropy.modeling import models\n\n x = np.logspace(0.7, 2.3, 500)\n f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,\n alpha_1=-2, alpha_2=2)\n\n plt.figure()\n plt.title(\"amplitude=1, x_break=20, alpha_1=-2, alpha_2=2\")\n\n f.delta = 0.5\n plt.loglog(x, f(x), '--', label='delta=0.5')\n\n f.delta = 0.3\n plt.loglog(x, f(x), '-.', label='delta=0.3')\n\n f.delta = 0.1\n plt.loglog(x, f(x), label='delta=0.1')\n\n plt.axis([x.min(), x.max(), 0.1, 1.1])\n plt.legend(loc='lower center')\n plt.grid(True)\n plt.show()\n\n \"\"\"\n\n amplitude = Parameter(default=1, min=0, description=\"Peak value at break point\")\n x_break = Parameter(default=1, description=\"Break point\")\n alpha_1 = Parameter(default=-2, description=\"Power law index before break point\")\n alpha_2 = Parameter(default=2, description=\"Power law index after break point\")\n delta = Parameter(default=1, min=1.e-3, description=\"Smoothness Parameter\")\n\n @amplitude.validator\n def amplitude(self, value):\n if np.any(value <= 0):\n raise InputParameterError(\n \"amplitude parameter must be > 0\")\n\n @delta.validator\n def delta(self, value):\n if np.any(value < 0.001):\n raise InputParameterError(\n \"delta parameter must be >= 0.001\")\n\n @staticmethod\n def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):\n \"\"\"One dimensional smoothly broken power law model function\"\"\"\n\n # Pre-calculate `x/x_b`\n xx = x / x_break\n\n # Initialize the return value\n f = np.zeros_like(xx, subok=False)\n\n if isinstance(amplitude, Quantity):\n return_unit = amplitude.unit\n amplitude = amplitude.value\n else:\n return_unit = None\n\n # The quantity `t = (x / x_b)^(1 / delta)` can become quite\n # large. To avoid overflow errors we will start by calculating\n # its natural logarithm:\n logt = np.log(xx) / delta\n\n # When `t >> 1` or `t << 1` we don't actually need to compute\n # the `t` value since the main formula (see docstring) can be\n # significantly simplified by neglecting `1` or `t`\n # respectively. In the following we will check whether `t` is\n # much greater, much smaller, or comparable to 1 by comparing\n # the `logt` value with an appropriate threshold.\n threshold = 30 # corresponding to exp(30) ~ 1e13\n i = logt > threshold\n if i.max():\n # In this case the main formula reduces to a simple power\n # law with index `alpha_2`.\n f[i] = amplitude * xx[i] ** (-alpha_2) / (2. ** ((alpha_1 - alpha_2) * delta))\n\n i = logt < -threshold\n if i.max():\n # In this case the main formula reduces to a simple power\n # law with index `alpha_1`.\n f[i] = amplitude * xx[i] ** (-alpha_1) / (2. ** ((alpha_1 - alpha_2) * delta))\n\n i = np.abs(logt) <= threshold\n if i.max():\n # In this case the `t` value is \"comparable\" to 1, hence we\n # we will evaluate the whole formula.\n t = np.exp(logt[i])\n r = (1. 
+ t) / 2.\n f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)\n\n if return_unit:\n return Quantity(f, unit=return_unit, copy=False)\n return f\n\n @staticmethod\n def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):\n \"\"\"One dimensional smoothly broken power law derivative with respect\n to parameters\"\"\"\n\n # Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in\n # SmoothlyBrokenPowerLaw1D.evaluate)\n xx = x / x_break\n logt = np.log(xx) / delta\n\n # Initialize the return values\n f = np.zeros_like(xx)\n d_amplitude = np.zeros_like(xx)\n d_x_break = np.zeros_like(xx)\n d_alpha_1 = np.zeros_like(xx)\n d_alpha_2 = np.zeros_like(xx)\n d_delta = np.zeros_like(xx)\n\n threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)\n i = logt > threshold\n if i.max():\n f[i] = amplitude * xx[i] ** (-alpha_2) \\\n / (2. ** ((alpha_1 - alpha_2) * delta))\n\n d_amplitude[i] = f[i] / amplitude\n d_x_break[i] = f[i] * alpha_2 / x_break\n d_alpha_1[i] = f[i] * (-delta * np.log(2))\n d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))\n d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))\n\n i = logt < -threshold\n if i.max():\n f[i] = amplitude * xx[i] ** (-alpha_1) \\\n / (2. ** ((alpha_1 - alpha_2) * delta))\n\n d_amplitude[i] = f[i] / amplitude\n d_x_break[i] = f[i] * alpha_1 / x_break\n d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))\n d_alpha_2[i] = f[i] * delta * np.log(2)\n d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))\n\n i = np.abs(logt) <= threshold\n if i.max():\n t = np.exp(logt[i])\n r = (1. + t) / 2.\n f[i] = amplitude * xx[i] ** (-alpha_1) \\\n * r ** ((alpha_1 - alpha_2) * delta)\n\n d_amplitude[i] = f[i] / amplitude\n d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break\n d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))\n d_alpha_2[i] = f[i] * (-delta * np.log(r))\n d_delta[i] = f[i] * (alpha_1 - alpha_2) \\\n * (np.log(r) - t / (1. + t) / delta * np.log(xx[i]))\n\n return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]\n\n @property\n def input_units(self):\n if self.x_break.unit is None:\n return None\n return {self.inputs[0]: self.x_break.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'x_break': inputs_unit[self.inputs[0]],\n 'amplitude': outputs_unit[self.outputs[0]]}\n\n\nclass ExponentialCutoffPowerLaw1D(Fittable1DModel):\n \"\"\"\n One dimensional power law model with an exponential cutoff.\n\n Parameters\n ----------\n amplitude : float\n Model amplitude\n x_0 : float\n Reference point\n alpha : float\n Power law index\n x_cutoff : float\n Cutoff point\n\n See Also\n --------\n PowerLaw1D, BrokenPowerLaw1D, LogParabola1D\n\n Notes\n -----\n Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha``):\n\n .. 
math:: f(x) = A (x / x_0) ^ {-\\\\alpha} \\\\exp (-x / x_{cutoff})\n\n \"\"\"\n\n amplitude = Parameter(default=1, description=\"Peak value of model\")\n x_0 = Parameter(default=1, description=\"Reference point\")\n alpha = Parameter(default=1, description=\"Power law index\")\n x_cutoff = Parameter(default=1, description=\"Cutoff point\")\n\n @staticmethod\n def evaluate(x, amplitude, x_0, alpha, x_cutoff):\n \"\"\"One dimensional exponential cutoff power law model function\"\"\"\n\n xx = x / x_0\n return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)\n\n @staticmethod\n def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):\n \"\"\"One dimensional exponential cutoff power law derivative with respect to parameters\"\"\"\n\n xx = x / x_0\n xc = x / x_cutoff\n\n d_amplitude = xx ** (-alpha) * np.exp(-xc)\n d_x_0 = alpha * amplitude * d_amplitude / x_0\n d_alpha = -amplitude * d_amplitude * np.log(xx)\n d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2\n\n return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]\n\n @property\n def input_units(self):\n if self.x_0.unit is None:\n return None\n return {self.inputs[0]: self.x_0.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'x_0': inputs_unit[self.inputs[0]],\n 'x_cutoff': inputs_unit[self.inputs[0]],\n 'amplitude': outputs_unit[self.outputs[0]]}\n\n\nclass LogParabola1D(Fittable1DModel):\n \"\"\"\n One dimensional log parabola model (sometimes called curved power law).\n\n Parameters\n ----------\n amplitude : float\n Model amplitude\n x_0 : float\n Reference point\n alpha : float\n Power law index\n beta : float\n Power law curvature\n\n See Also\n --------\n PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D\n\n Notes\n -----\n Model formula (with :math:`A` for ``amplitude`` and\n :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``):\n\n .. 
math:: f(x) = A \\\\left(\n \\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}}\n \\\\right )}}\n\n \"\"\"\n\n amplitude = Parameter(default=1, description=\"Peak value of model\")\n x_0 = Parameter(default=1, description=\"Reference point\")\n alpha = Parameter(default=1, description=\"Power law index\")\n beta = Parameter(default=0, description=\"Power law curvature\")\n\n @staticmethod\n def evaluate(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola model function\"\"\"\n\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent\n\n @staticmethod\n def fit_deriv(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola derivative with respect to parameters\"\"\"\n\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]\n\n @property\n def input_units(self):\n if self.x_0.unit is None:\n return None\n return {self.inputs[0]: self.x_0.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'x_0': inputs_unit[self.inputs[0]],\n 'amplitude': outputs_unit[self.outputs[0]]}\n\n\nclass Schechter1D(Fittable1DModel):\n r\"\"\"\n Schechter luminosity function (`Schechter 1976\n <https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_),\n parameterized in terms of magnitudes.\n\n Parameters\n ----------\n phi_star : float\n The normalization factor in units of number density.\n\n m_star : float\n The characteristic magnitude where the power-law form of the\n function cuts off. Must not have units.\n\n alpha : float\n The power law index, also known as the faint-end slope. Must not\n have units.\n\n See Also\n --------\n PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D\n\n Notes\n -----\n Model formula (with :math:`\\phi^{*}` for ``phi_star``, :math:`M^{*}`\n for ``m_star``, and :math:`\\alpha` for ``alpha``):\n\n .. math::\n\n n(M) \\ dM = (0.4 \\ln 10) \\ \\phi^{*} \\\n [{10^{0.4 (M^{*} - M)}}]^{\\alpha + 1} \\\n \\exp{[-10^{0.4 (M^{*} - M)}]} \\ dM\n\n ``phi_star`` is the normalization factor in units of number density.\n ``m_star`` is the characteristic magnitude where the power-law form\n of the function cuts off into the exponential form. ``alpha`` is\n the power-law index, defining the faint-end slope of the luminosity\n function.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n from astropy.modeling.models import Schechter1D\n import astropy.units as u\n import matplotlib.pyplot as plt\n import numpy as np\n\n phi_star = 4.3e-4 * (u.Mpc ** -3)\n m_star = -20.26\n alpha = -1.98\n model = Schechter1D(phi_star, m_star, alpha)\n mag = np.linspace(-25, -17)\n\n fig, ax = plt.subplots()\n ax.plot(mag, model(mag))\n ax.set_yscale('log')\n ax.set_xlim(-22.6, -17)\n ax.set_ylim(1.e-7, 1.e-2)\n ax.set_xlabel('$M_{UV}$')\n ax.set_ylabel('$\\phi$ [mag$^{-1}$ Mpc$^{-3}]$')\n\n References\n ----------\n .. [1] Schechter 1976; ApJ 203, 297\n (https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract)\n\n .. 
[2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_\n \"\"\"\n\n phi_star = Parameter(default=1., description=('Normalization factor '\n 'in units of number density'))\n m_star = Parameter(default=-20., description='Characteristic magnitude')\n alpha = Parameter(default=-1., description='Faint-end slope')\n\n @staticmethod\n def evaluate(mag, phi_star, m_star, alpha):\n \"\"\"Schechter luminosity function model function.\"\"\"\n if isinstance(mag, Quantity) or isinstance(m_star, Quantity):\n raise ValueError('mag and m_star must not have units')\n factor = 10 ** (0.4 * (m_star - mag))\n\n return (0.4 * np.log(10) * phi_star * factor**(alpha + 1)\n * np.exp(-factor))\n\n @staticmethod\n def fit_deriv(mag, phi_star, m_star, alpha):\n \"\"\"\n Schechter luminosity function derivative with respect to\n parameters.\n \"\"\"\n if isinstance(mag, Quantity) or isinstance(m_star, Quantity):\n raise ValueError('mag and m_star must not have units')\n factor = 10 ** (0.4 * (m_star - mag))\n\n d_phi_star = 0.4 * np.log(10) * factor**(alpha + 1) * np.exp(-factor)\n func = phi_star * d_phi_star\n d_m_star = ((alpha + 1) * 0.4 * np.log(10) * func\n - (0.4 * np.log(10) * func * factor))\n d_alpha = func * np.log(factor)\n\n return [d_phi_star, d_m_star, d_alpha]\n\n @property\n def input_units(self):\n if self.m_star.unit is None:\n return None\n return {self.inputs[0]: self.m_star.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'m_star': inputs_unit[self.inputs[0]],\n 'phi_star': outputs_unit[self.outputs[0]]}\n"
] |
[
[
"numpy.zeros_like",
"numpy.log",
"numpy.exp",
"numpy.any",
"numpy.where",
"numpy.abs"
]
] |
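The astropy file stored above defines the broken power-law model family. As a hedged, illustrative sketch (assuming astropy and numpy are installed and that `BrokenPowerLaw1D` is importable from `astropy.modeling.models`, as in the file above), evaluating the model around its break point looks like this:

```python
# Illustrative usage sketch only; not part of the stored file above.
import numpy as np
from astropy.modeling.models import BrokenPowerLaw1D

model = BrokenPowerLaw1D(amplitude=1.0, x_break=10.0, alpha_1=1.0, alpha_2=2.5)
x = np.logspace(-1, 3, 5)   # sample points spanning the break point
print(model(x))             # log-log slope -1 below x_break, -2.5 above it
```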
yweweler/single-speaker-tts
|
[
"04aa714e0ba79c932179cc45a0bbfaa28e768cc7"
] |
[
"tacotron/attention.py"
] |
[
"import tensorflow as tf\nfrom tensorflow.contrib.seq2seq.python.ops.attention_wrapper import LuongAttention, \\\n AttentionWrapper, AttentionWrapperState\n\n\nclass AttentionMode:\n \"\"\"\n Enumerator for the Luong style local attention modes.\n\n - See [1]: Effective Approaches to Attention-based Neural Machine Translation,\n http://arxiv.org/abs/1508.04025\n \"\"\"\n # local-m mode.\n MONOTONIC = 'monotonic'\n\n # local-p mode.\n PREDICTIVE = 'predictive'\n\n\nclass AttentionScore:\n \"\"\"\n Enumerator for the three different content-based scoring functions for Luong style attention.\n\n - See [1]: Effective Approaches to Attention-based Neural Machine Translation,\n http://arxiv.org/abs/1508.04025\n \"\"\"\n DOT = 'dot'\n GENERAL = 'general'\n CONCAT = 'concat'\n\n\ndef _luong_local_compute_attention(attention_mechanism, cell_output, attention_state,\n attention_layer):\n \"\"\"Computes the attention and alignments for the Luong style local attention mechanism.\"\"\"\n alignments, next_attention_state = attention_mechanism(\n cell_output, state=attention_state)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded_alignments = tf.expand_dims(alignments, 1)\n\n context_windows = []\n padded_alignment_windows = []\n\n window_start = attention_mechanism.window_start\n window_stop = attention_mechanism.window_stop\n\n pre_padding = attention_mechanism.window_pre_padding\n post_padding = attention_mechanism.window_post_padding\n\n full_pre_padding = attention_mechanism.full_seq_pre_padding\n full_post_padding = attention_mechanism.full_seq_post_padding\n\n for i in range(0, attention_mechanism.const_batch_size):\n # Slice out the window from the memory.\n value_window = attention_mechanism.values[i, window_start[i][0]:window_stop[i][0], :]\n\n # Add zero padding to the slice in order to ensure the window size is (2D+1).\n value_window_paddings = [\n [pre_padding[i][0], post_padding[i][0]],\n [0, 0]\n ]\n value_window = tf.pad(value_window, value_window_paddings, 'CONSTANT')\n\n # Shape information is lost after padding ;(.\n value_window.set_shape((attention_mechanism.window_size,\n attention_mechanism._num_units))\n\n # Calculate the context vector for the current batch entry using only information from\n # teh window.\n context_window = tf.matmul(expanded_alignments[i], value_window)\n context_windows.append(context_window)\n\n if attention_mechanism.force_gaussian is True:\n # Apply gaussian weighting of the window contents.\n point_dist = tf.cast(tf.range(start=window_start[i][0],\n limit=window_stop[i][0],\n delta=1), dtype=tf.float32) - attention_mechanism.p[i][0]\n\n gaussian_weights = tf.exp(-(point_dist ** 2) / 2 * (attention_mechanism.d / 2) ** 2)\n\n __alignments = alignments[i] * gaussian_weights\n else:\n # Use the raw window contents.\n __alignments = alignments[i]\n\n # Add padding to the alignments to get from the window size 2D+1 up to the original\n # memory length.\n alignment_seq_paddings = [\n [full_pre_padding[i][0], full_post_padding[i][0]],\n ]\n __alignments = tf.pad(__alignments, alignment_seq_paddings, 'CONSTANT')\n\n padded_alignment_windows.append(__alignments)\n\n # Stack all context vectors into one tensor.\n context = tf.stack(context_windows)\n # Squeeze out the helper dimension used for calculating the context.\n context = tf.squeeze(context, [1])\n\n # Stack all alignment vectors into one tensor. 
This tensor gives alignments for each encoder\n # step.\n padded_alignment = tf.stack(padded_alignment_windows)\n\n if attention_layer is not None:\n attention = attention_layer(tf.concat([cell_output, context], 1))\n else:\n attention = context\n\n return attention, padded_alignment, padded_alignment\n\n\nclass LocalLuongAttention(LuongAttention):\n \"\"\"\n Implements a Luong-style local attention mechanism.\n\n This implementation supports both monotonic attention as well as predictive attention.\n\n - See [1]: Effective Approaches to Attention-based Neural Machine Translation,\n http://arxiv.org/abs/1508.04025\n \"\"\"\n\n def __init__(self, num_units,\n memory,\n const_batch_size,\n memory_sequence_length=None,\n scale=False,\n probability_fn=None,\n score_mask_value=None,\n dtype=None,\n name=\"LocalLuongAttention\",\n d=10,\n attention_mode=AttentionMode.MONOTONIC,\n score_mode=AttentionScore.DOT,\n force_gaussian=False\n ):\n \"\"\"\n Arguments:\n num_units (int):\n The depth of the attention mechanism. This controls the number of units in the\n memory layer that processes the encoder states into the `keys`.\n\n memory (tf.Tensor):\n The memory to query; usually the output of an RNN encoder.\n The shape is expected to be shape=(batch_size, encoder_max_time, ...)\n\n const_batch_size (int):\n The constant batch size to expect from every batch. Every batch is expected to\n contain exactly `const_batch_size` samples.\n\n memory_sequence_length:\n (optional) Sequence lengths for the batch entries\n in memory. If provided, the memory tensor rows are masked with zeros\n for values past the respective sequence lengths.\n\n scale (boolean):\n Whether to scale the energy term.\n\n probability_fn:\n (optional) A `callable`. Converts the score to\n probabilities. The default is @{tf.nn.softmax}. Other options include\n @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.\n Its signature should be: `probabilities = probability_fn(score)`.\n\n score_mask_value:\n (optional) The mask value for score before passing into\n `probability_fn`. The default is -inf. Only used if\n `memory_sequence_length` is not None.\n\n dtype (tf.DType):\n The data type for the memory layer of the attention mechanism.\n\n name (string):\n Name to use when creating ops.\n\n d (int):\n D parameter controlling the window size and gaussian distribution.\n The window size is set to be `2D + 1`.\n\n attention_mode (AttentionMode):\n The attention mode to use. Can be either `MONOTONIC` or `PREDICTIVE`.\n\n score_mode (AttentionScore):\n The attention scoring function to use. 
Can either be `DOT`, `GENERAL` or `CONCAT`.\n\n force_gaussian (boolean):\n Force a gaussian distribution onto the scores in the attention window.\n Defaults to False.\n \"\"\"\n super().__init__(num_units=num_units,\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n scale=scale,\n probability_fn=probability_fn,\n score_mask_value=score_mask_value,\n dtype=dtype,\n name=name)\n\n # Initialize the decoding time counter.\n # This variable is updated by the `AdvancedAttentionWrapper`.\n self.time = 0\n\n # Calculate the attention window size.\n self.d = d\n self.window_size = 2 * self.d + 1\n\n # Store the attention mode.\n self.attention_mode = attention_mode\n\n # Store the scoring function style to be used.\n self.score_mode = score_mode\n\n # The constant batch size to expect.\n self.const_batch_size = const_batch_size\n\n self.force_gaussian = force_gaussian\n\n def __call__(self, query, state):\n \"\"\"\n Calculate the alignments and next_state for the current decoder output.\n\n Arguments:\n query (tf.Tensor):\n Decoder cell outputs to compare to the keys (memory).\n The shape is expected to be shape=(B, num_units) with B being the batch size\n and `num_units` being the output size of the decoder_cell.\n\n state (tf.Tensor):\n In Luong attention the state is equal to the alignments. Therefore this will\n contain the alignments from the previous decoding step.\n\n Returns:\n (alignments, next_state):\n alignments (tf.Tensor):\n The normalized attention scores for the attention window. The shape is\n shape=(B, 2D+1), with B being the batch size and `2D+1` being the window size.\n next_state (tf.Tensor):\n In Luong attention this is equal to `alignments`.\n \"\"\"\n with tf.variable_scope(None, \"local_luong_attention\", [query]):\n # Get the depth of the memory values.\n num_units = self._keys.get_shape()[-1]\n\n # Get the source sequence length from memory.\n source_seq_length = tf.shape(self._keys)[1]\n\n if self.attention_mode == AttentionMode.PREDICTIVE:\n # Predictive selection of the attention window position.\n vp = tf.get_variable(name=\"local_v_p\", shape=[num_units, 1], dtype=tf.float32)\n wp = tf.get_variable(name=\"local_w_p\", shape=[num_units, num_units],\n dtype=tf.float32)\n\n # shape => (B, num_units)\n _intermediate_result = tf.transpose(tf.tensordot(wp, query, [0, 1]))\n\n # shape => (B, 1)\n _tmp = tf.transpose(tf.tensordot(vp, tf.tanh(_intermediate_result), [0, 1]))\n\n # Derive p_t as described by Luong for the predictive local-p case.\n self.p = tf.cast(source_seq_length, tf.float32) * tf.sigmoid(_tmp)\n\n elif self.attention_mode == AttentionMode.MONOTONIC:\n # Derive p_t as described by Luong for the monotonic local-m case.\n self.p = tf.tile(\n [[self.time]],\n tf.convert_to_tensor([self.batch_size, 1])\n )\n\n # Prevent the window from leaving the memory.\n self.p = tf.maximum(self.p, self.d)\n self.p = tf.minimum(self.p, source_seq_length - (self.d + 1))\n self.p = tf.cast(self.p, dtype=tf.float32)\n\n # Calculate the memory sequence index at which the window should start.\n start_index = tf.floor(self.p) - self.d\n start_index = tf.cast(start_index, dtype=tf.int32)\n\n # Prevent the window from leaving the memory.\n self.window_start = tf.maximum(0, start_index)\n\n # Calculate the memory sequence index at which the window should stop.\n stop_index = tf.floor(self.p) + self.d + 1\n stop_index = tf.cast(stop_index, dtype=tf.int32)\n\n # Prevent the window from leaving the memory.\n self.window_stop = tf.minimum(source_seq_length, 
stop_index)\n\n # Calculate how many padding frames should be added to the start of the window.\n # This is used to get up to the total memory length again.\n self.full_seq_pre_padding = tf.abs(start_index)\n\n # Calculate how many padding frames should be added to the end of the window.\n # This is used to get up to the total memory length again.\n self.full_seq_post_padding = tf.abs(stop_index - source_seq_length)\n\n # Calculate how many padding frames should be added to the start of the window.\n # This is used to get the window up to 2D+1 frames.\n self.window_pre_padding = tf.abs(self.window_start - start_index)\n\n # Calculate how many padding frames should be added to the end of the window.\n # This is used to get the window up to 2D+1 frames.\n self.window_post_padding = tf.abs(self.window_stop - stop_index)\n\n # Slice the windows for every batch entry.\n with tf.variable_scope(None, \"window_extraction\", [query]):\n windows = []\n # Iterate the batch entries.\n for i in range(0, self.const_batch_size):\n # Slice out the window from the processed memory.\n __window = self._keys[i, self.window_start[i][0]:self.window_stop[i][0], :]\n\n # Add zero padding to the slice in order to ensure the window size is (2D+1).\n paddings = [\n [self.window_pre_padding[i][0], self.window_post_padding[i][0]],\n [0, 0]\n ]\n __window = tf.pad(__window, paddings, 'CONSTANT')\n\n # Collect the extracted windows for each batch entry.\n windows.append(__window)\n\n # Merge all extracted windows into one tensor.\n window = tf.stack(windows)\n\n # Calculate the non-normalized attention score as described by Luong as dot.\n if self.score_mode == AttentionScore.DOT:\n score = _luong_dot_score(query, window, self._scale)\n # Calculate the non-normalized attention score as described by Luong as general.\n elif self.score_mode == AttentionScore.GENERAL:\n score = _luong_general_score(query, window)\n # Calculate the non-normalized attention score as described by Luong as concat.\n elif self.score_mode == AttentionScore.CONCAT:\n score = _luong_concat_score(query, window)\n else:\n score = None\n raise Exception(\"An invalid attention scoring mode was supplied.\")\n\n # Normalize the scores.\n alignments = self._probability_fn(score, state)\n\n next_state = alignments\n\n return alignments, next_state\n\n\ndef _luong_dot_score(query, keys, scale):\n \"\"\"\n Implements the Luong-style dot scoring function.\n\n This attention has two forms. The first is standard Luong attention, as described in:\n\n Minh-Thang Luong, Hieu Pham, Christopher D. Manning.\n \"Effective Approaches to Attention-based Neural Machine Translation.\"\n EMNLP 2015. https://arxiv.org/abs/1508.04025\n\n The second is the scaled form inspired partly by the normalized form of\n Bahdanau attention.\n\n To enable the second form, call this function with `scale=True`.\n\n This implementation is derived from: `tensorflow.contrib.seq2seq.python.ops.attention_wrapper`\n\n Arguments:\n query (tf.Tensor):\n Decoder cell outputs to compare to the keys (memory).\n The shape is expected to be shape=(B, num_units) with B being the batch size\n and `num_units` being the output size of the decoder_cell.\n\n keys (tf.Tensor):\n Processed memory (usually the encoder states processed by the memory_layer).\n The shape is expected to be shape=(B, X, num_units) with B being the batch size\n and `num_units` being the output size of the memory_layer. 
X may be the\n maximal length of the encoder time domain or in the case of local attention the\n window size.\n\n scale (boolean):\n Whether to apply a scale to the score function.\n\n Returns:\n score (tf.Tensor):\n A tensor with shape=(B, X) containing the non-normalized score values.\n\n Raises:\n ValueError: If `key` and `query` depths do not match.\n\n \"\"\"\n depth = query.get_shape()[-1]\n key_units = keys.get_shape()[-1]\n\n if depth != key_units:\n raise ValueError(\n \"Incompatible or unknown inner dimensions between query and keys. \"\n \"Query (%s) has units: %s. Keys (%s) have units: %s. \"\n \"Perhaps you need to set num_units to the keys' dimension (%s)?\"\n % (query, depth, keys, key_units, key_units))\n\n dtype = query.dtype\n\n query = tf.expand_dims(query, 1)\n score = tf.matmul(query, keys, transpose_b=True)\n score = tf.squeeze(score, [1])\n\n if scale:\n # Scalar used in weight scaling\n g = tf.get_variable(\n \"attention_g\", dtype=dtype,\n initializer=tf.ones_initializer, shape=())\n score = g * score\n\n return score\n\n\ndef _luong_general_score(query, keys):\n \"\"\"\n Implements the Luong-style general scoring function.\n\n - See [1]: Effective Approaches to Attention-based Neural Machine Translation,\n http://arxiv.org/abs/1508.04025\n\n Arguments:\n query (tf.Tensor):\n Decoder cell outputs to compare to the keys (memory).\n The shape is expected to be shape=(B, num_units) with B being the batch size\n and `num_units` being the output size of the decoder_cell.\n\n keys (tf.Tensor):\n Processed memory (usually the encoder states processed by the memory_layer).\n The shape is expected to be shape=(B, X, num_units) with B being the batch size\n and `num_units` being the output size of the memory_layer. X may be the\n maximal length of the encoder time domain or in the case of local attention the\n window size.\n\n Returns:\n score (tf.Tensor):\n A tensor with shape=(B, X) containing the non-normalized score values.\n \"\"\"\n raise NotImplementedError('Luong style general mode attention scoring is not implemented yet!')\n\n\ndef _luong_concat_score(query, keys):\n \"\"\"\n Implements the Luong-style concat scoring function.\n\n - See [1]: Effective Approaches to Attention-based Neural Machine Translation,\n http://arxiv.org/abs/1508.04025\n\n Arguments:\n query (tf.Tensor):\n Decoder cell outputs to compare to the keys (memory).\n The shape is expected to be shape=(B, num_units) with B being the batch size\n and `num_units` being the output size of the decoder_cell.\n\n keys (tf.Tensor):\n Processed memory (usually the encoder states processed by the memory_layer).\n The shape is expected to be shape=(B, X, num_units) with B being the batch size\n and `num_units` being the output size of the memory_layer. 
X may be the\n maximal length of the encoder time domain or in the case of local attention the\n window size.\n\n Returns:\n score (tf.Tensor):\n A tensor with shape=(B, X) containing the non-normalized score values.\n\n \"\"\"\n raise NotImplementedError('Luong style concat mode attention scoring is not implemented yet!')\n\n\nclass AdvancedAttentionWrapper(AttentionWrapper):\n \"\"\"\n Wraps the standard AttentionWrapper class so that during decoding steps the decoding time\n index is updated in the attention mechanism.\n\n This is a hack to enable us using Luong style monotonic attention.\n \"\"\"\n\n def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None):\n\n super().__init__(cell=cell,\n attention_mechanism=attention_mechanism,\n attention_layer_size=attention_layer_size,\n alignment_history=alignment_history,\n cell_input_fn=cell_input_fn,\n output_attention=output_attention,\n initial_cell_state=initial_cell_state,\n name=name)\n\n def call(self, inputs, state):\n \"\"\"Perform a step of attention-wrapped RNN.\n\n - Step 1: Mix the `inputs` and previous step's `attention` output via\n `cell_input_fn`.\n - Step 2: Call the wrapped `cell` with this input and its previous state.\n - Step 3: Score the cell's output with `attention_mechanism`.\n - Step 4: Calculate the alignments by passing the score through the\n `normalizer`.\n - Step 5: Calculate the context vector as the inner product between the\n alignments and the attention_mechanism's values (memory).\n - Step 6: Calculate the attention output by concatenating the cell output\n and context through the attention layer (a linear layer with\n `attention_layer_size` outputs).\n\n Args:\n inputs: (Possibly nested tuple of) Tensor, the input at this time step.\n state: An instance of `AttentionWrapperState` containing\n tensors from the previous time step.\n\n Returns:\n A tuple `(attention_or_cell_output, next_state)`, where:\n\n - `attention_or_cell_output` depending on `output_attention`.\n - `next_state` is an instance of `AttentionWrapperState`\n containing the state calculated at this time step.\n\n Raises:\n TypeError: If `state` is not an instance of `AttentionWrapperState`.\n \"\"\"\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. \"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = (\n cell_output.shape[0].value or tf.shape(cell_output)[0])\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? 
You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with tf.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)):\n cell_output = tf.identity(\n cell_output, name=\"checked_cell_output\")\n\n if self._is_multi:\n previous_attention_state = state.attention_state\n previous_alignment_history = state.alignment_history\n else:\n previous_attention_state = [state.attention_state]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_attention_states = []\n maybe_all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n # Note: This is the only modification hacked into the attention wrapper to support\n # monotonic Luong attention.\n attention_mechanism.time = state.time\n\n attention, alignments, next_attention_state = _luong_local_compute_attention(\n attention_mechanism, cell_output, previous_attention_state[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_attention_states.append(next_attention_state)\n all_alignments.append(alignments)\n all_attentions.append(attention)\n maybe_all_histories.append(alignment_history)\n\n attention = tf.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n attention_state=self._item_or_tuple(all_attention_states),\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(maybe_all_histories))\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state\n"
] |
[
[
"tensorflow.exp",
"tensorflow.tensordot",
"tensorflow.matmul",
"tensorflow.stack",
"tensorflow.tanh",
"tensorflow.identity",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.sigmoid",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.pad",
"tensorflow.abs",
"tensorflow.floor",
"tensorflow.range",
"tensorflow.minimum",
"tensorflow.expand_dims",
"tensorflow.get_variable",
"tensorflow.convert_to_tensor",
"tensorflow.maximum"
]
] |
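The attention module stored above scores a decoder query against a fixed-size memory window using a Luong-style dot product. A hedged sketch of that score plus softmax normalization, written with plain TensorFlow 2.x eager tensors rather than the TF 1.x contrib classes used in the file (the shapes here are illustrative assumptions):

```python
# Hedged sketch of the dot score in _luong_dot_score above, followed by softmax.
import tensorflow as tf

batch, window, units = 2, 21, 64                  # window = 2D + 1 with D = 10
query = tf.random.normal((batch, units))          # one decoder step output
keys = tf.random.normal((batch, window, units))   # processed memory window

score = tf.squeeze(tf.matmul(tf.expand_dims(query, 1), keys, transpose_b=True), [1])
alignments = tf.nn.softmax(score, axis=-1)        # normalized attention weights
print(alignments.shape)                           # (2, 21)
```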
Yard1/ludwig
|
[
"510455f8d4fcd6b66e76d2c906d2c600fe724093"
] |
[
"ludwig/features/image_feature.py"
] |
[
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nimport os\nimport sys\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom typing import Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ludwig.constants import *\nfrom ludwig.encoders.image_encoders import ENCODER_REGISTRY\nfrom ludwig.features.base_feature import InputFeature\nfrom ludwig.utils.data_utils import get_abs_path\nfrom ludwig.utils.fs_utils import upload_h5\nfrom ludwig.utils.image_utils import greyscale\nfrom ludwig.utils.image_utils import num_channels_in_image\nfrom ludwig.utils.image_utils import resize_image\nfrom ludwig.utils.image_utils import get_image_from_path, read_image\nfrom ludwig.utils.misc_utils import set_default_value\n\nlogger = logging.getLogger(__name__)\n\n\nimage_scaling_registry = {\n 'pixel_normalization': lambda x: x * 1.0 / 255,\n 'pixel_standardization': lambda x: tf.map_fn(\n lambda f: tf.image.per_image_standardization(f), x)\n}\n\n\nclass ImageFeatureMixin:\n type = IMAGE\n preprocessing_defaults = {\n 'missing_value_strategy': BACKFILL,\n 'in_memory': True,\n 'resize_method': 'interpolate',\n 'scaling': 'pixel_normalization',\n 'num_processes': 1,\n 'infer_image_dimensions': False,\n 'infer_image_max_height': 256,\n 'infer_image_max_width': 256,\n 'infer_image_sample_size': 100\n }\n\n preprocessing_schema = {\n 'missing_value_strategy': {'type': 'string', 'enum': MISSING_VALUE_STRATEGY_OPTIONS},\n 'in_memory': {'type': 'boolean'},\n 'resize_method': {'type': 'string', 'enum': RESIZE_METHODS},\n 'scaling': {'type': 'string', 'enum': list(image_scaling_registry.keys())},\n 'num_processes': {'type': 'integer', 'minimum': 0},\n 'height': {'type': 'integer', 'minimum': 0},\n 'width': {'type': 'integer', 'minimum': 0},\n 'num_channels': {'type': 'integer', 'minimum': 0},\n 'infer_image_dimensions': {'type': 'boolean'},\n 'infer_image_max_height': {'type': 'integer', 'minimum': 0},\n 'infer_image_max_width': {'type': 'integer', 'minimum': 0},\n 'infer_image_sample_size': {'type': 'integer', 'minimum': 0}\n }\n\n @staticmethod\n def cast_column(column, backend):\n return column\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters, backend):\n return {\n PREPROCESSING: preprocessing_parameters\n }\n\n @staticmethod\n def _read_image_and_resize(\n img_entry: Union[str, 'numpy.array'],\n img_width: int,\n img_height: int,\n should_resize: bool,\n num_channels: int,\n resize_method: str,\n user_specified_num_channels: int\n ):\n \"\"\"\n :param img_entry Union[str, 'numpy.array']: if str file path to the\n image else numpy.array of the image itself\n :param img_width: expected width of the image\n :param img_height: expected height of the image\n :param should_resize: Should the image be resized?\n :param resize_method: type of resizing method\n :param num_channels: expected 
number of channels in the first image\n :param user_specified_num_channels: did the user specify num channels?\n :return: image object\n\n Helper method to read and resize an image according to model defn.\n If the user doesn't specify a number of channels, we use the first image\n in the dataset as the source of truth. If any image in the dataset\n doesn't have the same number of channels as the first image,\n raise an exception.\n\n If the user specifies a number of channels, we try to convert all the\n images to the specifications by dropping channels/padding 0 channels\n \"\"\"\n img = read_image(img_entry)\n img_num_channels = num_channels_in_image(img)\n if img_num_channels == 1:\n img = img.reshape((img.shape[0], img.shape[1], 1))\n\n if should_resize:\n img = resize_image(img, (img_height, img_width), resize_method)\n\n if user_specified_num_channels is True:\n\n # convert to greyscale if needed\n if num_channels == 1 and (\n img_num_channels == 3 or img_num_channels == 4):\n img = greyscale(img)\n img_num_channels = 1\n\n # Number of channels is specified by the user\n img_padded = np.zeros((img_height, img_width, num_channels),\n dtype=np.uint8)\n min_num_channels = min(num_channels, img_num_channels)\n img_padded[:, :, :min_num_channels] = img[:, :, :min_num_channels]\n img = img_padded\n\n if img_num_channels != num_channels:\n logger.warning(\n \"Image has {0} channels, where as {1} \"\n \"channels are expected. Dropping/adding channels \"\n \"with 0s as appropriate\".format(\n img_num_channels, num_channels))\n else:\n # If the image isn't like the first image, raise exception\n if img_num_channels != num_channels:\n raise ValueError(\n 'Image has {0} channels, unlike the first image, which '\n 'has {1} channels. Make sure all the images have the same '\n 'number of channels or use the num_channels property in '\n 'image preprocessing'.format(img_num_channels,\n num_channels))\n\n if img.shape[0] != img_height or img.shape[1] != img_width:\n raise ValueError(\n \"Images are not of the same size. \"\n \"Expected size is {0}, \"\n \"current image size is {1}.\"\n \"Images are expected to be all of the same size \"\n \"or explicit image width and height are expected \"\n \"to be provided. \"\n \"Additional information: \"\n \"https://ludwig-ai.github.io/ludwig-docs/user_guide/#image-features-preprocessing\"\n .format([img_height, img_width, num_channels], img.shape)\n )\n\n return img\n\n @staticmethod\n def _finalize_preprocessing_parameters(\n preprocessing_parameters: dict,\n first_img_entry: Union[str, 'numpy.array'],\n src_path: str,\n input_feature_col: np.array\n ):\n \"\"\"\n Helper method to determine the height, width and number of channels for\n preprocessing the image data. This is achieved by looking at the\n parameters provided by the user. When there are some missing parameters,\n we fall back on to the first image in the dataset. 
The assumption being\n that all the images in the data are expected be of the same size with\n the same number of channels\n \"\"\"\n first_image = read_image(first_img_entry)\n first_img_height = first_image.shape[0]\n first_img_width = first_image.shape[1]\n first_img_num_channels = num_channels_in_image(first_image)\n\n should_resize = False\n if (HEIGHT in preprocessing_parameters or\n WIDTH in preprocessing_parameters):\n should_resize = True\n try:\n height = int(preprocessing_parameters[HEIGHT])\n width = int(preprocessing_parameters[WIDTH])\n except ValueError as e:\n raise ValueError(\n 'Image height and width must be set and have '\n 'positive integer values: ' + str(e)\n )\n if height <= 0 or width <= 0:\n raise ValueError(\n 'Image height and width must be positive integers'\n )\n else:\n # User hasn't specified height and width.\n # Default to first image, or infer from sample.\n height, width = first_img_height, first_img_width\n\n if preprocessing_parameters[INFER_IMAGE_DIMENSIONS]:\n should_resize = True\n sample_size = min(len(input_feature_col), preprocessing_parameters[INFER_IMAGE_SAMPLE_SIZE])\n sample_images = [read_image(get_image_from_path(src_path, img))\n for img in input_feature_col[:sample_size]]\n\n if sample_images:\n height_avg = min(\n sum(x.shape[0] for x in sample_images) / len(sample_images),\n preprocessing_parameters[INFER_IMAGE_MAX_HEIGHT])\n width_avg = min(\n sum(x.shape[1] for x in sample_images) / len(sample_images),\n preprocessing_parameters[INFER_IMAGE_MAX_WIDTH])\n\n height, width = round(height_avg), round(width_avg)\n\n logger.debug(\"Inferring height: {0} and width: {1}\".format(height, width))\n else:\n logger.warning(\"Sample set for inference is empty, default to height and width of first image\")\n\n if NUM_CHANNELS in preprocessing_parameters:\n # User specified num_channels in the model/feature config\n user_specified_num_channels = True\n num_channels = preprocessing_parameters[NUM_CHANNELS]\n else:\n user_specified_num_channels = False\n num_channels = first_img_num_channels\n\n assert isinstance(num_channels, int), ValueError(\n 'Number of image channels needs to be an integer'\n )\n\n return (\n should_resize,\n width,\n height,\n num_channels,\n user_specified_num_channels,\n first_image\n )\n\n @staticmethod\n def add_feature_data(\n feature,\n input_df,\n proc_df,\n metadata,\n preprocessing_parameters,\n backend,\n skip_save_processed_input\n ):\n in_memory = preprocessing_parameters['in_memory']\n if PREPROCESSING in feature and 'in_memory' in feature[PREPROCESSING]:\n in_memory = feature[PREPROCESSING]['in_memory']\n\n num_processes = preprocessing_parameters['num_processes']\n if PREPROCESSING in feature and 'num_processes' in feature[\n PREPROCESSING]:\n num_processes = feature[PREPROCESSING]['num_processes']\n\n src_path = None\n if SRC in metadata:\n src_path = os.path.dirname(os.path.abspath(metadata.get(SRC)))\n\n num_images = len(input_df[feature[COLUMN]])\n if num_images == 0:\n raise ValueError('There are no images in the dataset provided.')\n\n first_img_entry = next(iter(input_df[feature[COLUMN]]))\n logger.debug(\n 'Detected image feature type is {}'.format(type(first_img_entry))\n )\n\n if not isinstance(first_img_entry, str) \\\n and not isinstance(first_img_entry, np.ndarray):\n raise ValueError(\n 'Invalid image feature data type. 
Detected type is {}, '\n 'expect either string for file path or numpy array.'\n .format(type(first_img_entry))\n )\n\n first_img_entry = get_image_from_path(src_path, first_img_entry)\n\n (\n should_resize,\n width,\n height,\n num_channels,\n user_specified_num_channels,\n first_image\n ) = ImageFeatureMixin._finalize_preprocessing_parameters(\n preprocessing_parameters, first_img_entry, src_path, input_df[feature[COLUMN]]\n )\n\n metadata[feature[NAME]][PREPROCESSING]['height'] = height\n metadata[feature[NAME]][PREPROCESSING]['width'] = width\n metadata[feature[NAME]][PREPROCESSING][\n 'num_channels'] = num_channels\n\n read_image_and_resize = partial(\n ImageFeatureMixin._read_image_and_resize,\n img_width=width,\n img_height=height,\n should_resize=should_resize,\n num_channels=num_channels,\n resize_method=preprocessing_parameters['resize_method'],\n user_specified_num_channels=user_specified_num_channels\n )\n\n # check to see if the active backend can support lazy loading of\n # image features from the hdf5 cache.\n backend.check_lazy_load_supported(feature)\n\n if in_memory or skip_save_processed_input:\n # Number of processes to run in parallel for preprocessing\n metadata[feature[NAME]][PREPROCESSING][\n 'num_processes'] = num_processes\n metadata[feature[NAME]]['reshape'] = (height, width, num_channels)\n\n # Split the dataset into pools only if we have an explicit request to use\n # multiple processes. In case we have multiple input images use the\n # standard code anyway.\n if backend.supports_multiprocessing and (\n num_processes > 1 or num_images > 1):\n all_img_entries = [get_abs_path(src_path, img_entry)\n if isinstance(img_entry, str) else img_entry\n for img_entry in input_df[feature[COLUMN]]]\n\n with Pool(num_processes) as pool:\n logger.debug(\n 'Using {} processes for preprocessing images'.format(\n num_processes\n )\n )\n proc_df[feature[PROC_COLUMN]] = pool.map(\n read_image_and_resize, all_img_entries\n )\n else:\n # If we're not running multiple processes and we are only processing one\n # image just use this faster shortcut, bypassing multiprocessing.Pool.map\n logger.debug(\n 'No process pool initialized. 
Using internal process for preprocessing images'\n )\n\n # helper function for handling single image\n def _get_processed_image(img_store):\n if isinstance(img_store, str):\n return read_image_and_resize(\n get_abs_path(src_path, img_store)\n )\n else:\n return read_image_and_resize(img_store)\n\n proc_df[feature[PROC_COLUMN]] = backend.df_engine.map_objects(\n input_df[feature[COLUMN]],\n _get_processed_image\n )\n else:\n\n all_img_entries = [get_abs_path(src_path, img_entry)\n if isinstance(img_entry, str) else img_entry\n for img_entry in input_df[feature[COLUMN]]]\n\n data_fp = backend.cache.get_cache_path(\n metadata.get(SRC), metadata.get(CHECKSUM), TRAINING\n )\n with upload_h5(data_fp) as h5_file:\n # todo future add multiprocessing/multithreading\n image_dataset = h5_file.create_dataset(\n feature[PROC_COLUMN] + '_data',\n (num_images, height, width, num_channels),\n dtype=np.uint8\n )\n for i, img_entry in enumerate(all_img_entries):\n image_dataset[i, :height, :width, :] = (\n read_image_and_resize(img_entry)\n )\n h5_file.flush()\n\n proc_df[feature[PROC_COLUMN]] = np.arange(num_images)\n return proc_df\n\n\nclass ImageInputFeature(ImageFeatureMixin, InputFeature):\n height = 0\n width = 0\n num_channels = 0\n scaling = 'pixel_normalization'\n encoder = 'stacked_cnn'\n\n def __init__(self, feature, encoder_obj=None):\n super().__init__(feature)\n self.overwrite_defaults(feature)\n if encoder_obj:\n self.encoder_obj = encoder_obj\n else:\n self.encoder_obj = self.initialize_encoder(feature)\n\n def call(self, inputs, training=None, mask=None):\n assert isinstance(inputs, tf.Tensor)\n assert inputs.dtype in [tf.uint8, tf.int64]\n\n # casting and rescaling\n inputs = tf.cast(inputs, tf.float32) / 255\n\n inputs_encoded = self.encoder_obj(\n inputs, training=training, mask=mask\n )\n\n return inputs_encoded\n\n @classmethod\n def get_input_dtype(cls):\n return tf.uint8\n\n def get_input_shape(self):\n return self.height, self.width, self.num_channels\n\n @staticmethod\n def update_config_with_metadata(\n input_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n for key in ['height', 'width', 'num_channels', 'scaling']:\n input_feature[key] = feature_metadata[PREPROCESSING][key]\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, TIED, None)\n set_default_value(input_feature, PREPROCESSING, {})\n\n encoder_registry = ENCODER_REGISTRY\n"
] |
[
[
"tensorflow.image.per_image_standardization",
"numpy.arange",
"numpy.zeros",
"tensorflow.cast"
]
] |
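The Ludwig feature module stored above registers two image scaling modes in `image_scaling_registry`. A hedged sketch of what those registry entries compute, applied to a dummy batch with plain TensorFlow (this is not Ludwig's own API):

```python
# Hedged sketch of the two entries in image_scaling_registry above.
import tensorflow as tf

batch = tf.cast(tf.random.uniform((2, 32, 32, 3), 0, 256, dtype=tf.int32), tf.float32)

normalized = batch / 255.0                                            # 'pixel_normalization'
standardized = tf.map_fn(tf.image.per_image_standardization, batch)   # 'pixel_standardization'
print(normalized.shape, standardized.shape)
```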
caiodearaujo/streamlit-awesome-table
|
[
"18a7ad7372aba1163f17134191d784056649bbd8"
] |
[
"samples/with_search/__init__.py"
] |
[
"import pandas as pd\nimport streamlit as st\nfrom awesome_table import AwesomeTable\nfrom awesome_table.column import (Column, ColumnDType)\nfrom sample import data as sample_data\n\nst.set_page_config(page_title='AwesomeTable by @caiofaar', page_icon='📊', layout='wide')\nst.title('AwesomeTable with Search')\n\nAwesomeTable(pd.json_normalize(sample_data), columns=[\n Column(name='id', label='ID'),\n Column(name='name', label='Name'),\n Column(name='job_title', label='Job Title'),\n Column(name='avatar', label='Avatar'),\n Column(name='_url.social_media', label='Social Media', dtype=ColumnDType.ICONBUTTON, icon='fa-solid fa-share-nodes'), ## From FontAwesome v6.0.0\n Column(name='_url.document', label='Document', dtype=ColumnDType.DOWNLOAD),\n], show_search=True)"
] |
[
[
"pandas.json_normalize"
]
] |
aharley/track_check_repeat
|
[
"564c3065a758deea11acdcaeea7a187ce376d564"
] |
[
"nets/raft_core/backraft.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport sys\nsys.path.append('./')\n\nfrom update import BasicUpdateBlock, SmallUpdateBlock\nfrom extractor import BasicEncoder, SmallEncoder\nfrom corr import CorrBlock, AlternateCorrBlock\nfrom util import bilinear_sampler, coords_grid, upflow8\n\ntry:\n autocast = torch.cuda.amp.autocast\nexcept:\n # dummy autocast for PyTorch < 1.6\n class autocast:\n def __init__(self, enabled):\n pass\n def __enter__(self):\n pass\n def __exit__(self, *args):\n pass\n\n\nclass RAFT(nn.Module):\n def __init__(self, args):\n super(RAFT, self).__init__()\n self.args = args\n\n if args.small:\n self.hidden_dim = hdim = 96\n self.context_dim = cdim = 64\n args.corr_levels = 4\n args.corr_radius = 3\n \n else:\n self.hidden_dim = hdim = 128\n self.context_dim = cdim = 128\n args.corr_levels = 4\n args.corr_radius = 4\n\n if 'dropout' not in self.args:\n self.args.dropout = 0\n\n if 'alternate_corr' not in self.args:\n self.args.alternate_corr = False\n\n # feature network, context network, and update block\n if args.small:\n self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) \n self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)\n self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)\n\n else:\n self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) \n self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)\n self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def initialize_flow(self, img):\n \"\"\" Flow is represented as difference between two coordinate grids flow = coords1 - coords0\"\"\"\n N, C, H, W = img.shape\n coords0 = coords_grid(N, H//8, W//8).to(img.device)\n coords1 = coords_grid(N, H//8, W//8).to(img.device)\n\n # optical flow computed as difference: flow = coords1 - coords0\n return coords0, coords1\n\n def upsample_flow(self, flow, mask):\n \"\"\" Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination \"\"\"\n N, _, H, W = flow.shape\n mask = mask.view(N, 1, 9, 8, 8, H, W)\n mask = torch.softmax(mask, dim=2)\n\n up_flow = F.unfold(8 * flow, [3,3], padding=1)\n up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)\n\n up_flow = torch.sum(mask * up_flow, dim=2)\n up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)\n return up_flow.reshape(N, 2, 8*H, 8*W)\n\n def forward(self, image1):\n \"\"\" get featmap for one frame \"\"\"\n image1 = 2 * (image1 / 255.0) - 1.0\n image1 = image1.contiguous()\n hdim = self.hidden_dim\n cdim = self.context_dim\n # run the feature network\n with autocast(enabled=self.args.mixed_precision):\n fmap1 = self.fnet(image1) \n fmap1 = fmap1.float()\n return fmap1\n\n def old_forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):\n \"\"\" Estimate optical flow between pair of frames \"\"\"\n\n image1 = 2 * (image1 / 255.0) - 1.0\n image2 = 2 * (image2 / 255.0) - 1.0\n\n image1 = image1.contiguous()\n image2 = image2.contiguous()\n\n hdim = self.hidden_dim\n cdim = self.context_dim\n\n # run the feature network\n with autocast(enabled=self.args.mixed_precision):\n fmap1, fmap2 = self.fnet([image1, image2]) \n \n fmap1 = fmap1.float()\n fmap2 = fmap2.float()\n if self.args.alternate_corr:\n corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)\n else:\n corr_fn = CorrBlock(fmap1, 
fmap2, radius=self.args.corr_radius)\n\n # run the context network\n with autocast(enabled=self.args.mixed_precision):\n cnet = self.cnet(image1)\n net, inp = torch.split(cnet, [hdim, cdim], dim=1)\n net = torch.tanh(net)\n inp = torch.relu(inp)\n\n coords0, coords1 = self.initialize_flow(image1)\n\n if flow_init is not None:\n coords1 = coords1 + flow_init\n\n flow_predictions = []\n for itr in range(iters):\n coords1 = coords1.detach()\n corr = corr_fn(coords1) # index correlation volume\n\n flow = coords1 - coords0\n with autocast(enabled=self.args.mixed_precision):\n net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)\n\n # F(t+1) = F(t) + \\Delta(t)\n coords1 = coords1 + delta_flow\n\n # upsample predictions\n if up_mask is None:\n flow_up = upflow8(coords1 - coords0)\n else:\n flow_up = self.upsample_flow(coords1 - coords0, up_mask)\n \n flow_predictions.append(flow_up)\n\n if test_mode:\n corr = corr_fn(coords1) # index correlation volume\n # feat = torch.cat([inp, corr], dim=1)\n feat = inp\n return coords1 - coords0, flow_up, (feat, fmap1, fmap2)\n \n return flow_predictions\n"
] |
[
[
"torch.nn.functional.unfold",
"torch.relu",
"torch.split",
"torch.softmax",
"torch.tanh",
"torch.sum"
]
] |
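The RAFT wrapper stored above represents optical flow as the difference of two coordinate grids. A hedged torch sketch of that convention; `coords_grid_like` is a hypothetical stand-in for the repo's own `coords_grid` utility:

```python
# Hedged sketch of the "flow = coords1 - coords0" convention in initialize_flow above.
import torch

def coords_grid_like(n, h, w):
    # Hypothetical helper mirroring coords_grid: per-pixel (x, y) coordinates.
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing='ij')
    return torch.stack([xs, ys], dim=0).float().unsqueeze(0).repeat(n, 1, 1, 1)

coords0 = coords_grid_like(1, 8, 8)      # both grids start identical (1/8 resolution)
coords1 = coords_grid_like(1, 8, 8)
print((coords1 - coords0).abs().sum())   # tensor(0.) -> zero initial flow
```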
MGHComputationalPathology/CalicoML
|
[
"e296f1cc0a78c4bf05e719aae96d8ea2e9d6c03c"
] |
[
"tests/test_metrics.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\\\nCopyright (c) 2015-2018, MGH Computational Pathology\n\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom calicoml.core.metrics import ppv, npv, ROC\nfrom calicoml.core.metrics import compute_averaged_metrics, accuracy_from_confusion_matrix, ConditionalMeansSelector\nfrom calicoml.core.metrics import f_pearson\nfrom calicoml.core.serialization.model import roc_auc_function\n\nimport numpy as np\nimport nose\nfrom rpy2.robjects import FloatVector\nfrom rpy2.robjects.packages import importr\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.stats import pearsonr\n\n\ndef test_ppv():\n \"\"\"Verifies correctness of the PPV calculation\"\"\"\n nose.tools.eq_(ppv([1], [1]), 1.0)\n nose.tools.eq_(ppv([1, 1], [1, 0]), 1.0)\n nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 1, 1]), 0.5)\n nose.tools.eq_(ppv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0)\n nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 0, 1]), 2.0 / 3)\n\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0)\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8)\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6)\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4)\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2)\n nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0)\n\n # Bad values should fail\n nose.tools.assert_raises(AssertionError, lambda: ppv([1, 0, 1], [1, 0]))\n nose.tools.assert_raises(AssertionError, lambda: ppv([1, 0], [1, 0, 1]))\n nose.tools.assert_raises(AssertionError, lambda: ppv([1, 0, 2], [1, 0, 1]))\n nose.tools.assert_raises(AssertionError, lambda: ppv([1, 0, 1], [1, 0, 2]))\n\n\ndef test_npv():\n \"\"\"Verifies correctness of the NPV calculation\"\"\"\n nose.tools.eq_(npv([0], [0]), 1.0)\n nose.tools.eq_(npv([0, 0], [0, 1]), 1.0)\n nose.tools.eq_(npv([0, 1], [0, 0]), 0.5)\n nose.tools.eq_(npv([1, 0, 0, 1], [0, 0, 0, 0]), 0.5)\n nose.tools.eq_(npv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0)\n nose.tools.eq_(npv([0, 1, 1, 0], [0, 0, 1, 0]), 2.0 / 3)\n\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0)\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8)\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6)\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4)\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2)\n nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0)\n\n # Bad values should fail\n nose.tools.assert_raises(AssertionError, lambda: npv([1, 0, 1], [1, 0]))\n nose.tools.assert_raises(AssertionError, lambda: npv([1, 0], [1, 0, 1]))\n nose.tools.assert_raises(AssertionError, lambda: npv([1, 0, 2], [1, 0, 1]))\n nose.tools.assert_raises(AssertionError, lambda: npv([1, 0, 1], [1, 0, 2]))\n\n\ndef test_roc():\n \"\"\"Tests the ROC class\"\"\"\n def checkme(y_true, y_pred, expected_auc):\n \"\"\"Tests the ROC for a single set of predictions. 
Mostly sanity checks since all the computation is done\n by scikit, which we assume is correct\"\"\"\n roc = ROC.from_scores(y_true, y_pred)\n nose.tools.assert_almost_equal(roc.auc, expected_auc)\n nose.tools.ok_(all(0 <= fpr_val <= 1 for fpr_val in roc.fpr))\n nose.tools.ok_(all(0 <= tpr_val <= 1 for tpr_val in roc.tpr))\n\n nose.tools.assert_list_equal(list(roc.dataframe['tpr']), list(roc.tpr))\n nose.tools.assert_list_equal(list(roc.dataframe['thresholds']), list(roc.thresholds))\n\n for prop in ['fpr', 'tpr', 'thresholds']:\n nose.tools.assert_list_equal(list(roc.dataframe[prop]), list(getattr(roc, prop)))\n nose.tools.assert_greater_equal(len(roc.dataframe[prop]), 2) # needs to have at least the two extremes\n\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], 1.0\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1], 0.0\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0], 0.5\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0, 0, 0], 0.75\n\n\ndef test_auc_ci():\n \"\"\"Validates the AUC confidence interval by comparing with R's pROC\"\"\"\n def checkme(y_true, y_pred):\n \"\"\"Test utility\"\"\"\n roc = ROC.from_scores(y_true, y_pred)\n print(roc.auc_ci)\n np.testing.assert_allclose(roc.auc_ci.estimate, roc.auc, atol=0.01)\n\n proc = importr('pROC')\n r_ci_obj = proc.ci(proc.roc(FloatVector(y_true), FloatVector(y_pred), ci=True), method='bootstrap')\n r_ci_dict = dict(list(r_ci_obj.items()))\n np.testing.assert_allclose(r_ci_dict['2.5%'], roc.auc_ci.low, atol=0.02)\n np.testing.assert_allclose(r_ci_dict['97.5%'], roc.auc_ci.high, atol=0.02)\n\n np.random.seed(0xC0FFEE)\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0] * 10, [1, 1, 1, 1, 0, 0, 0, 0] * 10\n yield checkme, [1, 1, 1, 1, 0, 0, 0, 0] * 10, [1, 0, 1, 0, 1, 0, 1, 0] * 10\n\n for _ in range(5):\n y_true = np.random.choice([0, 1], size=100)\n y_pred = np.random.normal(size=100)\n y_pred[y_true == 1] += np.abs(np.random.normal())\n yield checkme, y_true, y_pred\n\n\ndef test_compute_averaged_metrics():\n \"\"\" Tests compute_averaged_metrics function\"\"\"\n y_truth = [0, 1, 2, 0, 1, 2]\n scores1 = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]]\n result1 = compute_averaged_metrics(y_truth, scores1, roc_auc_function)\n nose.tools.assert_almost_equal(1.0, result1, delta=1e-6)\n scores2 = [[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1]]\n result2 = compute_averaged_metrics(y_truth, scores2, roc_auc_function)\n nose.tools.assert_almost_equal(0.375, result2, delta=1e-6)\n\n\ndef test_pearson():\n \"\"\" Validate pearson correlation\"\"\"\n X = np.asarray([[1, 2], [-2, 8], [3, 5]])\n y = np.asarray([-1, -2, 0])\n rs_pearson, ps_pearson = f_pearson(X, y)\n nose.tools.assert_almost_equal(0.073186395040328034, ps_pearson[0], delta=1e-6)\n nose.tools.assert_almost_equal(0.66666666666666663, ps_pearson[1], delta=1e-6)\n nose.tools.assert_almost_equal(0.993399267799, rs_pearson[0], delta=1e-6)\n nose.tools.assert_almost_equal(-0.5, rs_pearson[1], delta=1e-6)\n\n\ndef test_accuracy_from_confusion_matrix():\n \"\"\" test accuracy computations from confusion matrix \"\"\"\n y_truth = [0, 1, 2, 0, 1, 2]\n y_score = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.1, 0.8, 0.1], [0.1, 0.8, 0.1], [0.8, 0.1, 0.1]]\n y_pred = [0, 1, 2, 1, 1, 0]\n computed_confusion_matrix = confusion_matrix(y_truth, y_pred)\n accuracy = accuracy_from_confusion_matrix(y_truth, y_score, 
computed_confusion_matrix)\n nose.tools.assert_almost_equal(0.6666667, accuracy, delta=1e-6)\n\n\ndef test_conditional_means_selector():\n \"\"\" test ConditionalMeansSelector class \"\"\"\n # first check means in reverse order\n cms = ConditionalMeansSelector(f_pearson)\n test_y = np.asarray([3, 2, 1, 0, 3, 2, 1, 0])\n test_x = np.asarray([[0, 3], [5, 2], [9, 1], [13, 0], [0, 3], [5, 2], [9, 1], [13, 0]])\n rs_cond_means, ps_cond_means = cms.selector_function(test_x, test_y)\n nose.tools.assert_almost_equal(1.0, rs_cond_means[0], delta=1e-6)\n nose.tools.assert_almost_equal(1.0, rs_cond_means[1], delta=1e-6)\n nose.tools.assert_almost_equal(0.0, ps_cond_means[0], delta=1e-6)\n nose.tools.assert_almost_equal(0.0, ps_cond_means[1], delta=1e-6)\n\n # check that direct call does not produce right result, do NOT use as code pattern !!!\n rs_cond_means_wrong, _ = f_pearson(test_x, test_y)\n nose.tools.assert_not_almost_equal(1.0, rs_cond_means_wrong[0], delta=1e-6)\n\n # check means in same order\n cms_pairwise = ConditionalMeansSelector(pearsonr, True)\n rs_cond_means_pw, ps_cond_means_pw = cms_pairwise.selector_function(test_x, test_y)\n nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[0], delta=1e-6)\n nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[1], delta=1e-6)\n nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[0], delta=1e-6)\n nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[1], delta=1e-6)\n"
] |
[
[
"numpy.testing.assert_allclose",
"sklearn.metrics.confusion_matrix",
"numpy.random.normal",
"numpy.random.choice",
"numpy.asarray",
"numpy.random.seed"
]
] |
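The `apis` column above lists the numpy and scikit-learn calls exercised by this calicoml metrics test module. A minimal, self-contained sketch of that usage pattern follows; the seed, labels, and scores below are illustrative placeholders, not values taken from the test file.

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# Reproducible synthetic labels and scores (placeholders for illustration).
np.random.seed(0)                                        # numpy.random.seed
y_true = np.random.choice([0, 1], size=20)               # numpy.random.choice
scores = np.random.normal(size=20)                       # numpy.random.normal
y_pred = np.asarray(scores > 0, dtype=int)               # numpy.asarray

# Recover accuracy from the confusion matrix diagonal ...
cm = confusion_matrix(y_true, y_pred)                    # sklearn.metrics.confusion_matrix
accuracy = np.trace(cm) / cm.sum()

# ... and check it against the direct computation.
np.testing.assert_allclose(accuracy, (y_true == y_pred).mean())  # numpy.testing.assert_allclose
```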
edwardoughton/e3nb
|
[
"d03701ba24aad8a723e3e9c138f7f636f7c67573"
] |
[
"scripts/los.py"
] |
[
"\"\"\"\nExtract CLOS / NLOS lookup.\n\nWritten by Ed Oughton.\n\nMarch 2021\n\n\"\"\"\nimport os\nimport configparser\nimport json\nimport math\nimport glob\nimport random\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport pyproj\nfrom shapely.geometry import Point, Polygon, box, LineString\nfrom shapely.ops import transform\nimport rasterio\n# import networkx as nx\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nfrom rasterio.mask import mask\nfrom rasterstats import zonal_stats, gen_zonal_stats\nfrom tqdm import tqdm\n\ngrass7bin = r'\"C:\\Program Files\\GRASS GIS 7.8\\grass78.bat\"'\nos.environ['GRASSBIN'] = grass7bin\nos.environ['PATH'] += ';' + r\"C:\\Program Files\\GRASS GIS 7.8\\lib\"\n\nfrom grass_session import Session\nfrom grass.script import core as gcore\n\nCONFIG = configparser.ConfigParser()\nCONFIG.read(os.path.join(os.path.dirname(__file__), \"script_config.ini\"))\nBASE_PATH = CONFIG[\"file_locations\"][\"base_path\"]\n\nDATA_RAW = os.path.join(BASE_PATH, \"raw\")\nDATA_INTERMEDIATE = os.path.join(BASE_PATH, \"intermediate\")\nDATA_PROCESSED = os.path.join(BASE_PATH, \"processed\")\n\n\ndef load_raster_tile_lookup(iso3):\n \"\"\"\n Load in the preprocessed raster tile lookup.\n\n Parameters\n ----------\n iso3 : string\n Country iso3 code.\n\n Returns\n -------\n lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n \"\"\"\n path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')\n data = pd.read_csv(path)\n data = data.to_records('dicts')\n\n lookup = {}\n\n for item in data:\n\n coords = (item['x1'], item['y1'], item['x2'], item['y2'])\n\n lookup[coords] = item['path']\n\n return lookup\n\n\ndef generate_grid(iso3, side_length):\n \"\"\"\n Generate a spatial grid for the chosen country.\n \"\"\"\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)\n path_output = os.path.join(directory, filename)\n\n if os.path.exists(path_output):\n return\n\n filename = 'national_outline.shp'\n path = os.path.join(DATA_INTERMEDIATE, iso3, filename)\n country_outline = gpd.read_file(path, crs=\"epsg:4326\")\n\n country_outline.crs = \"epsg:4326\"\n country_outline = country_outline.to_crs(\"epsg:3857\")\n\n xmin, ymin, xmax, ymax = country_outline.total_bounds\n\n polygons = manually_create_grid(\n xmin, ymin, xmax, ymax, side_length, side_length\n )\n\n grid = gpd.GeoDataFrame({'geometry': polygons}, crs=\"epsg:3857\")#[:100]\n\n intersection = gpd.overlay(grid, country_outline, how='intersection')\n intersection.crs = \"epsg:3857\"\n intersection['area_km2'] = intersection['geometry'].area / 1e6\n intersection = intersection.to_crs(\"epsg:4326\")\n intersection.to_file(path_output, crs=\"epsg:4326\")\n\n return intersection\n\n\ndef manually_create_grid(xmin, ymin, xmax, ymax, length, wide):\n \"\"\"\n\n \"\"\"\n cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))\n rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))\n\n polygons = []\n\n for x in cols:\n for y in rows:\n polygons.append(\n Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])\n )\n\n return polygons\n\n\ndef find_tile(polygon, tile_lookup):\n \"\"\"\n\n Parameters\n ----------\n polygon : tuple\n The bounds of the modeling region.\n tile_lookup : dict\n A lookup table containing 
raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n Return\n ------\n output : list\n Contains the file path to the correct raster tile. Note:\n only the first element is returned and if there are more than\n one paths, an error is returned.\n\n \"\"\"\n output = []\n\n poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])\n\n for key, value in tile_lookup.items():\n\n bbox = box(key[0], key[1], key[2], key[3])\n\n if bbox.intersects(poly_bbox):\n output.append(value)\n\n if len(output) == 1:\n return output[0]\n elif len(output) > 1:\n print('Problem with find_tile returning more than 1 path')\n return output[0]\n else:\n print('Problem with find_tile: Unable to find raster path')\n\n\ndef add_id_range_data_to_grid(iso3, tile_lookup, side_length):\n \"\"\"\n Query the Digital Elevation Model to get an estimated interdecile\n range for each grid square.\n\n \"\"\"\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')\n filename = 'grid_final.shp'\n path_output = os.path.join(directory, filename)\n\n if os.path.exists(path_output):\n return gpd.read_file(path_output, crs='epsg:4328')\n\n filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)\n path = os.path.join(directory, filename)\n grid = gpd.read_file(path, crs='epsg:4328')\n\n output = []\n\n for idx, grid_tile in grid.iterrows():\n\n path_input = find_tile(\n grid_tile['geometry'].bounds,\n tile_lookup\n )\n\n stats = next(gen_zonal_stats(\n grid_tile['geometry'],\n path_input,\n add_stats={\n 'interdecile_range': interdecile_range\n },\n nodata=0\n ))\n\n id_range_m = stats['interdecile_range']\n\n output.append({\n 'type': 'Feature',\n 'geometry': grid_tile['geometry'],\n 'properties': {\n 'id_range_m': id_range_m,\n 'area_km2': grid_tile['area_km2'],\n # 'pop_density_km2': grid_tile['pop_densit'],\n # 'population': grid_tile['population'],\n }\n })\n\n output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')\n\n output = output.replace([np.inf, -np.inf], np.nan)\n\n output = output[output.geometry.notnull()]\n\n output.to_file(path_output, crs=\"epsg:4326\")\n\n return output\n\n\ndef interdecile_range(x):\n \"\"\"\n Get range between bottom 10% and top 10% of values.\n\n This is from the Longley-Rice Irregular Terrain Model.\n\n Code here: https://github.com/edwardoughton/itmlogic\n Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf\n\n Parameters\n ----------\n x : list\n Terrain profile values.\n\n Returns\n -------\n interdecile_range : int\n The terrain irregularity parameter.\n\n \"\"\"\n q90, q10 = np.percentile(x, [90, 10])\n\n interdecile_range = int(round(q90 - q10, 0))\n\n return interdecile_range\n\n\ndef estimate_terrain_deciles(grid):\n \"\"\"\n\n \"\"\"\n # terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()\n\n terrain_lookup = grid\n terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)\n\n terrain_lookup = terrain_lookup[['decile', 'id_range_m']]\n\n terrain_lookup = terrain_lookup.groupby(['decile']).min()\n\n terrain_lookup = terrain_lookup['id_range_m'].to_list()\n\n return terrain_lookup\n\n\ndef select_grid_sampling_areas(iso3, grid, lut):\n \"\"\"\n\n \"\"\"\n for i in range(1, 11):\n if i == 1:\n grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)\n value_name = '0-{}'.format(str(lut[1]))\n grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name\n elif i <= 9:\n grid.loc[(\n grid['id_range_m'] >= lut[i-1]) &\n (grid['id_range_m'] <= lut[i]), 'decile'] = str(i)\n 
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))\n grid.loc[(\n grid['id_range_m'] >= lut[i-1]) &\n (grid['id_range_m'] <= lut[i]), 'value'] = value_name\n elif i == 10:\n grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)\n value_name = '>{}'.format(str(lut[i-1]))\n grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name\n else:\n continue\n\n np.random.seed(2)\n\n grid = grid.loc[grid['area_km2'] > 2400].reset_index()\n\n sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)\n\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))\n\n sampling_areas.crs = 'epsg:4326'\n\n return sampling_areas\n\n\ndef get_points(iso3, sampling_areas, tile_lookup, point_spacing):\n \"\"\"\n\n \"\"\"\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n sampling_areas = sampling_areas.to_crs(\"epsg:3857\")\n\n for idx, sampling_area in sampling_areas.iterrows():\n\n lon = sampling_area['geometry'].representative_point().coords[0][0]\n lat = sampling_area['geometry'].representative_point().coords[0][1]\n filename = \"{}-{}\".format(lon, lat)\n\n xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds\n\n polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)\n\n #make geopandas dataframes\n grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs=\"epsg:3857\")\n boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},\n crs=\"epsg:3857\", index=[0])\n\n #only get points within the tile boundary\n grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')\n\n grid_sample = grid_sample.to_crs(\"epsg:4326\") #convert to lon lat\n\n ##get the highest points in each grid sample tile\n sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]\n\n ##convert to projected for viewsheding\n sampling_points = sampling_points.to_crs(\"epsg:4326\")\n\n path_output = os.path.join(directory, filename + '.shp')\n sampling_points.to_file(path_output)\n\n return sampling_points\n\n\ndef find_points(iso3, grid_sample, tile_lookup, filename):\n \"\"\"\n\n \"\"\"\n filename_2 = filename + '.shp'\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')\n path_output = os.path.join(directory, filename_2)\n\n if os.path.exists(path_output):\n return gpd.read_file(path_output, crs='epsg:4326')\n\n output = []\n\n for idx, grid_tile in grid_sample.iterrows():\n\n min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds\n\n geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))\n\n output.append({\n 'type': 'Feature',\n 'geometry': geom,\n 'properties': {\n }\n })\n\n output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')\n\n return output\n\n\ndef generate_viewsheds(iso3, sampling_areas, sampling_points):\n \"\"\"\n\n \"\"\"\n sampling_areas = sampling_areas.to_crs(\"epsg:3857\")\n\n #set output folder\n folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')\n\n if not os.path.exists(folder_out_viewsheds):\n os.makedirs(folder_out_viewsheds)\n\n for idx, sampling_area in tqdm(sampling_areas.iterrows(),\n total=sampling_areas.shape[0]):\n\n output = []\n\n lon = sampling_area['geometry'].representative_point().coords[0][0]\n lat = sampling_area['geometry'].representative_point().coords[0][1]\n 
area_filename = \"{}-{}\".format(lon, lat)\n print('--Working on {}'.format(area_filename))\n\n ##load sampling points\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')\n points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]\n\n ##convert to lon lat to get correct raster tile\n sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},\n crs=\"epsg:3857\", index=[0])\n sampling_area_df = sampling_area_df.to_crs(\"epsg:4326\")\n\n for idx, item in sampling_area_df.iterrows():\n #needs a loop because the data structure needs a series\n path_input = find_tile(item['geometry'].bounds, tile_lookup)\n\n for idx, point in tqdm(points.iterrows(), total=points.shape[0]):\n\n results = []\n\n lon = point['geometry'].representative_point().coords[0][0]\n lat = point['geometry'].representative_point().coords[0][1]\n filename2 = \"{}-{}\".format(lon, lat)\n\n path_output = os.path.join(folder_out_viewsheds, filename2)\n\n file_path = os.path.join(path_output, 'location', 'PERMANENT',\n 'viewsheds', filename2 + '.tif')\n\n x = point['geometry'].coords[0][0]\n y = point['geometry'].coords[0][1]\n\n if not os.path.exists(file_path):\n try:\n viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')\n except:\n print('--Viewshed already exists')\n\n seen = set()\n\n for idx, node in tqdm(points.iterrows(), total=points.shape[0]):\n\n x2 = node['geometry'].coords[0][0]\n y2 = node['geometry'].coords[0][1]\n\n link = '{}_{}_{}_{}'.format(x, y, x2, y2)\n\n if link in seen:\n continue\n\n dist = find_distance((x, y), (x2, y2))\n\n if dist < 10:\n continue\n\n los = check_los(file_path, (x2, y2))\n\n results.append({\n 'sampling_area': area_filename,\n 'point_id': filename2,\n 'node_id': '{}_{}'.format(x2, y2),\n 'distance': dist,\n 'id_range_m': sampling_area['id_range_m'],\n 'decile': sampling_area['decile'],\n 'los': los,\n })\n\n seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))\n seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))\n\n output = output + results\n\n output = pd.DataFrame(output)\n folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)\n\n\ndef viewshed(point, path_input, path_output, tile_name, max_distance, crs):\n \"\"\"\n Perform a viewshed using GRASS.\n\n Parameters\n ---------\n point : tuple\n The point being queried.\n tile_lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n path_output : string\n The directory path for the output folder.\n tile_name : string\n The name allocated to the viewshed tile.\n max_distance : int\n The maximum distance a path can be.\n crs : string\n The coordinate reference system in use.\n\n Returns\n -------\n grid : dataframe\n A geopandas dataframe containing the created grid.\n\n \"\"\"\n with Session(gisdb=path_output, location=\"location\", create_opts=crs):\n\n # print('parse command')\n # print(gcore.parse_command(\"g.gisenv\", flags=\"s\"))#, set=\"DEBUG=3\"\n\n # print('r.external')\n # now link a GDAL supported raster file to a binary raster map layer,\n # from any GDAL supported raster map format, with an optional title.\n # The file is not imported but just registered as GRASS raster map.\n gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)\n\n # print('r.external.out')\n #write out as geotiff\n gcore.run_command('r.external.out', 
directory='viewsheds', format=\"GTiff\")\n\n # print('r.region')\n #manage the settings of the current geographic region\n gcore.run_command('g.region', raster=tile_name)\n\n # print('r.viewshed')\n #for each point in the output that is NULL: No LOS\n gcore.run_command('r.viewshed', #flags='e',\n input=tile_name,\n output='{}.tif'.format(tile_name),\n coordinate= [point[0], point[1]],\n observer_elevation=30,\n target_elevation=30,\n memory=5000,\n overwrite=True,\n quiet=True,\n max_distance=max_distance,\n # verbose=True\n )\n\n\ndef check_los(path_input, point):\n \"\"\"\n Find potential LOS high points.\n\n Parameters\n ----------\n path_input : string\n File path for the digital elevation raster tile.\n point : tuple\n Coordinate point being queried.\n\n Returns\n -------\n los : string\n The Line of Sight (los) of the path queried.\n\n \"\"\"\n with rasterio.open(path_input) as src:\n\n x = point[0]\n y = point[1]\n\n for val in src.sample([(x, y)]):\n\n if np.isnan(val):\n # print('is nan: {} therefore nlos'.format(val))\n los = 'nlos'\n return los\n else:\n # print('is not nan: {} therefore los'.format(val))\n los ='clos'\n return los\n\n\ndef find_distance(point1, point2):\n \"\"\"\n\n \"\"\"\n point1 = Point(point1)\n point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])\n point1 = point1.set_crs('epsg:4326')\n point1 = point1.to_crs('epsg:3857')\n\n point2 = Point(point2)\n point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])\n point2 = point2.set_crs('epsg:4326')\n point2 = point2.to_crs('epsg:3857')\n\n dist = LineString([\n (point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),\n (point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])\n ]).length\n\n return dist\n\n\ndef collect_results(iso3, sampling_areas):\n \"\"\"\n\n \"\"\"\n sampling_areas = sampling_areas.to_crs(\"epsg:3857\")#[:1]\n\n output = []\n\n #set output folder\n for idx, sampling_area in sampling_areas.iterrows():\n\n lon = sampling_area['geometry'].representative_point().coords[0][0]\n lat = sampling_area['geometry'].representative_point().coords[0][1]\n filename = \"{}-{}\".format(lon, lat)\n\n directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')\n data = pd.read_csv(os.path.join(directory, filename + '.csv'))\n\n seen = set()\n interval_size = 2500\n\n for distance_lower in range(0, 45000, interval_size):\n\n distance_upper = distance_lower + interval_size\n\n clos = 0\n nlos = 0\n\n for idx, item in data.iterrows():\n\n path_id = '{}_{}_{}'.format(\n item['point_id'],\n item['node_id'],\n item['distance']\n )\n\n if not path_id in seen:\n if item['distance'] < distance_upper:\n\n if item['los'] == 'clos':\n clos += 1\n elif item['los'] == 'nlos':\n nlos += 1\n else:\n print('Did not recognize los')\n\n seen.add(path_id)\n\n\n if clos > 0:\n clos_probability = (clos / (clos + nlos))\n else:\n clos_probability = 'no data'\n\n if nlos > 0:\n nlos_probability = (nlos / (clos + nlos))\n else:\n nlos_probability = 'no data'\n\n output.append({\n 'decile': item['decile'],\n 'id_range_m': item['id_range_m'],\n 'distance_lower': distance_lower,\n 'distance_upper': distance_upper,\n 'total_samples': clos + nlos,\n 'clos_probability': clos_probability,\n 'nlos_probability': nlos_probability,\n })\n\n output = pd.DataFrame(output)\n folder = os.path.join(DATA_INTERMEDIATE, iso3)\n output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)\n\n\nif __name__ == \"__main__\":\n\n countries = [\n (\"PER\", 5e4, 25e2),\n (\"IDN\", 5e4, 25e2),\n ]\n\n 
for country in countries:\n\n iso3 = country[0]\n side_length = country[1]\n point_spacing = country[2]\n\n ##Load the raster tile lookup\n tile_lookup = load_raster_tile_lookup(iso3)\n\n ##Generate grids\n generate_grid(iso3, side_length) #1e5\n\n # ##Add interdecile range to grid\n grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)\n\n ##Get the terrain deciles\n terrain_values = estimate_terrain_deciles(grid)\n\n ##Get the grid tile samples\n sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]\n\n ##Generate the terrain lookup\n sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]\n\n ##Process viewsheds\n generate_viewsheds(iso3, sampling_areas, sampling_points)\n\n ## Collect results\n collect_results(iso3, sampling_areas)\n"
] |
[
[
"numpy.ceil",
"numpy.isnan",
"numpy.random.seed",
"numpy.percentile",
"pandas.DataFrame",
"pandas.qcut",
"pandas.read_csv",
"numpy.floor"
]
] |
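Most of the numpy/pandas calls listed for `scripts/los.py` sit in its interdecile-range and terrain-decile steps. The sketch below isolates that pattern; the elevation samples are made up, while `id_range_m` and the decile binning mirror the script's `interdecile_range()` and `estimate_terrain_deciles()` logic.

```python
import numpy as np
import pandas as pd

def interdecile_range(values):
    """Terrain irregularity: spread between the 90th and 10th percentiles."""
    q90, q10 = np.percentile(values, [90, 10])            # numpy.percentile
    return int(round(q90 - q10, 0))

# Toy elevation samples per grid cell (placeholders, not real DEM tiles).
np.random.seed(2)                                          # numpy.random.seed
elevations = [np.random.uniform(0, peak, 100) for peak in range(100, 1100, 100)]

grid = pd.DataFrame({'id_range_m': [interdecile_range(e) for e in elevations]})  # pandas.DataFrame

# Bin the cells into terrain deciles, as estimate_terrain_deciles() does.
grid['decile'] = pd.qcut(grid['id_range_m'], 10, labels=False)   # pandas.qcut
print(grid.groupby('decile')['id_range_m'].min().to_list())
```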
AroMorin/DNNOP
|
[
"271e65811fe7cadcffc8155049e256fa78c0c5c6"
] |
[
"environments/nao/pose_assumption.py"
] |
[
"\"\"\"NAO robot class\"\"\"\n\nfrom .robot import Robot\nimport torch\n\nclass Pose_Assumption(Robot):\n def __init__(self, env_params):\n super(Pose_Assumption, self).__init__(env_params)\n env_params = self.ingest_params2(env_params)\n self.target = env_params[\"target error\"]\n self.joints = env_params[\"joints to move\"]\n self.target_angles = env_params[\"target angles\"]\n self.default_pose = \"LyingBack\"\n self.penalty = 0 # State\n self.error = float('inf') # Initial state\n self.assume_pose(self.default_pose)\n self.set_stiffness()\n\n def ingest_params2(self, env_params):\n if \"target error\" not in env_params:\n env_params[\"target error\"] = 0.1\n if \"joints to move\" not in env_params:\n env_params[\"joints to move\"] = [\"HeadYaw\", \"HeadPitch\",\n \"RShoulderPitch\",\"RShoulderRoll\",\n \"RElbowYaw\", \"RElbowRoll\",\n \"RWristYaw\",\n \"RHipYawPitch\",\n \"RHipRoll\", \"RHipPitch\", \"RKneePitch\",\n \"RAnklePitch\", \"RAnkleRoll\",\n \"LShoulderPitch\",\"LShoulderRoll\",\n \"LElbowYaw\", \"LElbowRoll\",\n \"LWristYaw\",\n \"LHipYawPitch\",\n \"LHipRoll\", \"LHipPitch\", \"LKneePitch\",\n \"LAnklePitch\", \"LAnkleRoll\"\n ]\n # NOTE: joints must be named individually\n if \"target angles\" not in env_params:\n env_params[\"target angles\"] = [0.0, 0.153,\n 0.66, 0.914,\n 0.994, 0.721,\n 0.08432,\n -0.512, -0.04,\n -0.8299, 0.317,\n 0.288, -0.268, 0.99, 0.175, -1.234,\n -0.819, -1.286, -0.58287, 0.118,\n 0.2899, -0.09, 0.6, -0.046\n ]\n return env_params\n\n def set_stiffness(self):\n time = 1.0 # Seconds\n value = 0.7 # Stiffness (max 1/min 0, higher is looser)\n self.motion.stiffnessInterpolation(self.joints, value, time)\n\n def step(self):\n \"\"\"In this function the robot will return to default pose, to\n be ready for the new command.\n \"\"\"\n origin = [0.4] # Arbitrary input\n self.observation = torch.tensor(origin,\n dtype=self.precision,\n device = self.device)\n\n def evaluate(self, inference):\n \"\"\"Evaluates the predicted pose.\"\"\"\n self.reset_state()\n values = self.process_inference(inference)\n self.apply(values)\n angles = self.get_joints()\n self.calc_error(angles)\n return self.error\n\n def reset_state(self):\n self.penalty = 0\n self.error = float('inf')\n\n def process_inference(self, inference):\n \"\"\"Ensures safety of the predicted angles.\"\"\"\n values = [a.item() for a in inference]\n for idx, value in enumerate(values):\n name = self.joints[idx]\n limits = self.motion.getLimits(name)\n min_angle = limits[0][0]\n max_angle = limits[0][1]\n max_vel = limits[0][2] # Unenforced\n max_tor = limits[0][3] # Unenforced\n value = self.cap_angle(value, min_angle, max_angle)\n values[idx] = [value]\n return values\n\n def apply(self, angles):\n \"\"\"Applies the pose to the robot.\"\"\"\n self.set_joints(angles)\n\n def cap_angle(self, x, a, b):\n penalty = 10 # Safety penalty\n if x<a:\n self.penalty += penalty\n x = a\n elif x>b:\n self.penalty += penalty\n x = b\n return x\n\n def calc_error(self, angles):\n \"\"\"Calculate the error between predicted and target angles, and\n add the safety penalties.\n \"\"\"\n errors = [abs(x-y) for x,y in zip(angles, self.target_angles)]\n error = sum(errors)\n error += self.penalty\n self.error = torch.tensor(error)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n"
] |
[
[
"torch.tensor"
]
] |
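`torch.tensor` is the only call tracked for `pose_assumption.py`; it wraps the dummy observation in `step()` and the summed joint-angle error in `calc_error()`. A small sketch of that pattern, with placeholder angles rather than the real NAO targets:

```python
import torch

target_angles = [0.0, 0.153, 0.66]           # placeholder targets, not the NAO pose
predicted     = [0.1, 0.20,  0.50]           # placeholder predictions
penalty = 10                                  # e.g. one joint capped outside its limits

# Same shape of computation as Pose_Assumption.calc_error():
errors = [abs(x - y) for x, y in zip(predicted, target_angles)]
error = torch.tensor(sum(errors) + penalty)   # torch.tensor wraps the scalar error

observation = torch.tensor([0.4], dtype=torch.float32)  # fixed dummy input, as in step()
print(error, observation)
```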
ankitsrao/aws-unused-resources
|
[
"f155f6f327235b6ceae541b02381f4083369b132"
] |
[
"unused_aws_resources.py"
] |
[
"\"\"\"\r\nSYNOPSIS\r\n--------\r\n Get the details of unused resources present across regions in the AWS account\r\n\r\nDESCRIPTION\r\n-----------\r\n This script provides a detailed overview of the number of unused resources present in the AWS account. \r\n\tIt provides service-wise details of unused resources lying around in all the regions of the AWS account. \r\n\r\nPREREQUISITES\r\n-------------\r\n - Workstation with Python version 3 and above\r\n - AWS python-based SDK: boto3\r\n Installation command: pip3 install boto3\r\n - pandas framework and openpyxl for reporting operations (xlsx file). \r\n Installation command(s): \r\n - pip3 install pandas\r\n - pip3 install openpyxl\r\n - User credentials (Access Key Id and Secret Accces Key) of a user having atleast the Security Audit permission and above on the AWS account\r\n\r\nEXAMPLE\r\n-------\r\n This script can be executed on a python compiler (AWS Cloudshell, Powershell, bash, any command line tool with python installed)\r\n Command: python ./unused_aws_resources.py --accessKey <AWS Access Key Id> --secretKey <AWS Secret Access Key>\r\n\r\nOUTPUT\r\n------\r\n - The script will provide a summarized count of all unused resources in the account.\r\n - For a detailed view, the user can refer to the .xlsx file that will be generated by the script.\r\n\r\n\"\"\"\r\n\r\nimport json\r\nimport boto3\r\nimport argparse\r\nimport multiprocessing\r\nimport csv\r\nimport os\r\nimport pandas as pd\r\nimport sys\r\nimport glob\r\n\r\nfrom urllib.request import urlopen\r\n\r\ndef ebs_volume(function, credentials, unused_resource_count, region_list):\r\n print('Scanning EBS Volumes')\r\n\r\n volume_count = 0\r\n unused_volume_detail = []\r\n\r\n for region in region_list:\r\n try:\r\n ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n volumes = list(ec2.volumes.all())\r\n \r\n unused_volumes = set([volume.volume_id for volume in volumes if volume.state == 'available'])\r\n for volume_id in unused_volumes:\r\n unused_volume_detail.append({'ResourceType':'AWS::EC2::Volume','ResourceId':volume_id,'Region':region})\r\n volume_count+=len(unused_volumes)\r\n except:\r\n pass\r\n \r\n if volume_count:\r\n unused_volume_detail = json.loads(json.dumps(unused_volume_detail))\r\n f = csv.writer(open(\"./aws_logs/ebs_volume.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_volume_detail in unused_volume_detail:\r\n f.writerow([unused_volume_detail[\"ResourceType\"],\r\n unused_volume_detail[\"ResourceId\"],\r\n unused_volume_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = volume_count\r\n\r\ndef elastic_ip(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Elastic IPs')\r\n\r\n eip_count = 0\r\n unused_eip_detail = []\r\n \r\n for region in region_list:\r\n try:\r\n ec2_client = boto3.client('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n \r\n eip_data = ec2_client.describe_addresses()['Addresses']\r\n for eip in eip_data:\r\n try:\r\n AssociationId = eip['AssociationId']\r\n except:\r\n AssociationId = ''\r\n \r\n if not AssociationId:\r\n unused_eip_detail.append({'ResourceType':'AWS::EC2::EIP','ResourceId':eip['AllocationId'],'Region':region})\r\n eip_count += 1\r\n except:\r\n pass\r\n\r\n if eip_count:\r\n unused_eip_detail = json.loads(json.dumps(unused_eip_detail))\r\n f = 
csv.writer(open(\"./aws_logs/elastic_ip.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_eip_detail in unused_eip_detail:\r\n f.writerow([unused_eip_detail[\"ResourceType\"],\r\n unused_eip_detail[\"ResourceId\"],\r\n unused_eip_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = eip_count\r\n\r\ndef network_interface(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Network Interfaces')\r\n\r\n ni_count = 0\r\n unused_ni_detail = []\r\n\r\n for region in region_list:\r\n try:\r\n ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n network_interfaces = list(ec2.network_interfaces.all())\r\n \r\n unused_nis = set([ni.network_interface_id for ni in network_interfaces if ni.status == 'available'])\r\n for network_interface_id in unused_nis:\r\n unused_ni_detail.append({'ResourceType':'AWS::EC2::NetworkInterface','ResourceId':network_interface_id,'Region':region})\r\n ni_count+=len(unused_nis)\r\n except:\r\n pass\r\n\r\n if ni_count:\r\n unused_ni_detail = json.loads(json.dumps(unused_ni_detail))\r\n f = csv.writer(open(\"./aws_logs/network_interface.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_ni_detail in unused_ni_detail:\r\n f.writerow([unused_ni_detail[\"ResourceType\"],\r\n unused_ni_detail[\"ResourceId\"],\r\n unused_ni_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = ni_count\r\n\r\ndef vpc(function, credentials, unused_resource_count, region_list):\r\n print('Scanning VPCs')\r\n\r\n vpc_count = 0\r\n unused_vpc_detail = []\r\n\r\n for region in region_list:\r\n try:\r\n ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n vpcs = list(ec2.vpcs.all())\r\n network_interfaces = list(ec2.network_interfaces.all())\r\n \r\n all_vpcs = set([vpc.vpc_id for vpc in vpcs])\r\n all_active_vpcs = set([vpc['VpcId'] for ni in network_interfaces for vpc in ni.vpc])\r\n unused_vpcs = all_vpcs - all_active_vpcs\r\n for vpcid in unused_vpcs:\r\n unused_vpc_detail.append({'ResourceType':'AWS::EC2::VPC','ResourceId':vpcid,'Region':region})\r\n vpc_count+=len(unused_vpcs)\r\n except:\r\n pass\r\n\r\n if vpc_count:\r\n unused_vpc_detail = json.loads(json.dumps(unused_vpc_detail))\r\n f = csv.writer(open(\"./aws_logs/vpc.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_vpc_detail in unused_vpc_detail:\r\n f.writerow([unused_vpc_detail[\"ResourceType\"],\r\n unused_vpc_detail[\"ResourceId\"],\r\n unused_vpc_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = vpc_count\r\n\r\ndef subnet(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Subnets')\r\n\r\n subnet_count = 0\r\n unused_subnet_detail = []\r\n\r\n for region in region_list:\r\n try:\r\n ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n subnets = list(ec2.subnets.all())\r\n network_interfaces = list(ec2.network_interfaces.all())\r\n \r\n all_subnets = set([subnet.subnet_id for subnet in subnets])\r\n all_active_subnets = set([subnet['SubnetId'] for ni in network_interfaces for subnet in ni.subnet])\r\n unused_subnets = all_subnets - all_active_subnets\r\n for subnetid in 
unused_subnets:\r\n unused_subnet_detail.append({'ResourceType':'AWS::EC2::Subnet','ResourceId':subnetid,'Region':region})\r\n subnet_count+=len(unused_subnets)\r\n except:\r\n pass\r\n\r\n if subnet_count:\r\n unused_subnet_detail = json.loads(json.dumps(unused_subnet_detail))\r\n f = csv.writer(open(\"./aws_logs/subnet.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_subnet_detail in unused_subnet_detail:\r\n f.writerow([unused_subnet_detail[\"ResourceType\"],\r\n unused_subnet_detail[\"ResourceId\"],\r\n unused_subnet_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = subnet_count\r\n\r\ndef security_group(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Security Groups')\r\n\r\n sg_count = 0\r\n unused_sg_detail = []\r\n \r\n for region in region_list:\r\n try:\r\n ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n sgs = list(ec2.security_groups.all())\r\n network_interfaces = list(ec2.network_interfaces.all())\r\n \r\n all_sgs = set([sg.group_id for sg in sgs])\r\n all_inst_sgs = set([sg['GroupId'] for ni in network_interfaces for sg in ni.groups])\r\n unused_sgs = all_sgs - all_inst_sgs\r\n for sgid in unused_sgs:\r\n unused_sg_detail.append({'ResourceType':'AWS::EC2::SecurityGroup','ResourceId':sgid,'Region':region})\r\n sg_count+=len(unused_sgs)\r\n except:\r\n pass\r\n \r\n if sg_count:\r\n unused_sg_detail = json.loads(json.dumps(unused_sg_detail))\r\n f = csv.writer(open(\"./aws_logs/security_group.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_sg_detail in unused_sg_detail:\r\n f.writerow([unused_sg_detail[\"ResourceType\"],\r\n unused_sg_detail[\"ResourceId\"],\r\n unused_sg_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = sg_count\r\n\r\ndef classic_loadbalancer(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Classic Load balancers')\r\n\r\n elb_count = 0\r\n unused_elb_detail = []\r\n \r\n for region in region_list:\r\n try:\r\n classic_lb = boto3.client('elb', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n \r\n paginated_data=[]\r\n elb_paginator = classic_lb.get_paginator('describe_load_balancers')\r\n for load_balancers in elb_paginator.paginate():\r\n paginated_data.extend(load_balancers['LoadBalancerDescriptions'])\r\n \r\n for elb_detail in paginated_data:\r\n instance_health_status = []\r\n instance_data = classic_lb.describe_instance_health(LoadBalancerName=elb_detail['LoadBalancerName'])['InstanceStates']\r\n for instance in instance_data:\r\n instance_health_status.append(instance['State'])\r\n if 'InService' not in instance_health_status:\r\n unused_elb_detail.append({'ResourceType':'AWS::ElasticLoadBalancing::LoadBalancer','ResourceId':elb_detail['LoadBalancerName'],'Region':region})\r\n elb_count+=1\r\n except:\r\n pass\r\n\r\n if elb_count:\r\n unused_elb_detail = json.loads(json.dumps(unused_elb_detail))\r\n f = csv.writer(open(\"./aws_logs/classic_loadbalancer.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_elb_detail in unused_elb_detail:\r\n f.writerow([unused_elb_detail[\"ResourceType\"],\r\n unused_elb_detail[\"ResourceId\"],\r\n unused_elb_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = 
elb_count\r\n\r\ndef app_nw_gateway_loadbalancer(function, credentials, unused_resource_count, region_list):\r\n print('Scanning Application/Network/Gateway Load balancers')\r\n\r\n elbv2_count = 0\r\n unused_elbv2_detail = []\r\n \r\n for region in region_list:\r\n try:\r\n elbv2 = boto3.client('elbv2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)\r\n \r\n paginated_data=[]\r\n elbv2_paginator = elbv2.get_paginator('describe_load_balancers')\r\n for load_balancers in elbv2_paginator.paginate():\r\n paginated_data.extend(load_balancers['LoadBalancers'])\r\n \r\n for elbv2_detail in paginated_data:\r\n target_health_status = []\r\n try: \r\n target_group_detail = elbv2.describe_target_groups(LoadBalancerArn=elbv2_detail['LoadBalancerArn'])['TargetGroups']\r\n for target_group in target_group_detail:\r\n target_group_health = elbv2.describe_target_health(TargetGroupArn=target_group['TargetGroupArn'])['TargetHealthDescriptions']\r\n for target in target_group_health:\r\n target_health_status.append(target['TargetHealth']['State'])\r\n except:\r\n pass\r\n\r\n if 'healthy' not in target_health_status:\r\n unused_elbv2_detail.append({'ResourceType':'AWS::ElasticLoadBalancingV2::LoadBalancer', 'LoadBalancer_Type':elbv2_detail['Type'], 'ResourceId':elbv2_detail['LoadBalancerName'],'Region':region})\r\n elbv2_count+=1\r\n except:\r\n pass\r\n\r\n if elbv2_count:\r\n unused_elbv2_detail = json.loads(json.dumps(unused_elbv2_detail))\r\n f = csv.writer(open(\"./aws_logs/app_nw_gateway_loadbalancer.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"LoadBalancer_Type\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_elbv2_detail in unused_elbv2_detail:\r\n f.writerow([unused_elbv2_detail[\"ResourceType\"],\r\n unused_elbv2_detail[\"LoadBalancer_Type\"],\r\n unused_elbv2_detail[\"ResourceId\"],\r\n unused_elbv2_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = elbv2_count\r\n\r\ndef iam_user(function, credentials, unused_resource_count, region_list):\r\n print('Scanning IAM Users')\r\n\r\n iamuser_count = 0\r\n unused_iamuser_detail = []\r\n\r\n try:\r\n iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])\r\n iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])\r\n \r\n iamuser_data = list(iam.users.all())\r\n for user in iamuser_data:\r\n if not user.password_last_used and not iam_client.list_access_keys(UserName=user.name)['AccessKeyMetadata']:\r\n unused_iamuser_detail.append({'ResourceType':'AWS::IAM::User', 'ResourceId': user.name, 'Region':'Global'})\r\n iamuser_count += 1\r\n except:\r\n pass\r\n\r\n if iamuser_count:\r\n unused_iamuser_detail = json.loads(json.dumps(unused_iamuser_detail))\r\n f = csv.writer(open(\"./aws_logs/iam_user.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_iamuser_detail in unused_iamuser_detail:\r\n f.writerow([unused_iamuser_detail[\"ResourceType\"],\r\n unused_iamuser_detail[\"ResourceId\"],\r\n unused_iamuser_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = iamuser_count\r\n\r\ndef iam_group(function, credentials, unused_resource_count, region_list):\r\n print('Scanning IAM Groups')\r\n\r\n iamgroup_count = 0\r\n unused_iamgroup_detail = []\r\n\r\n try:\r\n iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], 
aws_secret_access_key=credentials['secret_key'])\r\n iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])\r\n \r\n iamgroup_data = list(iam.groups.all())\r\n for group in iamgroup_data:\r\n if not iam_client.get_group(GroupName=group.name)['Users']:\r\n unused_iamgroup_detail.append({'ResourceType':'AWS::IAM::Group', 'ResourceId': group.name, 'Region':'Global'})\r\n iamgroup_count += 1\r\n except:\r\n pass\r\n\r\n if iamgroup_count:\r\n unused_iamgroup_detail = json.loads(json.dumps(unused_iamgroup_detail))\r\n f = csv.writer(open(\"./aws_logs/iam_group.csv\", \"w\", newline=''))\r\n\r\n f.writerow([\"ResourceType\", \"ResourceId\", \"Region\"])\r\n\r\n for unused_iamgroup_detail in unused_iamgroup_detail:\r\n f.writerow([unused_iamgroup_detail[\"ResourceType\"],\r\n unused_iamgroup_detail[\"ResourceId\"],\r\n unused_iamgroup_detail[\"Region\"]])\r\n\r\n unused_resource_count[function] = iamgroup_count\r\n\r\ndef main(arg):\r\n access_key = arg.accessKey\r\n secret_key = arg.secretKey\r\n region_list = []\r\n unused_resource_details = {}\r\n\r\n try:\r\n print(\"Connecting to AWS account \")\r\n session = boto3.session.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)\r\n except:\r\n print(\"\\033[1;31;40m \"\"Please do Check for Credentials provided or Internet Connection and Try Again\\n\")\r\n quit()\r\n\r\n iam = session.client('sts')\r\n account_id = iam.get_caller_identity()[\"Account\"]\r\n print(\"Successfully connected to AWS account\", account_id)\r\n\r\n print(\"Scanning for unused resources across all available regions.\")\r\n print(\"Wait for few minutes...\\n\")\r\n\r\n function_list= [ ebs_volume, elastic_ip, network_interface, vpc, subnet, security_group, classic_loadbalancer, app_nw_gateway_loadbalancer,\r\n iam_user, iam_group ]\r\n\r\n print(\"Collecting list of enabled region\")\r\n available_regions = session.client('ec2',region_name=\"us-east-1\")\r\n enabled_regions = available_regions.describe_regions()['Regions']\r\n for region in enabled_regions:\r\n region_list.append(region['RegionName'])\r\n \r\n manager = multiprocessing.Manager()\r\n unused_resource_count = manager.dict()\r\n credentials = manager.dict()\r\n credentials['access_key'] = access_key\r\n credentials['secret_key'] = secret_key\r\n credentials['account_id'] = account_id\r\n jobs = []\r\n\r\n try:\r\n os.mkdir(\"./aws_logs\")\r\n except:\r\n pass\r\n\r\n for function in function_list:\r\n try:\r\n p = multiprocessing.Process(target=function, args=(function, credentials, unused_resource_count, region_list))\r\n jobs.append(p)\r\n p.start()\r\n except:\r\n print(\"Exception occurred while creating processes. Please try again later!\")\r\n quit()\r\n \r\n if jobs:\r\n for process in jobs:\r\n try:\r\n process.join()\r\n except:\r\n print(\"Exception occurred while joining processes. 
Please try again later!\")\r\n quit()\r\n\r\n os.chdir('./aws_logs')\r\n writer = pd.ExcelWriter('unused_resources.xlsx')\r\n all_files = glob.glob(\"*.csv\")\r\n\r\n for f in all_files:\r\n df = pd.read_csv(f)\r\n df.to_excel(writer,sheet_name=f.split('.')[0], index=False)\r\n writer.save()\r\n\r\n for f in all_files:\r\n os.remove(f)\r\n\r\n print(\"Completed account scan\")\r\n # Updating Resource Count Object\r\n unused_resource_details.update({ 'AWS::EC2::Volume': unused_resource_count[ebs_volume],\r\n 'AWS::EC2::EIP': unused_resource_count[elastic_ip],\r\n 'AWS::EC2::NetworkInterface': unused_resource_count[network_interface],\r\n 'AWS::EC2::VPC': unused_resource_count[vpc],\r\n 'AWS::EC2::Subnet': unused_resource_count[subnet],\r\n 'AWS::EC2::SecurityGroup': unused_resource_count[security_group],\r\n 'AWS::ElasticLoadBalancing::LoadBalancer': unused_resource_count[classic_loadbalancer],\r\n 'AWS::ElasticLoadBalancingV2::LoadBalancer': unused_resource_count[app_nw_gateway_loadbalancer],\r\n 'AWS::IAM::User': unused_resource_count[iam_user],\r\n 'AWS::IAM::Group': unused_resource_count[iam_group]\r\n })\r\n\r\n # Showing Resource Distribution\r\n print(\"\\nUnused Resources in the Account:\")\r\n unused_resource_count = 0\r\n for key, value in sorted(unused_resource_details.items(), key=lambda x: x[1], reverse=True):\r\n if value != 0:\r\n print(\"\\t{} : {}\".format(key, value))\r\n unused_resource_count+=value\r\n\r\n print(\"\\n\\nSummary:\")\r\n print(\"\\tTotal Unused Resources:\", unused_resource_count)\r\n print(\"\\n\\nDetailed unused resource information can be found at: aws_logs/unused_resources.xlsx\")\r\n\r\nif(__name__ == '__main__'):\r\n arg_parser = argparse.ArgumentParser(prog='unused_aws_resources',\r\n usage='%(prog)s [options]',\r\n description='Count AWS resources')\r\n\r\n # Add the arguments\r\n arg_parser.add_argument('--accessKey',\r\n type=str,\r\n required=True,\r\n help='AWS Access Key')\r\n arg_parser.add_argument('--secretKey',\r\n type=str,\r\n required=True,\r\n help='AWS Secret Key')\r\n\r\n # Execute the parse_args() method\r\n args = arg_parser.parse_args()\r\n main(args)\r\n"
] |
[
[
"pandas.read_csv",
"pandas.ExcelWriter"
]
] |
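The two pandas calls recorded for `unused_aws_resources.py` belong to its final reporting step, which folds the per-service CSV logs into one workbook. A hedged sketch of that step is below; it assumes `./aws_logs` already holds the CSVs and that openpyxl is installed, and it uses the context-manager form of `ExcelWriter` rather than the deprecated `writer.save()`.

```python
import glob
import os
import pandas as pd

# Merge every per-service CSV in ./aws_logs into a single xlsx report,
# one sheet per service, mirroring the end of main().
with pd.ExcelWriter(os.path.join("aws_logs", "unused_resources.xlsx")) as writer:  # pandas.ExcelWriter
    for path in sorted(glob.glob(os.path.join("aws_logs", "*.csv"))):
        df = pd.read_csv(path)                                                      # pandas.read_csv
        sheet_name = os.path.splitext(os.path.basename(path))[0]
        df.to_excel(writer, sheet_name=sheet_name, index=False)
```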
doneva593/wifisystem
|
[
"209c3ca8783f967221fe821993e0368ab96f4398"
] |
[
"server/ai/src/naive_bayes.py"
] |
[
"import sqlite3\nimport mmap\nimport os\nimport sys\nimport copy\nimport math\nimport tempfile\n\nfrom tqdm import tqdm\nfrom scipy.stats import norm\nfrom expiringdict import ExpiringDict\n\ncache = ExpiringDict(max_len=100000,max_age_seconds=600)\n\ndef get_num_lines(file_path):\n fp = open(file_path, \"r+\")\n buf = mmap.mmap(fp.fileno(), 0)\n lines = 0\n while buf.readline():\n lines += 1\n return lines\n\nclass ExtendedNaiveBayes:\n \n def __init__(self,family,path_to_data=\".\"):\n self.family = family \n self.db_name = os.path.join(path_to_data,family+\".nb.db\")\n\n def fit(self,csv_file):\n db = sqlite3.connect(\":memory:\")\n c = db.cursor()\n try:\n c.execute('''CREATE TABLE data (loc TEXT, mac TEXT, val INTEGER, count INTEGER)''')\n db.commit()\n except sqlite3.OperationalError:\n pass\n\n headers = []\n with open(csv_file,\"r\") as f:\n for i,line in enumerate(tqdm(f, total=get_num_lines(csv_file))):\n line = line.strip()\n if i == 0:\n headers = line.split(\",\")\n continue\n loc = \"\"\n for j,signal in enumerate(line.split(\",\")):\n if j == 0:\n loc = signal \n continue\n if signal.strip() == \"\":\n continue\n mac = headers[j]\n val = int(round(float(signal.strip())))\n c.execute('''SELECT count FROM data WHERE loc = ? AND mac = ? AND val = ?''',(loc, mac, val\n ))\n count = c.fetchone()\n if count == None:\n c.execute('''INSERT INTO data(loc,mac,val,count)\n VALUES(?,?,?,?)''', (loc,mac,val,1))\n else:\n c.execute('''UPDATE data SET count = ? WHERE loc = ? AND mac = ? AND val = ?''',(count[0]+1,loc,mac,val))\n db.commit()\n\n # with open(\"dump.sql\",\"w\") as f:\n # for line in db.iterdump():\n # f.write('%s\\n' % line)\n f = tempfile.TemporaryFile()\n for line in db.iterdump():\n f.write('{}\\n'.format(line).encode('utf-8'))\n\n db.close()\n\n # Write disk to file\n try:\n os.remove(self.db_name)\n except:\n pass\n db = sqlite3.connect(self.db_name)\n c = db.cursor()\n f.seek(0)\n c.executescript(f.read().decode('utf-8'))\n f.close()\n db.commit()\n db.close()\n # os.remove(\"dump.sql\")\n\n\n def get_locations(self):\n db = sqlite3.connect(self.db_name)\n c = db.cursor()\n c.execute('''SELECT loc FROM data GROUP BY loc''')\n locs = c.fetchall()\n db.close()\n locations = []\n for l in locs:\n locations.append(l[0])\n return locations\n\n def prob_mac_given_loc(self,mac,val,loc,positive):\n \"\"\"\n Determine the P(mac=val | loc) (positive)\n Determine the P(mac=val | ~loc) (not positive)\n \"\"\"\n name = \"{}{}{}{}\".format(mac,val,loc,positive)\n cached = cache.get(name)\n if cached != None:\n return cached\n P = 0.005\n nameData = \"{}{}{}\".format(mac,loc,positive)\n cached = cache.get(nameData)\n if cached != None:\n if val in cached:\n P = cached[val]\n return P\n\n # First find all the values for mac at loc\n db = sqlite3.connect(self.db_name)\n c = db.cursor()\n if positive:\n c.execute('''SELECT val,count FROM data WHERE loc = ? AND mac = ?''',(loc,mac))\n else:\n c.execute('''SELECT val,count FROM data WHERE loc != ? 
AND mac = ?''',(loc,mac)) \n val_to_count = {}\n for row in c.fetchall():\n val_to_count[row[0]] = row[1]\n db.close()\n\n # apply gaussian filter\n new_val_to_count = copy.deepcopy(val_to_count) \n width = 3\n for v in val_to_count:\n for x in range(-1*width**3,width**3+1):\n addend = int(round(100*norm.pdf(0,loc=x,scale=width)))\n if addend <= 0 :\n continue\n if v+x not in new_val_to_count:\n new_val_to_count[v+x] = 0\n new_val_to_count[v+x] = new_val_to_count[v+x]+addend\n\n total = 0\n for v in new_val_to_count:\n total += new_val_to_count[v]\n for v in new_val_to_count:\n new_val_to_count[v] = new_val_to_count[v] / total\n\n # 0.5% chance for anything\n P = 0.005\n if val in new_val_to_count:\n P = new_val_to_count[val]\n cache[name] = P \n cache[nameData] = new_val_to_count\n return P\n\n def predict_proba(self,header_unfiltered,csv_data_unfiltered):\n header = []\n csv_data = []\n for i,dat in enumerate(csv_data_unfiltered):\n if dat == 0:\n continue\n csv_data.append(dat)\n header.append(header_unfiltered[i])\n\n locations = self.get_locations()\n num_locations = len(locations)\n NA = 1/num_locations\n NnotA = 1-NA\n Ps = {}\n for i,mac in enumerate(header):\n val = int(round(float(csv_data[i])))\n for location in locations:\n if location not in Ps:\n Ps[location] = []\n PA = self.prob_mac_given_loc(mac,val,location,True)\n PnotA = self.prob_mac_given_loc(mac,val,location,False)\n P = PA*NA / (PA*NA + PnotA*NnotA)\n Ps[location].append(math.log(P))\n P_sum = 0\n for location in Ps:\n P_sum += math.exp(sum(Ps[location]))\n d = {}\n for location in Ps:\n d[location] = math.exp(sum(Ps[location]))/P_sum\n return [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]\n\n\n\ndef testit():\n a =ExtendedNaiveBayes(\"testing1\")\n print(\"fitting data\")\n file_to_test = \"reverse.csv\"\n a.fit(file_to_test)\n print(\"done\")\n with open(file_to_test,\"r\") as f:\n for i,line in enumerate(f):\n line = line.strip()\n if i == 0:\n headers = line.split(\",\")\n continue\n headers_submit = []\n csv_data_submit = []\n loc = \"\"\n for j,signal in enumerate(line.split(\",\")):\n if j == 0:\n loc = signal \n continue\n if signal.strip() == \"\":\n continue\n headers_submit.append(headers[j])\n csv_data_submit.append(int(round(float(signal.strip()))))\n print(loc)\n a.predict_proba(headers_submit,csv_data_submit)\n"
] |
[
[
"scipy.stats.norm.pdf"
]
] |
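`scipy.stats.norm.pdf` appears in `naive_bayes.py` only inside the Gaussian smoothing that `prob_mac_given_loc()` applies to each RSSI histogram before normalising it. A small sketch of that smoothing in isolation, with a toy histogram and the same kernel width of 3:

```python
from scipy.stats import norm

val_to_count = {-60: 3, -58: 1}   # toy RSSI histogram for one (mac, location) pair
width = 3
smoothed = dict(val_to_count)

# Add a Gaussian-weighted bump around each observed value (the original adds the
# kernel once per distinct value), dropping offsets whose weight rounds to zero.
for v in val_to_count:
    for x in range(-width ** 3, width ** 3 + 1):
        addend = int(round(100 * norm.pdf(0, loc=x, scale=width)))  # scipy.stats.norm.pdf
        if addend <= 0:
            continue
        smoothed[v + x] = smoothed.get(v + x, 0) + addend

# Normalise to a probability per RSSI value.
total = sum(smoothed.values())
probs = {v: c / total for v, c in smoothed.items()}
print(round(probs.get(-59, 0.0), 3))
```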
mvdoc/mne-python
|
[
"bac50dd08361b10d0a65c614ea2de06308750411",
"bac50dd08361b10d0a65c614ea2de06308750411",
"bac50dd08361b10d0a65c614ea2de06308750411"
] |
[
"mne/io/artemis123/artemis123.py",
"examples/decoding/plot_linear_model_patterns.py",
"mne/chpi.py"
] |
[
"# Author: Luke Bloy <bloyl@chop.edu>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport os.path as op\nimport datetime\nimport calendar\n\nfrom .utils import _load_mne_locs\nfrom ...utils import logger, warn\nfrom ..utils import _read_segments_file\nfrom ..base import BaseRaw\nfrom ..meas_info import _empty_info\nfrom ..constants import FIFF\n\n\ndef read_raw_artemis123(input_fname, preload=False, verbose=None):\n \"\"\"Read Artemis123 data as raw object.\n\n Parameters\n ----------\n input_fname : str\n Path to the data file (extension ``.bin``). The header file with the\n same file name stem and an extension ``.txt`` is expected to be found\n in the same directory.\n preload : bool or str (default False)\n Preload data into memory for data manipulation and faster indexing.\n If True, the data will be preloaded into memory (fast, requires\n large amount of memory). If preload is a string, preload is the\n file name of a memory-mapped file which is used to store the data\n on the hard drive (slower, requires less memory).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n raw : Instance of Raw\n A Raw object containing the data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n return RawArtemis123(input_fname, preload=preload, verbose=verbose)\n\n\ndef _get_artemis123_info(fname):\n \"\"\"Function for extracting info from artemis123 header files.\"\"\"\n fname = op.splitext(op.abspath(fname))[0]\n header = fname + '.txt'\n\n logger.info('Reading header...')\n\n # key names for artemis channel info...\n chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass',\n 'FLL_AutoReset', 'FLL_ResetLock']\n\n header_info = dict()\n header_info['filter_hist'] = []\n header_info['comments'] = ''\n header_info['channels'] = []\n\n with open(header, 'r') as fid:\n # section flag\n # 0 - None\n # 1 - main header\n # 2 - channel header\n # 3 - comments\n # 4 - length\n # 5 - filtering History\n sectionFlag = 0\n for line in fid:\n # skip emptylines or header line for channel info\n if ((not line.strip()) or\n (sectionFlag == 2 and line.startswith('DAQ Map'))):\n continue\n\n # set sectionFlag\n if line.startswith('<end'):\n sectionFlag = 0\n elif line.startswith(\"<start main header>\"):\n sectionFlag = 1\n elif line.startswith(\"<start per channel header>\"):\n sectionFlag = 2\n elif line.startswith(\"<start comments>\"):\n sectionFlag = 3\n elif line.startswith(\"<start length>\"):\n sectionFlag = 4\n elif line.startswith(\"<start filtering history>\"):\n sectionFlag = 5\n else:\n # parse header info lines\n # part of main header - lines are name value pairs\n if sectionFlag == 1:\n values = line.strip().split('\\t')\n if len(values) == 1:\n values.append('')\n header_info[values[0]] = values[1]\n # part of channel header - lines are Channel Info\n elif sectionFlag == 2:\n values = line.strip().split('\\t')\n if len(values) != 7:\n raise IOError('Error parsing line \\n\\t:%s\\n' % line +\n 'from file %s' % header)\n tmp = dict()\n for k, v in zip(chan_keys, values):\n tmp[k] = v\n header_info['channels'].append(tmp)\n elif sectionFlag == 3:\n header_info['comments'] = '%s%s' \\\n % (header_info['comments'], line.strip())\n elif sectionFlag == 4:\n header_info['num_samples'] = int(line.strip())\n elif sectionFlag == 5:\n header_info['filter_hist'].append(line.strip())\n\n for k in ['Temporal Filter Active?', 'Decimation Active?',\n 'Spatial Filter Active?']:\n 
if(header_info[k] != 'FALSE'):\n warn('%s - set to but is not supported' % k)\n if(header_info['filter_hist']):\n warn('Non-Empty Filter histroy found, BUT is not supported' % k)\n\n # build mne info struct\n info = _empty_info(float(header_info['Rate Out']))\n\n # Attempt to get time/date from fname\n # Artemis123 files saved from the scanner observe the following\n # naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin'\n try:\n date = datetime.datetime.strptime(\n op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm')\n meas_date = calendar.timegm(date.utctimetuple())\n except Exception:\n meas_date = None\n\n # build subject info\n subject_info = {'id': header_info['Subject ID']}\n\n # build description\n desc = ''\n for k in ['Purpose', 'Notes']:\n desc += '{} : {}\\n'.format(k, header_info[k])\n desc += 'Comments : {}'.format(header_info['comments'])\n\n info = _empty_info(float(header_info['Rate Out']))\n info.update({'filename': fname, 'meas_date': meas_date,\n 'description': desc, 'buffer_size_sec': 1.,\n 'subject_info': subject_info,\n 'proj_name': header_info['Project Name']})\n\n # Channel Names by type\n ref_mag_names = ['REF_001', 'REF_002', 'REF_003',\n 'REF_004', 'REF_005', 'REF_006']\n\n ref_grad_names = ['REF_007', 'REF_008', 'REF_009',\n 'REF_010', 'REF_011', 'REF_012']\n\n # load mne loc dictionary\n loc_dict = _load_mne_locs()\n info['chs'] = []\n info['bads'] = []\n\n for i, chan in enumerate(header_info['channels']):\n # build chs struct\n t = {'cal': float(chan['scaling']), 'ch_name': chan['name'],\n 'logno': i + 1, 'scanno': i + 1, 'range': 1.0,\n 'unit_mul': FIFF.FIFF_UNITM_NONE,\n 'coord_frame': FIFF.FIFFV_COORD_DEVICE}\n t['loc'] = loc_dict.get(chan['name'], np.zeros(12))\n\n if (chan['name'].startswith('MEG')):\n t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD\n t['kind'] = FIFF.FIFFV_MEG_CH\n # While gradiometer units are T/m, the meg sensors referred to as\n # gradiometers report the field difference between 2 pick-up coils.\n # Therefore the units of the measurements should be T\n # *AND* the baseline (difference between pickup coils)\n # should not be used in leadfield / forwardfield computations.\n t['unit'] = FIFF.FIFF_UNIT_T\n t['unit_mul'] = FIFF.FIFF_UNITM_F\n\n # 3 axis referance magnetometers\n elif (chan['name'] in ref_mag_names):\n t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG\n t['kind'] = FIFF.FIFFV_REF_MEG_CH\n t['unit'] = FIFF.FIFF_UNIT_T\n t['unit_mul'] = FIFF.FIFF_UNITM_F\n\n # reference gradiometers\n elif (chan['name'] in ref_grad_names):\n t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD\n t['kind'] = FIFF.FIFFV_REF_MEG_CH\n # While gradiometer units are T/m, the meg sensors referred to as\n # gradiometers report the field difference between 2 pick-up coils.\n # Therefore the units of the measurements should be T\n # *AND* the baseline (difference between pickup coils)\n # should not be used in leadfield / forwardfield computations.\n t['unit'] = FIFF.FIFF_UNIT_T\n t['unit_mul'] = FIFF.FIFF_UNITM_F\n\n # other reference channels are unplugged and should be ignored.\n elif (chan['name'].startswith('REF')):\n t['coil_type'] = FIFF.FIFFV_COIL_NONE\n t['kind'] = FIFF.FIFFV_MISC_CH\n t['unit'] = FIFF.FIFF_UNIT_V\n info['bads'].append(t['ch_name'])\n\n elif (chan['name'].startswith(('AUX', 'TRG', 'MIO'))):\n t['coil_type'] = FIFF.FIFFV_COIL_NONE\n t['unit'] = FIFF.FIFF_UNIT_V\n if (chan['name'].startswith('TRG')):\n t['kind'] = FIFF.FIFFV_STIM_CH\n else:\n t['kind'] = FIFF.FIFFV_MISC_CH\n else:\n raise 
ValueError('Channel does not match expected' +\n ' channel Types:\"%s\"' % chan['name'])\n\n # incorporate mulitplier (unit_mul) into calibration\n t['cal'] *= 10 ** t['unit_mul']\n t['unit_mul'] = FIFF.FIFF_UNITM_NONE\n\n # append this channel to the info\n info['chs'].append(t)\n if (chan['FLL_ResetLock'] == 'TRUE'):\n info['bads'].append(t['ch_name'])\n\n # reduce info['bads'] to unique set\n info['bads'] = list(set(info['bads']))\n info._update_redundant()\n return info, header_info\n\n\nclass RawArtemis123(BaseRaw):\n \"\"\"Raw object from Artemis123 file.\n\n Parameters\n ----------\n input_fname : str\n Path to the Artemis123 data file (ending in ``'.bin'``).\n preload : bool or str (default False)\n Preload data into memory for data manipulation and faster indexing.\n If True, the data will be preloaded into memory (fast, requires\n large amount of memory). If preload is a string, preload is the\n file name of a memory-mapped file which is used to store the data\n on the hard drive (slower, requires less memory).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n\n def __init__(self, input_fname, preload=False, verbose=None): # noqa: D102\n info, header_info = _get_artemis123_info(input_fname)\n last_samps = [header_info['num_samples'] - 1]\n super(RawArtemis123, self).__init__(\n info, preload, filenames=[input_fname], raw_extras=[header_info],\n last_samps=last_samps, orig_format=np.float32,\n verbose=verbose)\n\n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a chunk of raw data.\"\"\"\n _read_segments_file(self, data, idx, fi, start,\n stop, cals, mult, dtype='>f4')\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n===============================================================\nLinear classifier on sensor data with plot patterns and filters\n===============================================================\n\nDecoding, a.k.a MVPA or supervised machine learning applied to MEG and EEG\ndata in sensor space. Fit a linear classifier with the LinearModel object\nproviding topographical patterns which are more neurophysiologically\ninterpretable [1] than the classifier filters (weight vectors).\nThe patterns explain how the MEG and EEG data were generated from the\ndiscriminant neural sources which are extracted by the filters.\nNote patterns/filters in MEG data are more similar than EEG data\nbecause the noise is less spatially correlated in MEG than EEG.\n\n[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,\nBlankertz, B., & Bießmann, F. (2014). On the interpretation of\nweight vectors of linear models in multivariate neuroimaging.\nNeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067\n\"\"\"\n# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n# Romain Trachel <trachelr@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\n\n# import a linear classifier from mne.decoding\nfrom mne.decoding import LinearModel\n\nprint(__doc__)\n\ndata_path = sample.data_path()\n\n###############################################################################\n# Set parameters\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\ntmin, tmax = -0.2, 0.5\nevent_id = dict(aud_l=1, vis_l=3)\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(2, 15) # replace baselining with high-pass\nevents = mne.read_events(event_fname)\n\n# Read epochs\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n decim=4, baseline=None, preload=True)\n\nlabels = epochs.events[:, -1]\n\n# get MEG and EEG data\nmeg_epochs = epochs.copy().pick_types(meg=True, eeg=False)\nmeg_data = meg_epochs.get_data().reshape(len(labels), -1)\neeg_epochs = epochs.copy().pick_types(meg=False, eeg=True)\neeg_data = eeg_epochs.get_data().reshape(len(labels), -1)\n\n###############################################################################\n# Decoding in sensor space using a LogisticRegression classifier\n\nclf = LogisticRegression()\nsc = StandardScaler()\n\n# create a linear model with LogisticRegression\nmodel = LinearModel(clf)\n\n# fit the classifier on MEG data\nX = sc.fit_transform(meg_data)\nmodel.fit(X, labels)\n# plot patterns and filters\nmodel.plot_patterns(meg_epochs.info, title='MEG Patterns')\nmodel.plot_filters(meg_epochs.info, title='MEG Filters')\n\n# fit the classifier on EEG data\nX = sc.fit_transform(eeg_data)\nmodel.fit(X, labels)\n# plot patterns and filters\nmodel.plot_patterns(eeg_epochs.info, title='EEG Patterns')\nmodel.plot_filters(eeg_epochs.info, title='EEG Filters')\n",
"# Authors: Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nfrom functools import partial\n\nimport numpy as np\nfrom scipy import linalg, fftpack\n\nfrom .io.pick import pick_types, pick_channels\nfrom .io.constants import FIFF\nfrom .forward import (_magnetic_dipole_field_vec, _create_meg_coils,\n _concatenate_coils, _read_coil_defs)\nfrom .cov import make_ad_hoc_cov, _get_whitener_data\nfrom .transforms import (apply_trans, invert_transform, _angle_between_quats,\n quat_to_rot, rot_to_quat)\nfrom .utils import (verbose, logger, check_version, use_log_level,\n _check_fname, warn)\n\n# Eventually we should add:\n# hpicons\n# high-passing of data during fits\n# parsing cHPI coil information from acq pars, then to PSD if necessary\n\n\n# ############################################################################\n# Reading from text or FIF file\n\ndef read_head_pos(fname):\n \"\"\"Read MaxFilter-formatted head position parameters.\n\n Parameters\n ----------\n fname : str\n The filename to read. This can be produced by e.g.,\n ``maxfilter -headpos <name>.pos``.\n\n Returns\n -------\n pos : array, shape (N, 10)\n The position and quaternion parameters from cHPI fitting.\n\n See Also\n --------\n write_head_pos\n head_pos_to_trans_rot_t\n\n Notes\n -----\n .. versionadded:: 0.12\n \"\"\"\n _check_fname(fname, must_exist=True, overwrite=True)\n data = np.loadtxt(fname, skiprows=1) # first line is header, skip it\n data.shape = (-1, 10) # ensure it's the right size even if empty\n if np.isnan(data).any(): # make sure we didn't do something dumb\n raise RuntimeError('positions could not be read properly from %s'\n % fname)\n return data\n\n\ndef write_head_pos(fname, pos):\n \"\"\"Write MaxFilter-formatted head position parameters.\n\n Parameters\n ----------\n fname : str\n The filename to write.\n pos : array, shape (N, 10)\n The position and quaternion parameters from cHPI fitting.\n\n See Also\n --------\n read_head_pos\n head_pos_to_trans_rot_t\n\n Notes\n -----\n .. 
versionadded:: 0.12\n \"\"\"\n _check_fname(fname, overwrite=True)\n pos = np.array(pos, np.float64)\n if pos.ndim != 2 or pos.shape[1] != 10:\n raise ValueError('pos must be a 2D array of shape (N, 10)')\n with open(fname, 'wb') as fid:\n fid.write(' Time q1 q2 q3 q4 q5 '\n 'q6 g-value error velocity\\n'.encode('ASCII'))\n for p in pos:\n fmts = ['% 9.3f'] + ['% 8.5f'] * 9\n fid.write(((' ' + ' '.join(fmts) + '\\n')\n % tuple(p)).encode('ASCII'))\n\n\ndef head_pos_to_trans_rot_t(quats):\n \"\"\"Convert Maxfilter-formatted head position quaternions.\n\n Parameters\n ----------\n quats : ndarray, shape (N, 10)\n MaxFilter-formatted position and quaternion parameters.\n\n Returns\n -------\n translation : ndarray, shape (N, 3)\n Translations at each time point.\n rotation : ndarray, shape (N, 3, 3)\n Rotations at each time point.\n t : ndarray, shape (N,)\n The time points.\n\n See Also\n --------\n read_pos\n write_pos\n \"\"\"\n t = quats[..., 0].copy()\n rotation = quat_to_rot(quats[..., 1:4])\n translation = quats[..., 4:7].copy()\n return translation, rotation, t\n\n\n# ############################################################################\n# Estimate positions from data\n\n@verbose\ndef _get_hpi_info(info, adjust=False, verbose=None):\n \"\"\"Helper to get HPI information from raw.\"\"\"\n if len(info['hpi_meas']) == 0 or \\\n ('coil_freq' not in info['hpi_meas'][0]['hpi_coils'][0]):\n raise RuntimeError('Appropriate cHPI information not found in'\n 'raw.info[\"hpi_meas\"], cannot process cHPI')\n hpi_result = info['hpi_results'][-1]\n hpi_coils = sorted(info['hpi_meas'][-1]['hpi_coils'],\n key=lambda x: x['number']) # ascending (info) order\n hpi_dig = sorted([d for d in info['dig']\n if d['kind'] == FIFF.FIFFV_POINT_HPI],\n key=lambda x: x['ident']) # ascending (dig) order\n pos_order = hpi_result['order'] - 1 # zero-based indexing, dig->info\n\n # this shouldn't happen, eventually we could add the transforms\n # necessary to put it in head coords\n if not all(d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig):\n raise RuntimeError('cHPI coordinate frame incorrect')\n # Give the user some info\n logger.info('HPIFIT: %s coils digitized in order %s'\n % (len(pos_order), ' '.join(str(o + 1) for o in pos_order)))\n logger.debug('HPIFIT: %s coils accepted: %s'\n % (len(hpi_result['used']),\n ' '.join(str(h) for h in hpi_result['used'])))\n hpi_rrs = np.array([d['r'] for d in hpi_dig])[pos_order]\n\n # Fitting errors\n hpi_rrs_fit = sorted([d for d in info['hpi_results'][-1]['dig_points']],\n key=lambda x: x['ident'])\n hpi_rrs_fit = np.array([d['r'] for d in hpi_rrs_fit])\n # hpi_result['dig_points'] are in FIFFV_COORD_UNKNOWN coords, but this\n # is probably a misnomer because it should be FIFFV_COORD_DEVICE for this\n # to work\n assert hpi_result['coord_trans']['to'] == FIFF.FIFFV_COORD_HEAD\n hpi_rrs_fit = apply_trans(hpi_result['coord_trans']['trans'], hpi_rrs_fit)\n if 'moments' in hpi_result:\n logger.debug('Hpi coil moments (%d %d):'\n % hpi_result['moments'].shape[::-1])\n for moment in hpi_result['moments']:\n logger.debug(\"%g %g %g\" % tuple(moment))\n errors = np.sqrt(((hpi_rrs - hpi_rrs_fit) ** 2).sum(axis=1))\n logger.debug('HPIFIT errors: %s mm.'\n % ', '.join('%0.1f' % (1000. 
* e) for e in errors))\n if errors.sum() < len(errors) * hpi_result['dist_limit']:\n logger.info('HPI consistency of isotrak and hpifit is OK.')\n elif not adjust and (len(hpi_result['used']) == len(hpi_coils)):\n warn('HPI consistency of isotrak and hpifit is poor.')\n else:\n # adjust HPI coil locations using the hpifit transformation\n for hi, (r_dig, r_fit) in enumerate(zip(hpi_rrs, hpi_rrs_fit)):\n # transform to head frame\n d = 1000 * np.sqrt(((r_dig - r_fit) ** 2).sum())\n if not adjust:\n warn('Discrepancy of HPI coil %d isotrak and hpifit is %.1f '\n 'mm!' % (hi + 1, d))\n elif hi + 1 not in hpi_result['used']:\n if hpi_result['goodness'][hi] >= hpi_result['good_limit']:\n logger.info('Note: HPI coil %d isotrak is adjusted by '\n '%.1f mm!' % (hi + 1, d))\n hpi_rrs[hi] = r_fit\n else:\n warn('Discrepancy of HPI coil %d isotrak and hpifit of '\n '%.1f mm was not adjusted!' % (hi + 1, d))\n logger.debug('HP fitting limits: err = %.1f mm, gval = %.3f.'\n % (1000 * hpi_result['dist_limit'], hpi_result['good_limit']))\n\n # how cHPI active is indicated in the FIF file\n hpi_sub = info['hpi_subsystem']\n if 'event_channel' in hpi_sub:\n hpi_pick = pick_channels(info['ch_names'],\n [hpi_sub['event_channel']])\n hpi_pick = hpi_pick[0] if len(hpi_pick) > 0 else None\n else:\n hpi_pick = None # there is no pick!\n hpi_on = [coil['event_bits'][0] for coil in hpi_sub['hpi_coils']]\n # not all HPI coils will actually be used\n hpi_on = np.array([hpi_on[hc['number'] - 1] for hc in hpi_coils])\n assert len(hpi_coils) == len(hpi_on)\n\n # get frequencies\n hpi_freqs = np.array([float(x['coil_freq']) for x in hpi_coils])\n logger.info('Using %s HPI coils: %s Hz'\n % (len(hpi_freqs), ' '.join(str(int(s)) for s in hpi_freqs)))\n return hpi_freqs, hpi_rrs, hpi_pick, hpi_on, pos_order\n\n\ndef _magnetic_dipole_objective(x, B, B2, coils, scale, method):\n \"\"\"Project data onto right eigenvectors of whitened forward.\"\"\"\n if method == 'forward':\n fwd = _magnetic_dipole_field_vec(x[np.newaxis, :], coils)\n else:\n from .preprocessing.maxwell import _sss_basis\n # Eventually we can try incorporating external bases here, which\n # is why the :3 is on the SVD below\n fwd = _sss_basis(dict(origin=x, int_order=1, ext_order=0), coils).T\n fwd = np.dot(fwd, scale.T)\n one = np.dot(linalg.svd(fwd, full_matrices=False)[2][:3], B)\n one *= one\n Bm2 = one.sum()\n return B2 - Bm2\n\n\ndef _fit_magnetic_dipole(B_orig, x0, coils, scale, method):\n \"\"\"Fit a single bit of data (x0 = pos).\"\"\"\n from scipy.optimize import fmin_cobyla\n B = np.dot(scale, B_orig)\n B2 = np.dot(B, B)\n objective = partial(_magnetic_dipole_objective, B=B, B2=B2,\n coils=coils, scale=scale, method=method)\n x = fmin_cobyla(objective, x0, (), rhobeg=1e-2, rhoend=1e-5, disp=False)\n return x, 1. 
- objective(x) / B2\n\n\ndef _chpi_objective(x, coil_dev_rrs, coil_head_rrs):\n \"\"\"Helper objective function.\"\"\"\n d = np.dot(coil_dev_rrs, quat_to_rot(x[:3]).T)\n d += x[3:]\n d -= coil_head_rrs\n d *= d\n return d.sum()\n\n\ndef _unit_quat_constraint(x):\n \"\"\"Constrain our 3 quaternion rot params (ignoring w) to have norm <= 1.\"\"\"\n return 1 - (x * x).sum()\n\n\ndef _fit_chpi_pos(coil_dev_rrs, coil_head_rrs, x0):\n \"\"\"Fit rotation and translation parameters for cHPI coils.\"\"\"\n from scipy.optimize import fmin_cobyla\n denom = np.sum((coil_head_rrs - np.mean(coil_head_rrs, axis=0)) ** 2)\n objective = partial(_chpi_objective, coil_dev_rrs=coil_dev_rrs,\n coil_head_rrs=coil_head_rrs)\n x = fmin_cobyla(objective, x0, _unit_quat_constraint,\n rhobeg=1e-2, rhoend=1e-6, disp=False)\n return x, 1. - objective(x) / denom\n\n\n@verbose\ndef _setup_chpi_fits(info, t_window, t_step_min, method='forward',\n exclude='bads', add_hpi_stim_pick=True,\n remove_aliased=False, verbose=None):\n \"\"\"Helper to set up cHPI fits.\"\"\"\n from scipy.spatial.distance import cdist\n from .preprocessing.maxwell import _prep_mf_coils\n if not (check_version('numpy', '1.7') and check_version('scipy', '0.11')):\n raise RuntimeError('numpy>=1.7 and scipy>=0.11 required')\n hpi_freqs, coil_head_rrs, hpi_pick, hpi_ons = _get_hpi_info(info)[:4]\n # What to do e.g. if Raw has been resampled and some of our\n # HPI freqs would now be aliased\n highest = info.get('lowpass')\n highest = info['sfreq'] / 2. if highest is None else highest\n keepers = np.array([h <= highest for h in hpi_freqs], bool)\n if remove_aliased:\n hpi_freqs = hpi_freqs[keepers]\n coil_head_rrs = coil_head_rrs[keepers]\n hpi_ons = hpi_ons[keepers]\n elif not keepers.all():\n raise RuntimeError('Found HPI frequencies %s above the lowpass '\n '(or Nyquist) frequency %0.1f'\n % (hpi_freqs[~keepers].tolist(), highest))\n line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3.,\n info['line_freq'])\n logger.info('Line interference frequencies: %s Hz'\n % ' '.join(['%d' % l for l in line_freqs]))\n # initial transforms\n dev_head_t = info['dev_head_t']['trans']\n head_dev_t = invert_transform(info['dev_head_t'])['trans']\n # determine timing\n n_window = int(round(t_window * info['sfreq']))\n logger.debug('Coordinate transformation:')\n for d in (dev_head_t[0, :3], dev_head_t[1, :3], dev_head_t[2, :3],\n dev_head_t[:3, 3] * 1000.):\n logger.debug('{0:8.4f} {1:8.4f} {2:8.4f}'.format(*d))\n slope = np.arange(n_window).astype(np.float64)[:, np.newaxis]\n slope -= np.mean(slope)\n rads = slope / info['sfreq']\n rads *= 2 * np.pi\n f_t = hpi_freqs[np.newaxis, :] * rads\n l_t = line_freqs[np.newaxis, :] * rads\n model = [np.sin(f_t), np.cos(f_t)] # hpi freqs\n model += [np.sin(l_t), np.cos(l_t)] # line freqs\n model += [slope, np.ones(slope.shape)]\n model = np.concatenate(model, axis=1)\n inv_model = linalg.pinv(model)\n # Set up highpass at half lowest cHPI freq\n hp_n = 2 ** (int(np.ceil(np.log2(n_window))) + 1)\n freqs = fftpack.rfftfreq(hp_n, 1. 
/ info['sfreq'])\n hp_ind = np.where(freqs >= hpi_freqs.min())[0][0] - 2\n hp_window = np.concatenate(\n [[0], np.repeat(np.hanning(hp_ind - 1)[:(hp_ind - 1) // 2],\n 2)])[np.newaxis]\n\n # Set up magnetic dipole fits\n picks_meg = pick_types(info, meg=True, eeg=False, exclude=exclude)\n if add_hpi_stim_pick:\n if hpi_pick is None:\n raise RuntimeError('Could not find HPI status channel')\n picks = np.concatenate([picks_meg, [hpi_pick]])\n else:\n picks = picks_meg\n megchs = [ch for ci, ch in enumerate(info['chs']) if ci in picks_meg]\n templates = _read_coil_defs(elekta_defs=True, verbose=False)\n coils = _create_meg_coils(megchs, 'accurate', coilset=templates)\n if method == 'forward':\n coils = _concatenate_coils(coils)\n else: # == 'multipole'\n coils = _prep_mf_coils(info)\n scale = make_ad_hoc_cov(info, verbose=False)\n scale = _get_whitener_data(info, scale, picks_meg, verbose=False)\n orig_dev_head_quat = np.concatenate([rot_to_quat(dev_head_t[:3, :3]),\n dev_head_t[:3, 3]])\n dists = cdist(coil_head_rrs, coil_head_rrs)\n hpi = dict(dists=dists, scale=scale, picks=picks, model=model,\n inv_model=inv_model, coil_head_rrs=coil_head_rrs,\n coils=coils, on=hpi_ons, n_window=n_window, method=method,\n freqs=hpi_freqs, line_freqs=line_freqs,\n hp_ind=hp_ind, hp_n=hp_n, hp_window=hp_window)\n last = dict(quat=orig_dev_head_quat, coil_head_rrs=coil_head_rrs,\n coil_dev_rrs=apply_trans(head_dev_t, coil_head_rrs),\n sin_fit=None, fit_time=-t_step_min)\n return hpi, last\n\n\ndef _time_prefix(fit_time):\n \"\"\"Helper to format log messages.\"\"\"\n return (' t=%0.3f:' % fit_time).ljust(17)\n\n\n@verbose\ndef _calculate_chpi_positions(raw, t_step_min=0.1, t_step_max=10.,\n t_window=0.2, dist_limit=0.005, gof_limit=0.98,\n verbose=None):\n \"\"\"Calculate head positions using cHPI coils.\n\n Parameters\n ----------\n raw : instance of Raw\n Raw data with cHPI information.\n t_step_min : float\n Minimum time step to use. If correlations are sufficiently high,\n t_step_max will be used.\n t_step_max : float\n Maximum time step to use.\n t_window : float\n Time window to use to estimate the head positions.\n max_step : float\n Maximum time step to go between estimations.\n dist_limit : float\n Minimum distance (m) to accept for coil position fitting.\n gof_limit : float\n Minimum goodness of fit to accept.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n quats : ndarray, shape (N, 10)\n The ``[t, q1, q2, q3, x, y, z, gof, err, v]`` for each fit.\n\n Notes\n -----\n The number of time points ``N`` will depend on the velocity of head\n movements as well as ``t_step_max`` and ``t_step_min``.\n\n See Also\n --------\n read_head_pos\n write_head_pos\n \"\"\"\n from scipy.spatial.distance import cdist\n hpi, last = _setup_chpi_fits(raw.info, t_window, t_step_min)\n fit_idxs = raw.time_as_index(np.arange(0., raw.times[-1], t_step_min),\n use_rounding=True)\n quats = []\n logger.info('Fitting up to %s time points (%0.1f sec duration)'\n % (len(fit_idxs), raw.times[-1]))\n pos_0 = None\n n_freqs = len(hpi['freqs'])\n for midpt in fit_idxs:\n #\n # 1. 
Fit amplitudes for each channel from each of the N cHPI sinusoids\n #\n fit_time = midpt / raw.info['sfreq']\n time_sl = midpt - hpi['n_window'] // 2\n time_sl = slice(max(time_sl, 0),\n min(time_sl + hpi['n_window'], len(raw.times)))\n with use_log_level(False):\n meg_chpi_data = raw[hpi['picks'], time_sl][0]\n this_data = meg_chpi_data[:-1]\n chpi_data = meg_chpi_data[-1]\n ons = (np.round(chpi_data).astype(np.int) &\n hpi['on'][:, np.newaxis]).astype(bool)\n n_on = np.sum(ons, axis=0)\n if not (n_on >= 3).all():\n logger.info(_time_prefix(fit_time) + '%s < 3 HPI coils turned on, '\n 'skipping fit' % (n_on.min(),))\n continue\n # ons = ons.all(axis=1) # which HPI coils to use\n this_len = time_sl.stop - time_sl.start\n if this_len == hpi['n_window']:\n model, inv_model = hpi['model'], hpi['inv_model']\n else: # first or last window\n model = hpi['model'][:this_len]\n inv_model = linalg.pinv(model)\n X = np.dot(inv_model, this_data.T)\n data_diff = np.dot(model, X).T - this_data\n del model, inv_model\n data_diff *= data_diff\n this_data *= this_data\n g_chan = (1 - np.sqrt(data_diff.sum(axis=1) / this_data.sum(axis=1)))\n g_sin = (1 - np.sqrt(data_diff.sum() / this_data.sum()))\n del data_diff, this_data\n X_sin, X_cos = X[:n_freqs], X[n_freqs:2 * n_freqs]\n signs = np.sign(np.arctan2(X_sin, X_cos))\n X_sin *= X_sin\n X_cos *= X_cos\n X_sin += X_cos\n sin_fit = np.sqrt(X_sin)\n if last['sin_fit'] is not None: # first iteration\n corr = np.corrcoef(sin_fit.ravel(), last['sin_fit'].ravel())[0, 1]\n # check to see if we need to continue\n if fit_time - last['fit_time'] <= t_step_max - 1e-7 and \\\n corr * corr > 0.98:\n continue # don't need to re-fit data\n last['sin_fit'] = sin_fit.copy() # save *before* inplace sign mult\n sin_fit *= signs\n del signs, X_sin, X_cos, X\n\n #\n # 2. Fit magnetic dipole for each coil to obtain coil positions\n # in device coordinates\n #\n logger.debug(' HPI amplitude correlation %0.3f: %0.3f '\n '(%s chnls > 0.950)' % (fit_time, np.sqrt(g_sin),\n (np.sqrt(g_chan) > 0.95).sum()))\n outs = [_fit_magnetic_dipole(f, pos, hpi['coils'], hpi['scale'],\n hpi['method'])\n for f, pos in zip(sin_fit, last['coil_dev_rrs'])]\n this_coil_dev_rrs = np.array([o[0] for o in outs])\n g_coils = [o[1] for o in outs]\n these_dists = cdist(this_coil_dev_rrs, this_coil_dev_rrs)\n these_dists = np.abs(hpi['dists'] - these_dists)\n # there is probably a better algorithm for finding the bad ones...\n good = False\n use_mask = np.ones(n_freqs, bool)\n while not good:\n d = these_dists[use_mask][:, use_mask]\n d_bad = (d > dist_limit)\n good = not d_bad.any()\n if not good:\n if use_mask.sum() == 2:\n use_mask[:] = False\n break # failure\n # exclude next worst point\n badness = (d * d_bad).sum(axis=0)\n exclude = np.where(use_mask)[0][np.argmax(badness)]\n use_mask[exclude] = False\n good = use_mask.sum() >= 3\n if not good:\n warn(_time_prefix(fit_time) + '%s/%s good HPI fits, '\n 'cannot determine the transformation!'\n % (use_mask.sum(), n_freqs))\n continue\n\n #\n # 3. Fit the head translation and rotation params (minimize error\n # between coil positions and the head coil digitization positions)\n #\n this_quat, g = _fit_chpi_pos(this_coil_dev_rrs[use_mask],\n hpi['coil_head_rrs'][use_mask],\n last['quat'])\n if g < gof_limit:\n logger.info(_time_prefix(fit_time) +\n 'Bad coil fit! 
(g=%7.3f)' % (g,))\n continue\n this_dev_head_t = np.concatenate(\n (quat_to_rot(this_quat[:3]),\n this_quat[3:][:, np.newaxis]), axis=1)\n this_dev_head_t = np.concatenate((this_dev_head_t, [[0, 0, 0, 1.]]))\n # velocities, in device coords, of HPI coils\n dt = fit_time - last['fit_time']\n vs = tuple(1000. * np.sqrt(np.sum((last['coil_dev_rrs'] -\n this_coil_dev_rrs) ** 2,\n axis=1)) / dt)\n logger.info(_time_prefix(fit_time) +\n ('%s/%s good HPI fits, movements [mm/s] = ' +\n ' / '.join(['% 6.1f'] * n_freqs))\n % ((use_mask.sum(), n_freqs) + vs))\n # resulting errors in head coil positions\n est_coil_head_rrs = apply_trans(this_dev_head_t, this_coil_dev_rrs)\n errs = 1000. * np.sqrt(((hpi['coil_head_rrs'] -\n est_coil_head_rrs) ** 2).sum(axis=-1))\n e = errs.mean() / 1000. # mm -> m\n d = 100 * np.sqrt(np.sum(last['quat'][3:] - this_quat[3:]) ** 2) # cm\n r = _angle_between_quats(last['quat'][:3], this_quat[:3]) / dt\n v = d / dt # cm/sec\n if pos_0 is None:\n pos_0 = this_quat[3:].copy()\n d = 100 * np.sqrt(np.sum((this_quat[3:] - pos_0) ** 2)) # dis from 1st\n # MaxFilter averages over a 200 ms window for display, but we don't\n for ii in range(n_freqs):\n if use_mask[ii]:\n start, end = ' ', '/'\n else:\n start, end = '(', ')'\n log_str = (' ' + start +\n '{0:6.1f} {1:6.1f} {2:6.1f} / ' +\n '{3:6.1f} {4:6.1f} {5:6.1f} / ' +\n 'g = {6:0.3f} err = {7:4.1f} ' +\n end)\n if ii <= 2:\n log_str += '{8:6.3f} {9:6.3f} {10:6.3f}'\n elif ii == 3:\n log_str += '{8:6.1f} {9:6.1f} {10:6.1f}'\n vals = np.concatenate((1000 * hpi['coil_head_rrs'][ii],\n 1000 * est_coil_head_rrs[ii],\n [g_coils[ii], errs[ii]]))\n if ii <= 2:\n vals = np.concatenate((vals, this_dev_head_t[ii, :3]))\n elif ii == 3:\n vals = np.concatenate((vals, this_dev_head_t[:3, 3] * 1000.))\n logger.debug(log_str.format(*vals))\n logger.debug(' #t = %0.3f, #e = %0.2f cm, #g = %0.3f, '\n '#v = %0.2f cm/s, #r = %0.2f rad/s, #d = %0.2f cm'\n % (fit_time, 100 * e, g, v, r, d))\n quats.append(np.concatenate(([fit_time], this_quat, [g], [e], [v])))\n last['fit_time'] = fit_time\n last['quat'] = this_quat\n last['coil_dev_rrs'] = this_coil_dev_rrs\n logger.info('[done]')\n quats = np.array(quats, np.float64)\n quats = np.zeros((0, 10)) if quats.size == 0 else quats\n return quats\n\n\n@verbose\ndef filter_chpi(raw, include_line=True, verbose=None):\n \"\"\"Remove cHPI and line noise from data.\n\n .. note:: This function will only work properly if cHPI was on\n during the recording.\n\n Parameters\n ----------\n raw : instance of Raw\n Raw data with cHPI information. Must be preloaded. Operates in-place.\n include_line : bool\n If True, also filter line noise.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n raw : instance of Raw\n The raw data.\n\n Notes\n -----\n cHPI signals are in general not stationary, because head movements act\n like amplitude modulators on cHPI signals. Thus it is recommended to\n to use this procedure, which uses an iterative fitting method, to\n remove cHPI signals, as opposed to notch filtering.\n\n .. 
versionadded:: 0.12\n \"\"\"\n if not raw.preload:\n raise RuntimeError('raw data must be preloaded')\n t_window = 0.2\n t_step = 0.01\n n_step = int(np.ceil(t_step * raw.info['sfreq']))\n hpi = _setup_chpi_fits(raw.info, t_window, t_window, exclude='bads',\n add_hpi_stim_pick=False, remove_aliased=True,\n verbose=False)[0]\n fit_idxs = np.arange(0, len(raw.times) + hpi['n_window'] // 2, n_step)\n n_freqs = len(hpi['freqs'])\n n_remove = 2 * n_freqs\n meg_picks = pick_types(raw.info, meg=True, exclude=()) # filter all chs\n n_times = len(raw.times)\n\n msg = 'Removing %s cHPI' % n_freqs\n if include_line:\n n_remove += 2 * len(hpi['line_freqs'])\n msg += ' and %s line harmonic' % len(hpi['line_freqs'])\n msg += ' frequencies from %s MEG channels' % len(meg_picks)\n\n proj = np.dot(hpi['model'][:, :n_remove], hpi['inv_model'][:n_remove]).T\n logger.info(msg)\n chunks = list() # the chunks to subtract\n last_endpt = 0\n last_done = 0.\n next_done = 60.\n for ii, midpt in enumerate(fit_idxs):\n if midpt / raw.info['sfreq'] >= next_done or ii == len(fit_idxs) - 1:\n logger.info(' Filtering % 5.1f - % 5.1f sec'\n % (last_done, min(next_done, raw.times[-1])))\n last_done = next_done\n next_done += 60.\n left_edge = midpt - hpi['n_window'] // 2\n time_sl = slice(max(left_edge, 0),\n min(left_edge + hpi['n_window'], len(raw.times)))\n this_len = time_sl.stop - time_sl.start\n if this_len == hpi['n_window']:\n this_proj = proj\n else: # first or last window\n model = hpi['model'][:this_len]\n inv_model = linalg.pinv(model)\n this_proj = np.dot(model[:, :n_remove], inv_model[:n_remove]).T\n this_data = raw._data[meg_picks, time_sl]\n subt_pt = min(midpt + n_step, n_times)\n if last_endpt != subt_pt:\n fit_left_edge = left_edge - time_sl.start + hpi['n_window'] // 2\n fit_sl = slice(fit_left_edge,\n fit_left_edge + (subt_pt - last_endpt))\n chunks.append((subt_pt, np.dot(this_data, this_proj[:, fit_sl])))\n last_endpt = subt_pt\n\n # Consume (trailing) chunks that are now safe to remove because\n # our windows will no longer touch them\n if ii < len(fit_idxs) - 1:\n next_left_edge = fit_idxs[ii + 1] - hpi['n_window'] // 2\n else:\n next_left_edge = np.inf\n while len(chunks) > 0 and chunks[0][0] <= next_left_edge:\n right_edge, chunk = chunks.pop(0)\n raw._data[meg_picks,\n right_edge - chunk.shape[1]:right_edge] -= chunk\n return raw\n"
] |
[
[
"numpy.zeros"
],
[
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler"
],
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.mean",
"scipy.linalg.pinv",
"numpy.where",
"numpy.cos",
"scipy.optimize.fmin_cobyla",
"numpy.concatenate",
"numpy.sin",
"numpy.arange",
"numpy.argmax",
"numpy.sqrt",
"scipy.fftpack.rfftfreq",
"numpy.array",
"numpy.zeros",
"numpy.round",
"numpy.loadtxt",
"numpy.arctan2",
"scipy.spatial.distance.cdist",
"numpy.log2",
"numpy.ceil",
"numpy.isnan",
"numpy.hanning",
"numpy.sum",
"numpy.ones",
"numpy.abs"
]
] |
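The cHPI amplitude-fitting code in the row above builds a design matrix of sine/cosine regressors at the coil and line frequencies and recovers per-channel amplitudes with a pseudoinverse. A minimal, self-contained sketch of that least-squares step on synthetic single-channel data, with hypothetical sampling rate and coil frequencies (not the MNE implementation itself):

import numpy as np
from scipy import linalg

sfreq = 1000.0                       # assumed sampling rate, Hz
n_window = 200                       # samples per fitting window
freqs = np.array([83.0, 143.0])      # hypothetical cHPI coil frequencies, Hz

t = np.arange(n_window) / sfreq
# Design matrix: sin/cos at each coil frequency, plus a slope and a DC column
model = np.concatenate([np.sin(2 * np.pi * freqs * t[:, np.newaxis]),
                        np.cos(2 * np.pi * freqs * t[:, np.newaxis]),
                        (t - t.mean())[:, np.newaxis],
                        np.ones((n_window, 1))], axis=1)
inv_model = linalg.pinv(model)

# Synthetic data: one coil sinusoid (amplitude 2.0) plus noise
rng = np.random.RandomState(0)
data = 2.0 * np.sin(2 * np.pi * freqs[0] * t + 0.3) + 0.1 * rng.randn(n_window)

coeffs = inv_model @ data             # least-squares amplitudes
amp = np.hypot(coeffs[0], coeffs[2])  # sin/cos pair for the first frequency
print(amp)                            # close to 2.0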
GEM7318/Snowmobile
|
[
"23631a47e510dfd15bce0e8708f95ebf5734349e"
] |
[
"docs/snippets/configuration.py"
] |
[
"\"\"\"\n\"\"\"\nimport snowmobile\n\nsn = snowmobile.connect(delay=True)\n\nsn.alive # > False\ntype(sn.con) # > NoneType\n\ntype(sn.cfg) # > snowmobile.core.configuration.Configuration\nstr(sn.cfg) # > snowmobile.Configuration('snowmobile.toml')\n\nprint(sn.cfg.location) # > /path/to/your/snowmobile.toml\n\nsn.cfg.connection.default_alias # > 'creds1'\n\nprint(sn.alive)\n\ntype(sn) # > snowmobile.core.connection.Snowmobile\n\ntype(sn.cfg) # > snowmobile.core.configuration.Configuration\nstr(sn.cfg) # > snowmobile.Configuration('snowmobile.toml')\n\ntype(sn.con) # > snowflake.connector.connection.SnowflakeConnection\ntype(sn.cursor) # > snowflake.connector.cursor.SnowflakeCursor\n\ndf1 = sn.query(\"select 1\") # == pd.read_sql()\ntype(df1) # > pandas.core.frame.DataFrame\n\ncur1 = sn.query(\"select 1\", as_df=False) # == SnowflakeConnection.cursor().execute()\ntype(cur1) # > snowflake.connector.cursor.SnowflakeCursor\n\nimport pandas as pd\n\ndf2 = pd.read_sql(sql=\"select 1\", con=sn.con)\ncur2 = sn.con.cursor().execute(\"select 1\")\n\nprint(df2.equals(df1)) # > True\nprint(cur1.fetchone() == cur2.fetchone()) # > True\n\n# -- complete example; should run 'as is' --\n"
] |
[
[
"pandas.read_sql"
]
] |
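The snippet above compares sn.query() with pandas.read_sql() and a raw cursor against the same connection. A self-contained sketch of that pattern using the standard-library sqlite3 module as a stand-in for the Snowflake connection (pd.read_sql accepts any DBAPI connection for plain queries):

import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")            # stand-in for sn.con (SnowflakeConnection)
con.execute("CREATE TABLE t (x INTEGER)")
con.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])

df = pd.read_sql("SELECT x FROM t", con=con)            # DataFrame, like sn.query(...)
cur = con.cursor().execute("SELECT count(*) FROM t")    # cursor, like sn.query(..., as_df=False)

print(df["x"].tolist())      # [1, 2, 3]
print(cur.fetchone()[0])     # 3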
nnassar98/pyleecan
|
[
"c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3",
"3a6ffe14ab46e90dc0b2855386623833c622b95e"
] |
[
"Tests/Plot/test_plots.py",
"Tests/Functions/test_dqh_transformation_freq.py"
] |
[
"from os.path import isfile\nfrom os.path import join\n\nimport matplotlib.pyplot as plt\nimport pytest\nfrom SciDataTool import DataTime, Data1D, DataLinspace, VectorField, Norm_ref\nfrom numpy import linspace, sin, squeeze\n\nfrom Tests import TEST_DATA_DIR\nfrom Tests import save_plot_path as save_path\nfrom pyleecan.Classes.ImportMatlab import ImportMatlab\nfrom pyleecan.Classes.InputFlux import InputFlux\nfrom pyleecan.Classes.OPdq import OPdq\nfrom pyleecan.Classes.Output import Output\nfrom pyleecan.Classes.Simu1 import Simu1\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Plot import dict_2D, dict_3D\nfrom pyleecan.definitions import DATA_DIR\n\n\n@pytest.fixture(scope=\"module\")\ndef import_data():\n data = import_data_func()\n return data\n\n\ndef import_data_func():\n SCIM_006 = load(join(DATA_DIR, \"Machine\", \"SCIM_006.json\"))\n\n simu = Simu1(name=\"test_plots\", machine=SCIM_006)\n\n mat_file_Br = join(TEST_DATA_DIR, \"Plots\", \"default_proj_Br.mat\")\n mat_file_time = join(TEST_DATA_DIR, \"Plots\", \"default_proj_time.mat\")\n mat_file_angle = join(TEST_DATA_DIR, \"Plots\", \"default_proj_angle.mat\")\n mat_file_Br_cfft2 = join(TEST_DATA_DIR, \"Plots\", \"default_proj_Br_cfft2.mat\")\n mat_file_Brfreqs = join(TEST_DATA_DIR, \"Plots\", \"default_proj_Brfreqs.mat\")\n mat_file_Brwavenumber = join(\n TEST_DATA_DIR, \"Plots\", \"default_proj_Brwavenumber.mat\"\n )\n if not isfile(mat_file_Br):\n import urllib.request\n\n url = \"https://www.pyleecan.org/Data/default_proj_Br.mat\"\n urllib.request.urlretrieve(url, mat_file_Br)\n\n if not isfile(mat_file_Br_cfft2):\n import urllib.request\n\n url = \"https://www.pyleecan.org/Data/default_proj_Br_cfft2.mat\"\n urllib.request.urlretrieve(url, mat_file_Br_cfft2)\n\n data = {}\n data[\"SCIM_006\"] = SCIM_006\n data[\"simu\"] = simu\n # Read input files from Manatee\n data[\"flux\"] = ImportMatlab(mat_file_Br, var_name=\"XBr\")\n data[\"time\"] = ImportMatlab(mat_file_time, var_name=\"timec\")\n data[\"angle\"] = ImportMatlab(mat_file_angle, var_name=\"alpha_radc\")\n\n data[\"flux_FT\"] = ImportMatlab(mat_file_Br_cfft2, var_name=\"Fwr\")\n data[\"freqs\"] = ImportMatlab(mat_file_Brfreqs, var_name=\"freqs\")\n data[\"wavenumber\"] = ImportMatlab(mat_file_Brwavenumber, var_name=\"orders\")\n data[\"N0\"] = 2000\n data[\"Id_ref\"] = 10\n data[\"Iq_ref\"] = -10\n # Plot parameters\n data[\"freq_max\"] = 2000\n data[\"r_max\"] = 78\n return data\n\n\nclass Test_plots(object):\n @pytest.mark.long_5s\n @pytest.mark.SingleOP\n @pytest.mark.SCIM\n def test_default_proj_Br_time_space(self, import_data):\n SCIM_006 = import_data[\"SCIM_006\"]\n simu = import_data[\"simu\"]\n time = import_data[\"time\"]\n angle = import_data[\"angle\"]\n flux = import_data[\"flux\"]\n freq_max = import_data[\"freq_max\"]\n N0 = import_data[\"N0\"]\n Id_ref = import_data[\"Id_ref\"]\n Iq_ref = import_data[\"Iq_ref\"]\n\n time_arr = squeeze(time.get_data())\n angle_arr = squeeze(angle.get_data())\n flux_arr = flux.get_data()\n norm_angle = {\"space_order\": Norm_ref(ref=3)}\n\n simu = Simu1(name=\"test_default_proj_Br_time_space\", machine=SCIM_006)\n simu.mag = None\n simu.force = None\n simu.struct = None\n simu.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu)\n\n # Reduce to 1/3 period\n Br_reduced = flux_arr[0:672, 0:672]\n time_reduced = time_arr[0:672]\n angle_reduced = angle_arr[0:672]\n\n # 
Build the data objects\n Time2 = Data1D(\n name=\"time\",\n unit=\"s\",\n symmetries={\"period\": 3},\n values=time_reduced,\n )\n Angle2 = Data1D(\n name=\"angle\",\n unit=\"rad\",\n symmetries={\"period\": 3},\n values=angle_reduced,\n normalizations=norm_angle,\n )\n Br2 = DataTime(\n symbol=\"B_r\",\n name=\"Airgap radial flux density\",\n unit=\"T\",\n axes=[Time2, Angle2],\n values=Br_reduced,\n )\n out2.mag.B = VectorField(\n name=\"Airgap flux density\", symbol=\"B\", components={\"radial\": Br2}\n )\n\n # Plot the result by comparing the two simulation (sym / no sym)\n plt.close(\"all\")\n\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n is_auto_ticks=False,\n legend_list=[\"Reference\", \"Periodic\"],\n save_path=join(save_path, \"test_default_proj_Br_dataobj_period.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n out.mag.B.plot_2D_Data(\n \"freqs=[0,\" + str(freq_max) + \"]\",\n data_list=[out2.mag.B],\n legend_list=[\"Reference\", \"Periodic\"],\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_dataobj_period_fft.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n\n out3 = Output(simu=simu)\n\n # Get linspace data\n t0 = time_arr[0]\n tf = time_arr[-1]\n deltat = time_arr[1] - time_arr[0]\n a0 = angle_arr[0]\n deltaa = angle_arr[1] - angle_arr[0]\n Na = len(angle_arr)\n\n # Build the data objects\n Time3 = DataLinspace(\n name=\"time\",\n unit=\"s\",\n initial=t0,\n final=tf + deltat,\n step=deltat,\n include_endpoint=False,\n )\n Angle3 = DataLinspace(\n name=\"angle\",\n unit=\"rad\",\n normalizations=norm_angle,\n initial=a0,\n step=deltaa,\n number=Na,\n include_endpoint=False,\n )\n Br3 = DataTime(\n symbol=\"B_r\",\n name=\"Airgap radial flux density\",\n unit=\"T\",\n axes=[Time3, Angle3],\n values=flux_arr,\n )\n out3.mag.B = VectorField(\n name=\"Airgap flux density\", symbol=\"B\", components={\"radial\": Br3}\n )\n\n # Plot the result by comparing the two simulation (Data1D / DataLinspace)\n plt.close(\"all\")\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n data_list=[out3.mag.B],\n legend_list=[\"Reference\", \"Linspace\"],\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_dataobj_linspace.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n out.mag.B.components[\"radial\"].axes[1].normalizations[\"space_order\"] = Norm_ref(\n ref=3\n )\n out.mag.B.plot_2D_Data(\n \"wavenumber->space_order=[0,100]\",\n data_list=[out3.mag.B],\n legend_list=[\"Reference\", \"Linspace\"],\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_dataobj_linspace_fft.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n\n simu4 = Simu1(name=\"test_default_proj_Br_time_space_ift\", machine=SCIM_006)\n simu4.mag = None\n simu4.force = None\n simu4.struct = None\n simu4.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n out4 = Output(simu=simu4)\n simu4.run()\n out4.post.legend_name = \"Inverse FT\"\n\n # Plot the result by comparing the two simulation (direct / ifft)\n plt.close(\"all\")\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n data_list=[out4.mag.B],\n legend_list=[\"Reference\", \"Inverse FFT\"],\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_dataobj_ift.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n out.mag.B.plot_2D_Data(\n \"wavenumber=[0,100]\",\n data_list=[out4.mag.B],\n legend_list=[\"Reference\", \"Inverse FFT\"],\n is_auto_ticks=False,\n save_path=join(save_path, 
\"test_default_proj_Br_dataobj_ift_fft.png\"),\n is_show_fig=False,\n **dict_2D,\n )\n\n out5 = Output(simu=simu)\n\n # Get linspace data\n t0 = 0.01\n tf = 0.04\n Nt = 3000\n time5 = linspace(0.01, 0.04, 3000, endpoint=True)\n\n # Compute sine function\n Br5 = 0.2 * sin(375 * time5 - 1.5)\n\n # Build the data objects\n Time5 = DataLinspace(\n name=\"time\",\n unit=\"s\",\n initial=t0,\n final=tf,\n number=Nt,\n include_endpoint=True,\n )\n flux5 = DataTime(\n symbol=\"B_r\",\n name=\"Airgap radial flux density\",\n unit=\"T\",\n axes=[Time5],\n values=Br5,\n )\n out5.mag.B = VectorField(\n name=\"Airgap flux density\", symbol=\"B\", components={\"radial\": flux5}\n )\n\n # Plot the result by comparing the two simulation (sym / no sym)\n plt.close(\"all\")\n\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out5.mag.B],\n legend_list=[\"Br\", \"0.2sin(375t-1.5)\"],\n save_path=join(save_path, \"test_default_proj_Br_compare.png\"),\n is_auto_ticks=False,\n is_show_fig=False,\n **dict_2D,\n )\n\n @pytest.mark.SingleOP\n @pytest.mark.SCIM\n def test_default_proj_Br_cfft2(self, import_data):\n SCIM_006 = import_data[\"SCIM_006\"]\n simu = import_data[\"simu\"]\n time = import_data[\"time\"]\n angle = import_data[\"angle\"]\n flux = import_data[\"flux\"]\n freq_max = import_data[\"freq_max\"]\n r_max = import_data[\"r_max\"]\n N0 = import_data[\"N0\"]\n Id_ref = import_data[\"Id_ref\"]\n Iq_ref = import_data[\"Iq_ref\"]\n\n N_stem = 100\n\n simu = Simu1(name=\"test_default_proj_Br_cfft2\", machine=SCIM_006)\n simu.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n simu.mag = None\n simu.force = None\n simu.struct = None\n out = Output(simu=simu)\n simu.run()\n\n # Plot the 2D FFT of flux density as stem plot\n plt.close(\"all\")\n out.mag.B.plot_3D_Data(\n \"freqs=[0,\" + str(freq_max) + \"]\",\n \"wavenumber=[-\" + str(r_max) + \",\" + str(r_max) + \"]\",\n N_stem=N_stem,\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_dataobj_cfft2.png\"),\n is_show_fig=False,\n **dict_3D,\n )\n\n @pytest.mark.SingleOP\n @pytest.mark.SCIM\n def test_default_proj_surf(self, import_data):\n SCIM_006 = import_data[\"SCIM_006\"]\n simu = import_data[\"simu\"]\n time = import_data[\"time\"]\n angle = import_data[\"angle\"]\n flux = import_data[\"flux\"]\n flux_FT = import_data[\"flux_FT\"]\n freqs = import_data[\"freqs\"]\n wavenumber = import_data[\"wavenumber\"]\n freq_max = import_data[\"freq_max\"]\n r_max = import_data[\"r_max\"]\n N0 = import_data[\"N0\"]\n Id_ref = import_data[\"Id_ref\"]\n Iq_ref = import_data[\"Iq_ref\"]\n\n simu = Simu1(name=\"test_default_proj_surf\", machine=SCIM_006)\n simu.mag = None\n simu.force = None\n simu.struct = None\n simu.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n out = Output(simu=simu)\n simu.run()\n\n # Plot the result by comparing the two simulation (sym / no sym)\n plt.close(\"all\")\n out.mag.B.plot_3D_Data(\n \"time=[0,0.06]\",\n \"angle{°}\",\n component_list=[\"radial\"],\n save_path=join(save_path, \"test_default_proj_Br_surf_dataobj.png\"),\n is_2D_view=False,\n is_show_fig=False,\n **dict_3D,\n )\n\n @pytest.mark.SingleOP\n @pytest.mark.SCIM\n def test_default_proj_fft2(self, import_data):\n SCIM_006 = import_data[\"SCIM_006\"]\n simu = import_data[\"simu\"]\n time = import_data[\"time\"]\n angle = import_data[\"angle\"]\n flux = import_data[\"flux\"]\n freq_max 
= import_data[\"freq_max\"]\n r_max = import_data[\"r_max\"]\n N0 = import_data[\"N0\"]\n Id_ref = import_data[\"Id_ref\"]\n Iq_ref = import_data[\"Iq_ref\"]\n\n simu = Simu1(name=\"test_default_proj_fft2\", machine=SCIM_006)\n simu.mag = None\n simu.force = None\n simu.struct = None\n simu.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n out = Output(simu=simu)\n simu.run()\n\n # Plot the 2D FFT of flux density as 2D scatter plot with colormap\n plt.close(\"all\")\n freq_max = 500\n r_max = 20\n out.mag.B.plot_3D_Data(\n \"freqs=[0,\" + str(freq_max) + \"]\",\n \"wavenumber=[-\" + str(r_max) + \",\" + str(r_max) + \"]\",\n is_2D_view=True,\n is_auto_ticks=False,\n save_path=join(save_path, \"test_default_proj_Br_fft2_dataobj.png\"),\n is_show_fig=False,\n **dict_3D,\n )\n\n @pytest.mark.SingleOP\n @pytest.mark.SCIM\n def test_default_proj_time_space(self, import_data):\n SCIM_006 = import_data[\"SCIM_006\"]\n simu = import_data[\"simu\"]\n time = import_data[\"time\"]\n angle = import_data[\"angle\"]\n flux = import_data[\"flux\"]\n N0 = import_data[\"N0\"]\n Id_ref = import_data[\"Id_ref\"]\n Iq_ref = import_data[\"Iq_ref\"]\n\n simu = Simu1(name=\"test_default_proj_time_space\", machine=SCIM_006)\n simu.mag = None\n simu.force = None\n simu.struct = None\n simu.input = InputFlux(\n B_dict={\"Br\": flux},\n time=time,\n angle=angle,\n OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),\n )\n out = Output(simu=simu)\n simu.run()\n\n # Plot the result by comparing the two simulation (sym / no sym)\n plt.close(\"all\")\n out.mag.B.plot_3D_Data(\n \"time\",\n \"angle{°}\",\n is_2D_view=True,\n save_path=join(save_path, \"test_default_proj_Br_time_space_dataobj.png\"),\n is_show_fig=False,\n **dict_3D,\n )\n\n\nif __name__ == \"__main__\":\n\n data = import_data_func()\n test_plot_class = Test_plots()\n\n test_plot_class.test_default_proj_Br_time_space(data)\n test_plot_class.test_default_proj_Br_cfft2(data)\n test_plot_class.test_default_proj_surf(data)\n test_plot_class.test_default_proj_fft2(data)\n test_plot_class.test_default_proj_time_space(data)\n",
"import pytest\n\nfrom numpy import pi, array, linspace, zeros, cos, exp, abs as np_abs, angle\nfrom numpy.testing import assert_array_almost_equal\n\nfrom SciDataTool import DataTime, Data1D, Norm_ref\n\nfrom pyleecan.Functions.Electrical.dqh_transformation import n2dqh_DataTime\nfrom pyleecan.Functions.Electrical.dqh_transformation_freq import (\n n2dqh_DataFreq,\n dqh2n_DataFreq,\n get_phase_dir,\n get_phase_dir_DataFreq,\n)\nfrom pyleecan.Functions.Winding.gen_phase_list import gen_name\n\nparam_list = [\n {\"qs\": 3, \"current_dir\": -1, \"phase_dir\": -1}, # pyleecan convention, 3 phases\n {\"qs\": 3, \"current_dir\": 1, \"phase_dir\": 1}, # other convention, 3 phases\n {\"qs\": 3, \"current_dir\": 1, \"phase_dir\": -1}, # other convention, 3 phases\n {\"qs\": 6, \"current_dir\": -1, \"phase_dir\": -1}, # pyleecan convention, 6 phases\n {\"qs\": 11, \"current_dir\": -1, \"phase_dir\": -1}, # pyleecan convention, 11 phases\n]\n\nis_show_fig = False\n\n\n@pytest.mark.parametrize(\"param_dict\", param_list)\ndef test_dqh_transformation_freq(param_dict):\n \"\"\"Check that the dqh transformations can return a correct output\"\"\"\n\n Nt = 500\n felec = 1 # assume unit frequency\n angle_curr = 0 # pi / 2\n\n # Get parameters for current test case\n qs = param_dict[\"qs\"]\n current_dir = param_dict[\"current_dir\"] # current direction\n phase_dir = param_dict[\"phase_dir\"] # phase direction\n\n # Define time and electrical angle arrays\n time = linspace(0, 1 / felec, Nt, endpoint=False)\n angle_elec = current_dir * 2 * pi * felec * time\n\n # Time axis for plots including angle_elec normalization used for DQH (cf Input.comp_axis_time())\n norm_time = {\"angle_elec\": Norm_ref(ref=current_dir / (2 * pi * felec))}\n Time = Data1D(name=\"time\", unit=\"s\", values=time, normalizations=norm_time)\n\n # Phase axis for plots\n Phase = Data1D(name=\"phase\", unit=\"\", values=gen_name(qs), is_components=True)\n\n A_harm = array(\n [\n 1 * exp(1j * angle_curr),\n 0.1 * exp(1j * pi / 3),\n 0.2 * exp(1j * pi / 8),\n 1j * 0.05,\n 0.15,\n ],\n dtype=complex,\n )\n order_harm = array([1, -5, 7, -11, 13])\n\n In = zeros((Nt, qs))\n for ii in range(qs):\n # current dir is included in angle_elec while phase_dir is related\n # to the one enforced in Clarke transform\n for A, order in zip(A_harm, order_harm):\n In[:, ii] += abs(A) * cos(\n order * angle_elec + phase_dir * 2 * ii * pi / qs + angle(A)\n )\n\n # Test calculation dqh transform directly applied to DataND objects\n In_dt = DataTime(\n name=\"Stator current\",\n unit=\"A\",\n symbol=\"I_s\",\n axes=[Time, Phase],\n values=In,\n )\n\n Idqh_dt = n2dqh_DataTime(In_dt, phase_dir=phase_dir)\n\n Idqh_df = n2dqh_DataFreq(\n In_dt, felec=felec, phase_dir=phase_dir, current_dir=current_dir\n )\n\n result_dqh_dt = Idqh_dt.get_along(\"freqs\", \"phase[]\")\n Idqh_val_dt = result_dqh_dt[Idqh_dt.symbol][[0, 6, 12], :]\n\n result_dqh_df = Idqh_df.get_along(\"freqs\", \"phase[]\")\n Idqh_val_df = result_dqh_df[Idqh_df.symbol]\n\n assert_array_almost_equal(np_abs(Idqh_val_dt - Idqh_val_df), 0)\n\n In_df = dqh2n_DataFreq(\n Idqh_df, felec=felec, n=qs, phase_dir=phase_dir, current_dir=current_dir\n )\n\n In_dt_val = In_dt.get_along(\"freqs\", \"phase[]\")[In_dt.symbol]\n In_df_val = In_df.get_along(\"freqs\", \"phase[]\")[In_df.symbol]\n\n assert_array_almost_equal(\n np_abs(In_dt_val[[1, 5, 7, 11, 13], :] - In_df_val), 0\n )\n\n # Check phase_dir calculation\n phase_dir_calc1 = get_phase_dir(In_dt_val, current_dir)\n phase_dir_calc2 = 
get_phase_dir_DataFreq(In_dt)\n assert phase_dir_calc1 == phase_dir\n assert phase_dir_calc2 == phase_dir\n\n if is_show_fig:\n Idqh_dt.plot_2D_Data(\"time\", \"phase[]\")\n Idqh_dt.plot_2D_Data(\"freqs\", \"phase[]\")\n\n Idqh_df.plot_2D_Data(\"time\", \"phase[]\")\n Idqh_df.plot_2D_Data(\"freqs\", \"phase[]\")\n\n In_dt.plot_2D_Data(\"time\", \"phase[]\")\n In_dt.plot_2D_Data(\"freqs\", \"phase[]\")\n\n In_df.plot_2D_Data(\"time\", \"phase[]\")\n In_df.plot_2D_Data(\"freqs\", \"phase[]\")\n\n pass\n\n\nif __name__ == \"__main__\":\n\n for param_dict in param_list:\n test_dqh_transformation_freq(param_dict)\n\n # test_dqh_transformation_freq(param_list[1])\n"
] |
[
[
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.close"
],
[
"numpy.array",
"numpy.angle",
"numpy.zeros",
"numpy.exp",
"numpy.abs",
"numpy.linspace"
]
] |
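The dqh-transformation test in the row above builds multi-phase currents as a sum of harmonics on the electrical angle and then inspects their spectral content. A numpy-only sketch of that construction for the 3-phase case, taking the same sign conventions as assumptions and using a plain FFT check in place of the SciDataTool objects:

import numpy as np

qs = 3                            # number of phases (one of the cases in the test above)
felec = 1.0                       # electrical frequency, Hz
Nt = 500
current_dir, phase_dir = -1, -1   # assumed pyleecan sign conventions

time = np.linspace(0, 1 / felec, Nt, endpoint=False)
angle_elec = current_dir * 2 * np.pi * felec * time

# One fundamental plus a 5th-order harmonic, mirroring the test's construction
A_harm = np.array([1.0 * np.exp(0.0j), 0.1 * np.exp(1j * np.pi / 3)])
order_harm = np.array([1, -5])

In = np.zeros((Nt, qs))
for ii in range(qs):
    for A, order in zip(A_harm, order_harm):
        In[:, ii] += np.abs(A) * np.cos(order * angle_elec
                                        + phase_dir * 2 * ii * np.pi / qs
                                        + np.angle(A))

# Spectral check on phase 0: energy sits at |order| * felec with the chosen amplitudes
spec = np.abs(np.fft.rfft(In[:, 0])) / Nt * 2
print(np.round(spec[[1, 5]], 3))   # ~[1.0, 0.1]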
iamacityzen/ga-learner-dsmp-repo
|
[
"a4c091f4e95504938270906b04426839b63814c3"
] |
[
"Making-first-prediction-using-linear-regression/code.py"
] |
[
"# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\n# code starts here\ndf = pd.read_csv(path)\ndf.head()\nX = df[['ages','num_reviews','piece_count','play_star_rating','review_difficulty','star_rating','theme_name','val_star_rating','country']]\ny = df['list_price']\nX_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 6, test_size = 0.3)\n# code ends here\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\n# code starts here \n\ncols = X_train.columns\n#cols= list(X_train.columns.values)\n\n\nsns.pairplot(df)\n\n# code ends here\n\n\n\n# --------------\n# Code starts here\ncorr = X_train.corr()\nprint(corr)\nX_train.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True) \nX_test.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True)\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport math\n# Code starts here\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\ny_pred = regressor.predict(X_test)\n\ndef metrics(actual,pred):\n print('Mean Squared Error', mean_squared_error(actual,pred))\n print('R-Squared', r2_score(actual,pred))\nmetrics(y_test,y_pred)\nmse = 2106.7634311857673\nr2 = 0.7747160273433752\n# Code ends here\n\n\n# --------------\n# Code starts here\nresidual = y_test - y_pred\nplt.hist(residual)\n\n\n\n# Code ends here\n\n\n"
] |
[
[
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.hist",
"sklearn.metrics.r2_score",
"pandas.read_csv",
"sklearn.cross_validation.train_test_split"
]
] |
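The regression code in the row above imports train_test_split from the long-removed sklearn.cross_validation module and reads its CSV from an unspecified path. A self-contained sketch of the same fit-and-score flow with the current sklearn.model_selection import, using synthetic stand-in features since the original dataset is not available here:

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split   # modern home of train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Synthetic stand-in for the dataframe read with pd.read_csv(path) above
rng = np.random.RandomState(6)
X = pd.DataFrame({"piece_count": rng.randint(10, 2000, 500),
                  "num_reviews": rng.randint(0, 300, 500)})
y = 0.05 * X["piece_count"] + 0.1 * X["num_reviews"] + rng.randn(500)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=6, test_size=0.3)

regressor = LinearRegression().fit(X_train, y_train)
y_pred = regressor.predict(X_test)

print("Mean Squared Error", mean_squared_error(y_test, y_pred))
print("R-Squared", r2_score(y_test, y_pred))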
OmoooJ/gluon-facex
|
[
"c5606fc9e2223c6d6dce2aaf2858d83f5eac1d54",
"c5606fc9e2223c6d6dce2aaf2858d83f5eac1d54"
] |
[
"examples/mnist/train_mnist_arcloss.py",
"gluonfr/metrics/verification.py"
] |
[
"# MIT License\n#\n# Copyright (c) 2018 Haoxintong\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\"\"\"\n\nimport os\nimport time\nimport mxnet as mx\nimport numpy as np\nfrom gluonfr.loss import ArcLoss\nfrom mxnet.gluon.data.vision import MNIST\nfrom mxnet import nd, gluon, metric as mtc, autograd as ag\nfrom examples.mnist.net.lenet import LeNetPlus\nfrom examples.mnist.utils import transform_train, transform_val, plot_result\n\nos.environ['MXNET_GLUON_REPO'] = 'https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn/'\nos.environ['MXNET_ENABLE_GPU_P2P'] = '0'\n\n\ndef validate(net, val_data, ctx, loss, plot=False):\n metric = mtc.Accuracy()\n val_loss = 0\n ebs = []\n lbs = []\n for i, batch in enumerate(val_data):\n data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)\n labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)\n\n ots = [net(X) for X in data]\n embedds = [ot[0] for ot in ots]\n outputs = [ot[1] for ot in ots]\n\n losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]\n metric.update(labels, outputs)\n val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)\n if plot:\n for es, ls in zip(embedds, labels):\n assert len(es) == len(ls)\n for idx in range(len(es)):\n ebs.append(es[idx].asnumpy())\n lbs.append(ls[idx].asscalar())\n if plot:\n ebs = np.vstack(ebs)\n lbs = np.hstack(lbs)\n\n _, val_acc = metric.get()\n return val_acc, val_loss / len(val_data), ebs, lbs\n\n\ndef train():\n epochs = 100\n\n lr = 0.1\n lr_steps = [40, 70, np.inf]\n momentum = 0.9\n wd = 5e-4\n\n plot_period = 5\n\n ctx = [mx.gpu(i) for i in range(2)]\n batch_size = 256\n\n margin_s = 5\n margin_m = 0.2\n\n train_set = MNIST(train=True, transform=transform_train)\n train_data = gluon.data.DataLoader(train_set, batch_size, True, num_workers=4, last_batch='discard')\n val_set = MNIST(train=False, transform=transform_val)\n val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4)\n\n net = LeNetPlus(embedding_size=64, feature_norm=True, weight_norm=True)\n net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)\n # net.load_parameters(\"./pretrained_mnist.params\", ctx=ctx)\n net.hybridize()\n\n loss = ArcLoss(s=margin_s, m=margin_m, classes=10)\n\n train_params = net.collect_params()\n trainer = gluon.Trainer(train_params, 'sgd', {'learning_rate': lr, 'momentum': momentum, 'wd': wd})\n\n lr_counter = 0\n\n metric = mtc.Accuracy()\n num_batch = len(train_data)\n\n for epoch 
in range(epochs+1):\n if epoch == lr_steps[lr_counter]:\n trainer.set_learning_rate(trainer.learning_rate * 0.1)\n lr_counter += 1\n # if (epoch % plot_period) == 0:\n # plot = True\n # else:\n plot = False\n train_loss = 0\n metric.reset()\n tic = time.time()\n ebs = []\n lbs = []\n\n for batch in train_data:\n data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)\n labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)\n\n with ag.record():\n ots = [net(X) for X in data]\n embedds = [ot[0] for ot in ots]\n outputs = [ot[1] for ot in ots]\n losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]\n\n for l in losses:\n ag.backward(l)\n if plot:\n for es, ls in zip(embedds, labels):\n assert len(es) == len(ls)\n for idx in range(len(es)):\n ebs.append(es[idx].asnumpy())\n lbs.append(ls[idx].asscalar())\n\n trainer.step(batch_size)\n metric.update(labels, outputs)\n\n train_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)\n\n _, train_acc = metric.get()\n train_loss /= num_batch\n\n val_acc, val_loss, val_ebs, val_lbs = validate(net, val_data, ctx, loss, plot)\n\n if plot:\n ebs = np.vstack(ebs)\n lbs = np.hstack(lbs)\n\n plot_result(ebs, lbs, os.path.join(\"../../resources\", \"arcloss-train-epoch{}.png\".format(epoch)))\n plot_result(val_ebs, val_lbs, os.path.join(\"../../resources\", \"arcloss-val-epoch{}.png\".format(epoch)))\n\n toc = time.time()\n print('[epoch % 3d] train accuracy: %.6f, train loss: %.6f | '\n 'val accuracy: %.6f, val loss: %.6f, time: %.6f'\n % (epoch, train_acc, train_loss, val_acc, val_loss, toc - tic))\n\n # if epoch == 10:\n # net.save_parameters(\"./pretrained_mnist.params\")\n # net.save_parameters(\"./models/attention%d-cifar10-epoch-%d.params\" % (args.num_layers, epoch))\n\n\nif __name__ == '__main__':\n train()",
"# MIT License\n#\n# Copyright (c) 2018 Haoxintong\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\"\"\"\nimport mxnet as mx\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom scipy import interpolate\n\n__all__ = [\"FaceVerification\"]\n\n\nclass FaceVerification(mx.metric.EvalMetric):\n r\"\"\" Compute confusion matrix of 1:1 problem in face verification or other fields.\n Use update() to collect the outputs and compute distance in each batch, then use get() to compute the\n confusion matrix and accuracy of the val dataset.\n\n Parameters\n ----------\n nfolds: int, default is 10\n\n thresholds: ndarray, default is None.\n Use np.arange to generate thresholds. If thresholds=None, np.arange(0, 2, 0.01) will be used for\n euclidean distance.\n\n far_target: float, default is 1e-3.\n This is used to get the verification accuracy of expected far.\n\n dist_type: int, default is 0.\n Option value is {0, 1}, 0 for euclidean distance, 1 for cosine similarity. 
Here for cosine distance,\n we use `1 - cosine` as the final distances.\n\n \"\"\"\n\n def __init__(self, nfolds=10, thresholds=None, far_target=1e-3, dist_type=0):\n super().__init__(\"FaceVerification\")\n self.far_target = far_target\n self._nfolds = nfolds\n self._dists = []\n self._issame = []\n default_thresholds = np.arange(0, 2, 0.01) if dist_type == 0 else np.arange(0, 1, 0.01)\n self._thresholds = thresholds if thresholds is not None else default_thresholds\n self.reset()\n self._dist_type = dist_type\n\n # noinspection PyMethodOverriding\n def update(self, labels: mx.nd.NDArray, embeddings0: mx.nd.NDArray, embeddings1: mx.nd.NDArray):\n \"\"\"\n\n :param labels: NDArray.\n :param embeddings0: NDArray.\n :param embeddings1: NDArray.\n :return:\n \"\"\"\n\n embeddings0 = embeddings0.asnumpy() if not isinstance(embeddings0, np.ndarray) else embeddings0\n embeddings1 = embeddings1.asnumpy() if not isinstance(embeddings1, np.ndarray) else embeddings1\n labels = labels.asnumpy() if not isinstance(labels, np.ndarray) else labels\n\n if self._dist_type == 0:\n diff = np.subtract(embeddings0, embeddings1)\n dists = np.sqrt(np.sum(np.square(diff), 1))\n else:\n dists = 1 - np.sum(np.multiply(embeddings0, embeddings1), axis=1) / \\\n (np.linalg.norm(embeddings0, axis=1) * np.linalg.norm(embeddings1, axis=1))\n\n self._dists += [d for d in dists]\n self._issame += [l for l in labels]\n\n def get(self):\n tpr, fpr, accuracy = calculate_roc(self._thresholds, np.asarray(self._dists),\n np.asarray(self._issame), self._nfolds)\n\n val, val_std, far = calculate_val(self._thresholds, np.asarray(self._dists),\n np.asarray(self._issame), self.far_target, self._nfolds)\n acc, acc_std = np.mean(accuracy), np.std(accuracy)\n return tpr, fpr, acc, val, val_std, far, acc_std\n\n def reset(self):\n self._dists = []\n self._issame = []\n\n\n# code below is modified from project <Facenet (David Sandberg)>\nclass LFold:\n def __init__(self, n_splits=2, shuffle=False):\n self.n_splits = n_splits\n if self.n_splits > 1:\n self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)\n\n def split(self, indices):\n if self.n_splits > 1:\n return self.k_fold.split(indices)\n else:\n return [(indices, indices)]\n\n\ndef calculate_roc(thresholds, dist, actual_issame, nrof_folds=10):\n assert len(dist) == len(actual_issame), \"Shape of predicts and labels mismatch!\"\n\n nrof_pairs = len(dist)\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n\n tprs = np.zeros((nrof_folds, nrof_thresholds))\n fprs = np.zeros((nrof_folds, nrof_thresholds))\n accuracy = np.zeros((nrof_folds,))\n indices = np.arange(nrof_pairs)\n dist = np.array(dist)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds,))\n for threshold_idx, threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])\n\n best_threshold_index = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx, threshold_idx], \\\n fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold, dist[test_set],\n actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set],\n actual_issame[test_set])\n\n tpr = np.mean(tprs, 0)\n fpr = np.mean(fprs, 0)\n return tpr, fpr, accuracy\n\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n predict_issame = 
np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n\n tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)\n fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)\n acc = float(tp + tn) / dist.size\n return tpr, fpr, acc\n\n\ndef calculate_val(thresholds, dist, actual_issame, far_target, nrof_folds=10):\n assert len(dist) == len(actual_issame), \"Shape of predicts and labels mismatch!\"\n\n nrof_pairs = len(dist)\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n\n val = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n indices = np.arange(nrof_pairs)\n dist = np.array(dist)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n # Find the threshold that gives FAR = far_target\n far_train = np.zeros(nrof_thresholds)\n for threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])\n\n if np.max(far_train) >= far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])\n\n val_mean = np.mean(val)\n val_std = np.std(val)\n far_mean = np.mean(far)\n return val_mean, val_std, far_mean\n\n\ndef calculate_val_far(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n\n val = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return val, far\n"
] |
[
[
"numpy.hstack",
"numpy.vstack"
],
[
"numpy.logical_not",
"numpy.max",
"numpy.array",
"numpy.less",
"scipy.interpolate.interp1d",
"numpy.asarray",
"numpy.zeros",
"numpy.square",
"numpy.linalg.norm",
"numpy.sum",
"numpy.mean",
"numpy.logical_and",
"numpy.multiply",
"numpy.std",
"numpy.subtract",
"numpy.arange",
"numpy.argmax",
"sklearn.model_selection.KFold"
]
] |
jatinchowdhury18/BBDDelay
|
[
"eb219742ff53b15e11efbaf9bc38b002610cf9b9"
] |
[
"sim/filter_design.py"
] |
[
"import numpy as np\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\n\nFS = 48000.0\nFREQ = 9000\nomega = 2 * np.pi * FREQ\n\nr = np.array([251589, -130428 - 4165j, -130428 + 4165j, 4634 - 22873j, 4634 + 22873j])\np = np.array([-46580, -55482 + 25082j, -55482 - 25082j, -26292 - 59437j, -26292 + 59437j])\n\nr = np.array([5092.0, -11256.0 - 99566.0j, -11256.0 + 99566.0, -13802.0 - 24606.0j, -13802.0 + 24606.0j])\np = np.array([-176261.0, -51468.0 - 21437.0j, -51468.0 + 21437.0j, -26276.0 - 59699.0j, -26276.0 + 59699.0j])\n\nH0 = 0\nfor i in range(5):\n prod = r[i]\n for k in range(5):\n if i == k:\n continue\n prod *= p[k]\n H0 += prod\n\nprint(H0)\n\n# print(z)\n# print(p)\n\nworN=np.logspace(1, 5, 1000)\nw, h = signal.freqs_zpk([], p, H0, worN)\nplt.figure()\nplt.semilogx(w, 20 * np.log10(abs(h)))\n\nfc = 500.0\n# freq_factor = fc / 9400\nfreq_factor = fc / 11000\nr = r * freq_factor\np = p * freq_factor\n\nH0 = 0\nfor i in range(5):\n prod = r[i]\n for k in range(5):\n if i == k:\n continue\n prod *= p[k]\n H0 += prod\n\nw, h = signal.freqs_zpk([], p, H0, worN)\nplt.semilogx(w, 20 * np.log10(abs(h)))\n\nz, p, k = signal.butter(5, 2 * np.pi * fc, analog=True, output='zpk')\nw, h = signal.freqs_zpk(z, p, k, worN)\nplt.semilogx(w, 20 * np.log10(abs(h)))\n\nplt.xlabel('Frequency')\nplt.ylabel('Amplitude response [dB]')\nplt.ylim(-90)\nplt.grid()\n\n# plt.figure()\n# plt.plot(z.real, z.imag, 'go')\n# plt.plot(p.real, p.imag, 'rx')\n# plt.grid()\nplt.show()\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"scipy.signal.freqs_zpk",
"scipy.signal.butter",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.logspace"
]
] |
ofgulban/meso-MRI
|
[
"15ef8e19aae6218833a06bf01418d3d83eafd8c7",
"15ef8e19aae6218833a06bf01418d3d83eafd8c7"
] |
[
"scripts/wip/anim-test_camera.py",
"scripts/wip/wip-patch_flatten_filtered.py"
] |
[
"\"\"\"Test Pyvista camera parameters.\"\"\"\n\nimport os\nimport numpy as np\nimport pyvista as pv\nimport nibabel as nb\n\nFILE = \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-04/flattening/sub-04_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_perimeter_chunk_T2star_flat_400x400_voronoi.nii.gz\"\n\nOUTDIR = \"/home/faruk/data2/DATA_MRI_NIFTI/derived/movies/test_frames\"\n\nMIN, MAX = 20, 45\nBACKGROUND = \"black\"\nRESOLUTION = (720, 720)\nCMAP = \"gray\"\n# -----------------------------------------------------------------------------\n# Output directory\nif not os.path.exists(OUTDIR):\n os.makedirs(OUTDIR)\nprint(\" Output directory: {}\".format(OUTDIR))\n\nnii = nb.load(FILE)\ndims = nii.shape\ndata = nii.get_fdata()\n\n# Normalize\ndata[data > MAX] = MAX\ndata -= MIN\ndata /= MAX - MIN\ndata[data < 0] = 0\ndata *= 255\n\n# Prep pyvista plotter\np = pv.Plotter(window_size=RESOLUTION, off_screen=True)\nopacity = np.ones(255)\nopacity[0] = 0\np.add_volume(data, cmap=\"gray\", opacity=opacity)\np.set_background(BACKGROUND)\n\n# p.camera.roll = 0\np.camera_position = 'yz'\np.camera.elevation = 15\n\nprint(\"Roll : {}\".format(p.camera.roll))\nprint(\"Elevation : {}\".format(p.camera.elevation))\nprint(\"Azimuth : {}\".format(p.camera.azimuth))\nprint(\"Position : {}\".format(p.camera.position))\nprint(\"Focal point : {}\".format(p.camera.focal_point))\nprint(\"Clip range : {}\".format(p.camera.clipping_range))\n\nCAMPOS_DEFAULT = p.camera_position\n\n# Manipulate camera\n# -----------------------------------------------------------------------------\np.camera_position = CAMPOS_DEFAULT\np.camera.elevation += 30\nfor i in range(90):\n p.show(auto_close=False)\n out_name = \"03_azimuth-{}.png\".format(str(i).zfill(3))\n p.screenshot(os.path.join(OUTDIR, out_name))\n p.camera.azimuth += 4\n p.camera.azimuth %= 360\n\nprint(\"Finished.\")\n",
"\"\"\"Flatten several values into a chunky disk.\"\"\"\n\nimport os\nimport subprocess\nimport nibabel as nb\nimport numpy as np\nimport glob\n\nVALUES = [[\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_HG_RH_v02_borderized_multilaterate_perimeter_chunk_T2star_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_HG_LH_v02_borderized_multilaterate_perimeter_chunk_T2star_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_CS_RH_v02_borderized_multilaterate_perimeter_chunk_T2star_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_perimeter_chunk_T2star_UVD_median_filter.nii.gz\",\n ], [\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_HG_RH_v02_borderized_multilaterate_perimeter_chunk_T1_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_HG_LH_v02_borderized_multilaterate_perimeter_chunk_T1_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_CS_RH_v02_borderized_multilaterate_perimeter_chunk_T1_UVD_median_filter.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/08_median_filter2/sub-01_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_perimeter_chunk_T1_UVD_median_filter.nii.gz\",\n ]\n]\n\n# Make sure that these correspond to images in VALUES\nTAGS = [\"T2star\", \"T1\"]\n\nCOORD_UV = [\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_HG_RH_v02_borderized_multilaterate_UV_coordinates.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_HG_LH_v02_borderized_multilaterate_UV_coordinates.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_CS_RH_v02_borderized_multilaterate_UV_coordinates.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_UV_coordinates.nii.gz\",\n]\n\nCOORD_D = [\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/02_layers/sub-01_ses-T2s_segm_rim_HG_RH_v02_borderized_metric_equivol.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/02_layers/sub-01_ses-T2s_segm_rim_HG_LH_v02_borderized_metric_equivol.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/02_layers/sub-01_ses-T2s_segm_rim_CS_RH_v02_borderized_metric_equivol.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/02_layers/sub-01_ses-T2s_segm_rim_CS_LH_v02_borderized_metric_equivol.nii.gz\",\n]\n\nDOMAIN =[\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_HG_RH_v02_borderized_multilaterate_perimeter_chunk.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_HG_LH_v02_borderized_multilaterate_perimeter_chunk.nii.gz\",\n 
\"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_CS_RH_v02_borderized_multilaterate_perimeter_chunk.nii.gz\",\n \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/segmentation/03_multilaterate/sub-01_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_perimeter_chunk.nii.gz\",\n]\n\nOUTDIR = \"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-01/flattening_filtered/\"\n\nBINS_U = 400\nBINS_V = 400\nBINS_D = 100\n\n# -----------------------------------------------------------------------------\n# Output directory\nif not os.path.exists(OUTDIR):\n os.makedirs(OUTDIR)\n print(\" Output directory: {}\\n\".format(OUTDIR))\n\nfor j in range(len(VALUES)):\n tag = TAGS[j]\n for i in range(len(DOMAIN)):\n values = VALUES[j][i]\n coord_uv = COORD_UV[i]\n coord_d = COORD_D[i]\n domain = DOMAIN[i]\n\n # Determine output basename\n filename = os.path.basename(domain)\n basename, ext = filename.split(os.extsep, 1)\n outname = os.path.join(OUTDIR, \"{}_{}.{} \".format(basename, tag, ext))\n\n # Layers and middle gray matter\n command = \"/home/faruk/Git/LAYNII/LN2_PATCH_FLATTEN \"\n command += \"-values {} \".format(values)\n command += \"-coord_uv {} \".format(coord_uv)\n command += \"-coord_d {} \".format(coord_d)\n command += \"-domain {} \".format(domain)\n command += \"-bins_u {} \".format(BINS_U)\n command += \"-bins_v {} \".format(BINS_V)\n command += \"-bins_d {} \".format(BINS_D)\n command += \"-voronoi \"\n command += \"-norm_mask \"\n command += \"-output {} \".format(outname)\n\n print(command)\n subprocess.run(command, shell=True)\n print()\n\n# Make sform qform of flat niftis identity matrix\nnii_files = glob.glob(os.path.join(OUTDIR, \"*.nii*\"))\nfor i in nii_files:\n nii = nb.load(i)\n new = nb.Nifti1Image(nii.dataobj, header=nii.header, affine=np.eye(4))\n nb.save(new, i)\n\nprint('Finished.\\n')\n"
] |
[
[
"numpy.ones"
],
[
"numpy.eye"
]
] |
carlylagrotta/MSI
|
[
"e958beb5df2a2d1018bbb2f96382b5c99b08c3ef"
] |
[
"tests/shock_tube_optimization_shell_six_paramter_fit_test_modified.py"
] |
[
"import sys, os\nsys.path.append('../../') #get rid of this at some point with central test script or when package is built\nos.chdir('../../')\n\nimport MSI.simulations.instruments.shock_tube as st\nimport MSI.cti_core.cti_processor as pr\nimport MSI.optimization.matrix_loader as ml\nimport MSI.optimization.opt_runner as opt\nimport MSI.simulations.absorbance.curve_superimpose as csp\nimport MSI.simulations.yaml_parser as yp\nimport MSI.optimization.shock_tube_optimization_shell_six_param_fit as stMSIspf\nimport cantera as ct\nimport pandas as pd\nimport numpy as np\nimport MSI.utilities.plotting_script as plotter\nimport MSI.utilities.post_processor as post_processor\n\n\n\n\nfiles_to_include = [['Pirraglia_0.yaml']] \n \n \nnumer_of_iterations = 3\ncti_file = 'glarborg_custom.cti'\nworking_directory = 'MSI/data/H_O2'\nreaction_uncertainty_csv = 'glarborg_reaction_uncertainty.csv'\nmaster_reaction_equation_cti_name = 'master_reactions_glarborg.cti'\n#rate_constant_target_value_data = 'burke_target_value_single_reactions.csv'\n\n#this would be an empty string '' if you do not want to include it \nrun_with_k_target_values = 'On'\nmaster_equation_reactions = ['H2O2 + OH <=> H2O + HO2',\n '2 HO2 <=> H2O2 + O2',\n 'HO2 + OH <=> H2O + O2',\n '2 OH <=> H2O + O',\n 'CH3 + HO2 <=> CH4 + O2',\n 'CH3 + HO2 <=> CH3O + OH']\n\n#master_index = [2,3,4,5,6,7]\nmaster_index = [2,3,4,5,6,7]\n\nmaster_equation_uncertainty_df = pd.read_csv('MSI/data/H_O2/six_parameter_fit_large_uncertainty.csv')\n#this could be 'On'\n\nrate_constant_target_value_data_for_plotting = 'FFCM1_target_reactions_1_plotting.csv'\nrate_constant_target_value_data = 'FFCM1_target_reactions_1.csv'\nrate_constant_target_value_data_extra = 'FFCM1_target_reactions_extra_data.csv'\n\n#start here \n\n\nsix_parameter_fit_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-13.37032086, 32.42060027, 19.23022032, 6.843287462 , 36.62853824 ,-0.220309785 ,-0.099366346, -4.134352081]),\n 'n':np.array([1.948532282, -5.341557065, -3.337497841, -1.025292166, -5.813524857, 0.011862923 ,0.061801326, 0.581628835]),\n 'Ea':np.array([-0.463042822, 1.529151218, 0.808025472 ,0.359889935, -0.021309254, -0.098013004, -0.102022118, -0.097024727]),\n 'c':np.array([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149 ,0.001263134, 0.001236963, -0.000390567]),\n 'd':np.array([1.071992802, -2.780550365, -1.71391034 ,-0.274481751, -4.491132406, -0.054960894, 0.049553379, 0.270885383]),\n 'f':np.array([-0.027060156, 0.056903076, 0.041102936 ,0.001361221, 0.144385439, 0.003136796 ,0.001374015, -0.006089248])},\n '2 HO2 <=> H2O2 + O2': {'A':np.array([-12.93733217, 24.39245077 ,17.73177606, 4.37803475, 33.44985889, 0.381601192 ,3.748890308]),\n 'n':np.array([1.872602872, -4.096806067, -3.09439453 ,-0.63226683, -5.125008418, -0.061610462, -0.677953862]),\n 'Ea':np.array([-0.463903763 ,1.259537237, 0.826684258 ,0.257400116, 0.803882706 ,2.20E-05, 0.181336266]),\n 'c':np.array([0.002069572, -0.008314769, -0.00424128 ,-0.002016113, 0.000134642 ,0.000122049 ,-0.001026567]),\n 'd':np.array([0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -0.050708107, -0.526284003]),\n 'f':np.array([-0.022628436, 0.023558844, 0.031573523 ,-0.00732987, 0.096573278 ,0.001668073, 0.01033547])},\n 'HO2 + OH <=> H2O + O2': {'A':np.array([-4.795727446, 6.426354909 ,4.878258417, 2.472791017, 7.856296474, 1.328033302 ,-3.457932692, -0.349839371, 2.331070924 ,2.403555921, -0.165397001, 0.246540172 ,0.722946077]),\n 'n':np.array([0.624241134, -1.321082842, 
-1.032242319, -0.36532386, -1.112545721, -0.188622956, 0.421083939 ,0.038859478 ,-0.360855106, -0.38989218, 0.029669899 ,-0.04371581, -0.130487515]),\n 'Ea':np.array([-0.259799111, 0.205620792 ,0.130799794, 0.137023666 ,0.379232542, 6.19E-02, -0.198196699, -0.023548432, 0.118069394 ,0.104383314 ,-0.003830947, 0.011566499 ,-0.073557828]),\n 'c':np.array([0.00161312, -0.001906694, -0.000863021, -0.00105112 ,-0.002185605, -0.000334461, 0.001817049 ,0.000170761, -0.000859313, -0.000653029, -3.11E-06 ,-6.37E-05, 0.00047058]),\n 'd':np.array([0.124499363, -0.645652135, -0.535188558, 0.052734001 ,-0.45181066, -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, 0.057288687, -0.012776017, -0.192422381]),\n 'f':np.array([0.002033109, -0.011099716, 0.005351213 ,-0.007623667, 0.005327017 ,0.001259485,0.00245957, 0.000976725 ,-0.004879845, 0.001903886 ,-0.001838669 ,0.000252269, 0.004691829])},\n '2 OH <=> H2O + O': {'A': np.array([-5.40485067, 18.96061659 ,8.089301961, 6.953940096 ,-12.54280438, -3.264972401, 2.106487623 ,-1.657943467, 1.614935 ,-1.536463599]),\n 'n': np.array([0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, 0.466415264 ,-0.326136934, 0.355297684 ,-0.16618967, 0.253903734]),\n 'Ea': np.array([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -0.281992263, 0.099465537 ,0.030650483, 0.176069015 ,-0.056967886]),\n 'c': np.array([-0.003001658, -0.001870536, 0.003820535 ,-0.002753277, 0.014224162, 0.00032969 ,-0.000627241, -0.001081979, -0.002009835, 0.000255318]),\n 'd':np.array([0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728 ,0.229877892, -0.194395052, 1.033858025 ,0.527183366, 0.308743056]),\n 'f':np.array([-0.010053913, 0.025128322, 0.035579811 ,0.00515753 ,-0.0083511, -0.00512885, 0.003954, -0.029711993 ,-0.01986861, -0.007691647])},\n 'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),\n 'n':np.array([-0.00104,-.36888,.154462]),\n 'Ea':np.array([.504278,-.44379,-0.03181]),\n 'c':np.array([0,0,0]),\n 'd':np.array([0,0,0]),\n 'f':np.array([0,0,0])},\n 'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),\n 'n':np.array([-.04282,.150846]),\n 'Ea':np.array([0.024285,-0.02956]),\n 'c':np.array([0,0]),\n 'd':np.array([0,0]),\n 'f':np.array([0,0])}}\n \n \n \nmolecular_parameter_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-0.373074255, -5.658058364,-2.203911028,1.69333527,-7.110529947,-0.272049596,1.373125254,-0.644666166]),\n 'n':np.array([0.043611058, 0.15417925, -0.208413633, -0.306031876, 0.81053055, 0.031772359 ,-0.136901806, 0.073807424]),\n 'Ea':np.array([0.419762882, -1.301125209, -0.681648059, -0.091866582, -2.353326781, -0.064230907, 0.047721593 ,0.147941186])},\n '2 HO2 <=> H2O2 + O2': {'A':np.array([-0.166005487, -6.797175212, -2.798300682, 1.973896891 ,-4.354910767, -0.082067357, -3.839749825]),\n 'n':np.array([0.018748596, 0.294710827 ,-0.135488286, -0.332967052, 0.4930396, 0.009470627 ,0.409095255]),\n 'Ea':np.array([0.459015825, -1.401810899, -0.722040616, -0.066133729, -1.52807633 ,-0.021832631, -0.411667639])},\n 'HO2 + OH <=> H2O + O2': {'A':np.array([-1.30109642, -11.63457509, -4.680271526, 0.782373804 , -0.016083278, 0.005513255 ,-1.738426278, -0.232013539, 0.884067816 ,-0.500473791, 0.399272687 ,0.062255923 ,-1.667253993]),\n 'n':np.array([0.152797314, 1.1181845, 0.306250902 ,-0.164846884, -0.008229148, -0.001531881, 0.195875814 ,0.026844834, -0.18238354 ,0.017363927, -0.055634983 ,-0.017324495, 0.218771679]),\n 'Ea':np.array([0.101558432, -1.638858106, 
-0.704325409, -0.119041648, -0.307281167, -0.04872945, 0.001603412 ,0.000324159, -0.08089174, -0.148811902, 0.027266121 ,-0.002907638, -0.237949453])},\n '2 OH <=> H2O + O': {'A': np.array([0.299144373, -2.662684629, -6.643003014, 0.370230493 ,-3.354253502, -0.271981922, -0.581195748, 9.774024441 , 5.90328859, 2.272800133]),\n 'n': np.array([-0.028599275, -0.071787028, 0.572722706 ,-0.109709456, 0.381272207 ,0.03153973 ,0.061282516, -1.341475144, -0.835422411, -0.302994441]),\n 'Ea': np.array([0.535103651, -1.054606857, -0.989721261, -0.169631331, -1.099840578, -0.069647609, -0.101285313, 0.74522721, 0.352517552 ,0.205464658])},\n 'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),\n 'n':np.array([-0.00104,-.36888,.154462]),\n 'Ea':np.array([.504278,-.44379,-0.03181])},\n 'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),\n 'n':np.array([-.04282,.150846]),\n 'Ea':np.array([0.024285,-0.02956])}} \n \n \n \n \n \n \nsix_parameter_fit_nominal_parameters_dict = {'H2O2 + OH <=> H2O + HO2':{'A':4.64E-06,'n':5.605491008,'Ea':-5440.266692,'c':126875776.1,'d':0.000441194,'f':-5.35E-13},\n '2 HO2 <=> H2O2 + O2':{'A':1.30E+04,'n':1.997152351,'Ea':-3628.04407,'c':93390973.44,'d':-0.000732521,'f':8.20E-12} ,\n 'HO2 + OH <=> H2O + O2':{'A':1.41E+18,'n':-2.05344973,'Ea':-232.0064051,'c':15243859.12,'d':-0.001187694,'f':8.01E-12},\n '2 OH <=> H2O + O':{'A':354.5770856,'n':2.938741717,'Ea':-1836.492972,'c':12010735.18,'d':-4.87E-05,'f':1.22E-12},\n 'CH3 + HO2 <=> CH4 + O2':{'A':3.19e3,'n':2.670857,'Ea':-4080.73,'c':0.0,'d':0.0,'f':0.0},\n 'CH3 + HO2 <=> CH3O + OH':{'A':8.38e11,'n':.29,'Ea':-785.45,'c':0.0,'d':0.0,'f':0.0}}\n\n\n\n\n\nMSI_st_instance_one = stMSIspf.MSI_shocktube_optimization_six_parameter_fit(cti_file,\n .01,\n 1,\n 1,\n working_directory,\n files_to_include, \n reaction_uncertainty_csv,rate_constant_target_value_data,\n master_equation_reactions = master_equation_reactions,\n molecular_parameter_sensitivities = molecular_parameter_sensitivities,\n six_parameter_fit_sensitivities = six_parameter_fit_sensitivities,\n master_reaction_equation_cti_name = master_reaction_equation_cti_name,\n master_index = master_index,\n master_equation_uncertainty_df = master_equation_uncertainty_df,\n six_paramter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict)\nMSI_st_instance_one.one_run_shock_tube_optimization()\n\nS_matrix_original = MSI_st_instance_one.S_matrix\nexp_dict_list_original = MSI_st_instance_one.experiment_dictonaries\noriginal_covariance = MSI_st_instance_one.covarience\nX_one_itteration = MSI_st_instance_one.X\nMSI_st_instance_one.deltaXAsNsEas\n\n\n\n\n#need to fix this and return _s_matrix and y_matrix\n\n\n\nMSI_st_instance_two = stMSIspf.MSI_shocktube_optimization_six_parameter_fit(cti_file,\n .01,\n 1,\n 1,\n working_directory,\n files_to_include, \n reaction_uncertainty_csv,rate_constant_target_value_data,\n master_equation_reactions = master_equation_reactions,\n molecular_parameter_sensitivities = molecular_parameter_sensitivities,\n six_parameter_fit_sensitivities = six_parameter_fit_sensitivities,\n master_reaction_equation_cti_name = master_reaction_equation_cti_name,\n master_index = master_index,\n master_equation_uncertainty_df = master_equation_uncertainty_df,\n six_paramter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict)\n \n \n#\n#\n#\n#\n#\n#ALL OF THIS STUFF CAN PROBABLY GO INTO SOME SORT OF CLASS\ndelta_X_list = MSI_st_instance_two.multiple_shock_tube_runs(numer_of_iterations)\n\n\ndeltaXAsNsEas 
= MSI_st_instance_two.deltaXAsNsEas\nphysical_obervable_updates_list = MSI_st_instance_two.physical_obervable_updates_list\nabsorbance_observables_updates_list = MSI_st_instance_two.absorbance_coef_update_dict\nYdf = MSI_st_instance_two.Y_data_frame\nZdf = MSI_st_instance_two.z_data_frame\nexperimental_dicts = MSI_st_instance_two.experiment_dictonaries\nz_matrix = MSI_st_instance_two.z_matrix\ns_matrix = MSI_st_instance_two.s_matrix\ny = MSI_st_instance_two.y_matrix\nY_matrix = MSI_st_instance_two.Y_matrix\nS_matrix = MSI_st_instance_two.S_matrix\n\nX = MSI_st_instance_two.X\nXdf = MSI_st_instance_two.X_data_frame\ncovarience = MSI_st_instance_two.covarience\nexp_dict_list_optimized_extra_reaction = MSI_st_instance_two.experiment_dictonaries\nparsed_yaml_list = MSI_st_instance_two.list_of_parsed_yamls\nsigma = MSI_st_instance_two.sigma\nX = MSI_st_instance_two.X\ndelta_X = MSI_st_instance_two.delta_X\nmolecular_parameter_updates = MSI_st_instance_two.delta_x_molecular_params_by_reaction_dict\nnominal_dict_six_p_fit = MSI_st_instance_two.six_paramter_fit_nominal_parameters_dict\noriginal_diag = np.diag(original_covariance)\n\n\n\n\n#target_value_rate_constant_csv = 'MSI/data/test_data/FFCM1_custom_target_value_test.csv'\noriginal_cti_file = MSI_st_instance_two.data_directory +'/'+ MSI_st_instance_two.cti_file_name\n\nexperiment_dict_uncertainty = MSI_st_instance_two.experiment_dict_uncertainty_original\ntarget_value_csv = MSI_st_instance_two.data_directory +'/'+ MSI_st_instance_two.k_target_values_csv\nsix_parameter_fit_dict_optimized = MSI_st_instance_two.updated_six_parameter_fits_dict\nif run_with_k_target_values == 'On' or run_with_k_target_values == 'on':\n k_target_value_S_matrix = MSI_st_instance_two.k_target_values_for_s\nelse:\n k_target_value_S_matrix = None\n\n\n##########################################################################################################################\n#PLOTTING##\n##########################################################################################################################\n#csv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_FFCM1.csv'\ncsv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_glarborg.csv'\n#csv_file_sigma = ''\nplotting_instance = plotter.Plotting(S_matrix,\n s_matrix,\n Y_matrix,\n Y_matrix,\n z_matrix,\n X,\n sigma,\n covarience,\n original_covariance,\n S_matrix_original,\n exp_dict_list_optimized_extra_reaction,\n exp_dict_list_original,\n parsed_yaml_list,\n Ydf,\n target_value_rate_constant_csv= MSI_st_instance_two.data_directory +'/'+ rate_constant_target_value_data_for_plotting ,\n target_value_rate_constant_csv_extra_values = MSI_st_instance_two.data_directory +'/'+rate_constant_target_value_data_extra,\n k_target_value_S_matrix =k_target_value_S_matrix,\n k_target_values=run_with_k_target_values,\n working_directory = working_directory,\n sigma_uncertainty_weighted_sensitivity_csv=csv_file_sigma)\n#csv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_updated.csv'\nobservable_counter_and_absorbance_wl,length_of_experimental_data = plotting_instance.lengths_of_experimental_data()\nsigmas_optimized,test = plotting_instance.calculating_sigmas(S_matrix,covarience)\nsigmas_original,test2 = plotting_instance.calculating_sigmas(S_matrix_original,original_covariance)\nplotting_instance.plotting_observables(sigmas_original = sigmas_original,sigmas_optimized= 
sigmas_optimized)\ndiag = plotting_instance.getting_matrix_diag(covarience)\n\n\n#plotting_instance.Y_matrix_plotter(Y_matrix,exp_dict_list_optimized,y,sigma)\n\n#\n#\n#plotting_instance.plotting_rate_constants(optimized_cti_file=MSI_st_instance_two.new_cti_file,\n# original_cti_file=original_cti_file,\n# initial_temperature=250,\n# final_temperature=2500)\n \n\n\nsensitivity, top_sensitivity = plotting_instance.sort_top_uncertainty_weighted_sens()\nobs = plotting_instance.plotting_uncertainty_weighted_sens()\n\nplotting_instance.plotting_rate_constants_six_paramter_fit(optimized_cti_file=MSI_st_instance_two.new_cti_file,\n original_cti_file=original_cti_file,\n initial_temperature=250,\n final_temperature=2500,\n master_equation_reactions = master_equation_reactions,\n six_parameter_fit_dict_optimized = six_parameter_fit_dict_optimized,\n six_parameter_fit_dict_nominal = six_parameter_fit_nominal_parameters_dict,\n six_parameter_fit_sensitivity_dict =six_parameter_fit_sensitivities )\n\n#plotting_instance.plotting_X_itterations(list_of_X_values_to_plot = [0,1,2,3,4,5,50],list_of_X_array=X_list,number_of_iterations=numer_of_iterations)\npost_processor_instance = post_processor.post_processing(optimized_cti_file = MSI_st_instance_two.new_cti_file,\n original_cti_file = original_cti_file,\n kinetic_paramter_dictonary = MSI_st_instance_two.kinetic_paramter_dict,\n master_equation_reactions=master_equation_reactions,\n six_parameter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict,\n six_parameter_fit_optimized_paramter_dict = six_parameter_fit_dict_optimized,\n exp_dict_list_optimized = exp_dict_list_optimized_extra_reaction,\n exp_dict_list_original = exp_dict_list_original,\n parsed_yaml_list = parsed_yaml_list)\n\nkinetic_paramters_dict = post_processor_instance.create_active_kinetic_paramter_dictonary()\nphysical_params_dict = post_processor_instance.create_active_physical_paramter_dictonary()\n\n\n"
] |
[
[
"numpy.array",
"pandas.read_csv",
"numpy.diag"
]
] |
herupraptono/kevmurphyML
|
[
"d8e08dbe268d2da9712ef915893984dc830fabea",
"d8e08dbe268d2da9712ef915893984dc830fabea",
"d8e08dbe268d2da9712ef915893984dc830fabea"
] |
[
"figureCode/daft/mlpXor.py",
"figureCode/contoursSSEDemo.py",
"figureCode/logregBinarydemo.py"
] |
[
"from matplotlib import rc\nrc(\"font\", family=\"serif\", size=12)\nrc(\"text\", usetex=True)\nrc(\"text.latex\", preamble=open(\"macros.tex\").read())\n\n#import daft\n\t\nimport imp\ndaft = imp.load_source('daft', '/Users/kpmurphy/github/daft/daft.py')\n \nimport os\n\npgm = daft.PGM([4, 4], origin=[0, 0])\n\npgm.add_node(daft.Node(\"one\", r\"$1$\", 1, 1))\npgm.add_node(daft.Node(\"x1\", r\"$x_1$\", 2, 1))\npgm.add_node(daft.Node(\"x2\", r\"$x_2$\", 3, 1))\n\npgm.add_node(daft.Node(\"z1\", r\"$z_1$\", 2, 2))\npgm.add_node(daft.Node(\"z2\", r\"$z_2$\", 3, 2))\n\npgm.add_node(daft.Node(\"y\", r\"$y$\", 2.5, 3))\n\npgm.add_edge(\"one\", \"z1\", label=\"-1.5\", xoffset=-0.3)\npgm.add_edge(\"x1\", \"z1\", label=\"+1\", xoffset=-0.3)\npgm.add_edge(\"x1\", \"z2\", label=\"+1\", xoffset=-0.4)\npgm.add_edge(\"x2\", \"z1\", label=\"+1\", xoffset=0.4)\npgm.add_edge(\"x2\", \"z2\", label=\"+1\", xoffset=0.3)\npgm.add_edge(\"z1\", \"y\", label=\"-1\", xoffset=-0.3)\npgm.add_edge(\"z2\", \"y\", label=\"-1\", xoffset=0.3)\n\n#ax = pgm.render() # returns the pyplot axes object it drew onto\n#ax.text(1, 2, \"My label\")\n\npgm.render()\nfolder = \"/Users/kpmurphy/github/pyprobml/figures\"\nfname = \"mlpXor\"\npgm.figure.savefig(os.path.join(folder, \"{}.png\".format(fname)))\n\n\n",
"#!/usr/bin/env python3\n\n# Error surface for linear regression model.\nfrom __future__ import absolute_import\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom utils import util\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef contoursSSEDemo():\n N = 21\n x,y,_,_,_,_ = util.poly_data_make(sampling='thibaux', n=N)\n X = util.add_ones(x)\n\n return X,y\n\nif __name__ == '__main__':\n X,y = contoursSSEDemo()\n N = len(y)\n w = np.linalg.lstsq(X, y)[0]\n v = np.arange(-6, 6, .1)\n W0, W1 = np.meshgrid(v, v)\n\n SS = np.array([sum((w0*X[:,0] + w1*X[:,1] - y)**2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])\n SS = SS.reshape(W0.shape)\n\n fig = pl.figure()\n ax = fig.add_subplot(111, projection='3d')\n surf = ax.plot_surface(W0, W1, SS)\n pl.savefig('linregSurfSSE.png')\n pl.show()\n\n fig,ax = pl.subplots()\n ax.set_title('Sum of squares error contours for linear regression')\n CS = pl.contour(W0, W1, SS)\n pl.plot([-4.351],[0.5377],'x')\n\n pl.savefig('linregContoursSSE.png')\n pl.show()\n",
"#!/usr/bin/env python\n\n# Fit the following models to SAT scores:\n# - Logistic Regression\n# - Quadratic Logistic Regression\n# - RBF Logistic Regression\n# - KNN with 10 nearest neighbors\n\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom utils import util\nfrom scipy.special import logit\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef genMultinomialData(num_instances, num_classes, num_vars):\n num_example_points = 3\n\n np.random.seed(234)\n example_points = np.random.randn(num_classes * num_example_points, num_vars)\n\n np.random.seed(234)\n X = 2*np.random.rand(num_instances, num_vars)-1\n\n y = np.zeros((num_instances, 1))\n\n # Now 'classify' each instance by its nearest neighbor.\n for i in range(1, num_instances):\n # Take the i'th example and find the closest sample.\n dist = np.linalg.norm((\n np.tile(X[i,:], (num_classes * num_example_points, 1)) -\n example_points), axis=1)\n min_index = np.argmin(dist)\n y[i, 0] = (min_index % num_classes) + 1\n\n return X,y\n\ndef plotScatter(X0, X1, y):\n for x0, x1, cls in zip(X0, X1, y):\n color = 'blue' if cls == 1 else 'red'\n marker = 'x' if cls == 1 else 'o'\n pl.scatter(x0, x1, marker=marker, color=color)\n\nX,y = genMultinomialData(100, 2, 2)\n\nmodels = [LogisticRegressionCV(),\n LogisticRegressionCV(),\n LogisticRegressionCV(),\n KNeighborsClassifier(n_neighbors=10)]\nkernels = [lambda X0, X1: X0, # No Kernel\n lambda X0, X1: polynomial_kernel(X0, X1, degree=2),\n lambda X0, X1: rbf_kernel(X0, X1, gamma=50), # sigma = .1\n lambda X0, X1: X0]\nnames = ['Linear Logistic Regression', \n 'Quadratic Logistic Regression', \n 'RBF Logistic Regression',\n 'KNN with K=10']\nfile_names = ['Linear', 'Quad', 'Rbf', 'KNN10']\n\nfor i in range(len(models)):\n transX = kernels[i](X, X)\n model = models[i].fit(transX, y)\n \n xx, yy = np.meshgrid(np.linspace(-1, 1, 250), np.linspace(-1, 1, 250))\n Z = model.predict(kernels[i](np.c_[xx.ravel(), yy.ravel()], X)).reshape(xx.shape)\n pl.pcolormesh(xx, yy, Z, cmap=pl.cm.coolwarm)\n plotScatter(X[:, 0], X[:, 1], y)\n pl.title(names[i])\n pl.savefig('logregBinary%sBoundary' % file_names[i])\n pl.show()\n \n Z = model.predict_proba(kernels[i](np.c_[xx.ravel(), yy.ravel()], X))[:,2].reshape(xx.shape)\n pl.pcolormesh(xx, yy, Z, cmap=pl.cm.coolwarm)\n pl.colorbar()\n pl.title('Prob Class 1')\n pl.savefig('logregBinary%sProbClass1' % file_names[i])\n pl.show()\n"
] |
[
[
"matplotlib.rc"
],
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.linalg.lstsq",
"numpy.arange",
"numpy.ravel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.contour",
"numpy.meshgrid"
],
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"numpy.random.rand",
"numpy.zeros",
"numpy.argmin",
"numpy.random.seed",
"matplotlib.pyplot.savefig",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.title",
"numpy.random.randn",
"sklearn.metrics.pairwise.rbf_kernel",
"numpy.tile",
"sklearn.metrics.pairwise.polynomial_kernel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.linspace",
"sklearn.linear_model.LogisticRegressionCV"
]
] |
mhavasi/edward2
|
[
"b630fea94386f7a6413f7d33ce75bb1dbe413d2d",
"b630fea94386f7a6413f7d33ce75bb1dbe413d2d"
] |
[
"baselines/imagenet/ensemble.py",
"baselines/imagenet/efficientnet_be_model.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Ensemble on ImageNet.\n\nThis script only performs evaluation, not training. We recommend training\nensembles by launching independent runs of `deterministic.py` over different\nseeds.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport edward2 as ed\nimport deterministic_model # local file import\nimport utils # local file import\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nflags.DEFINE_integer('per_core_batch_size', 512, 'Batch size per TPU core/GPU.')\nflags.DEFINE_integer('seed', 0, 'Random seed.')\nflags.DEFINE_string('data_dir', None, 'Path to training and testing data.')\nflags.mark_flag_as_required('data_dir')\nflags.DEFINE_string('checkpoint_dir', None,\n 'The directory where the model weights are stored.')\nflags.mark_flag_as_required('checkpoint_dir')\nflags.DEFINE_string('output_dir', '/tmp/imagenet',\n 'The directory where to save predictions.')\nflags.DEFINE_string('alexnet_errors_path', None,\n 'Path to AlexNet corruption errors file.')\nflags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')\n\n# Accelerator flags.\nflags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')\nflags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')\nflags.DEFINE_string('tpu', None,\n 'Name of the TPU. 
Only used if use_gpu is False.')\nFLAGS = flags.FLAGS\n\n# Number of images in eval dataset.\nIMAGENET_VALIDATION_IMAGES = 50000\nNUM_CLASSES = 1000\n\n\ndef ensemble_negative_log_likelihood(labels, logits):\n \"\"\"Negative log-likelihood for ensemble.\n\n For each datapoint (x,y), the ensemble's negative log-likelihood is:\n\n ```\n -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +\n log ensemble_size.\n ```\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n \"\"\"\n labels = tf.cast(labels, tf.int32)\n logits = tf.convert_to_tensor(logits)\n ensemble_size = float(logits.shape[0])\n nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),\n logits)\n return -tf.reduce_logsumexp(-nll, axis=0) + tf.math.log(ensemble_size)\n\n\ndef gibbs_cross_entropy(labels, logits):\n \"\"\"Average cross entropy for ensemble members (Gibbs cross entropy).\n\n For each datapoint (x,y), the ensemble's Gibbs cross entropy is:\n\n ```\n GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).\n ```\n\n The Gibbs cross entropy approximates the average cross entropy of a single\n model drawn from the (Gibbs) ensemble.\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n \"\"\"\n labels = tf.cast(labels, tf.int32)\n logits = tf.convert_to_tensor(logits)\n nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),\n logits)\n return tf.reduce_mean(nll, axis=0)\n\n\ndef main(argv):\n del argv # unused arg\n if not FLAGS.use_gpu:\n raise ValueError('Only GPU is currently supported.')\n if FLAGS.num_cores > 1:\n raise ValueError('Only a single accelerator is currently supported.')\n tf.enable_v2_behavior()\n tf.random.set_seed(FLAGS.seed)\n tf.io.gfile.makedirs(FLAGS.output_dir)\n\n batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores\n steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size\n\n dataset_test = utils.ImageNetInput(\n is_training=False,\n data_dir=FLAGS.data_dir,\n batch_size=FLAGS.per_core_batch_size,\n use_bfloat16=False).input_fn()\n test_datasets = {'clean': dataset_test}\n corruption_types, max_intensity = utils.load_corrupted_test_info()\n for name in corruption_types:\n for intensity in range(1, max_intensity + 1):\n dataset_name = '{0}_{1}'.format(name, intensity)\n test_datasets[dataset_name] = utils.load_corrupted_test_dataset(\n name=name,\n intensity=intensity,\n batch_size=FLAGS.per_core_batch_size,\n drop_remainder=True,\n use_bfloat16=False)\n\n model = deterministic_model.resnet50(input_shape=(224, 224, 3),\n num_classes=NUM_CLASSES)\n\n logging.info('Model input shape: %s', model.input_shape)\n logging.info('Model output shape: %s', model.output_shape)\n logging.info('Model number of weights: %s', model.count_params())\n # Search for checkpoints from their index file; then remove the index suffix.\n ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,\n '**/*.index'))\n ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]\n ensemble_size = len(ensemble_filenames)\n logging.info('Ensemble size: %s', ensemble_size)\n logging.info('Ensemble number of weights: %s',\n ensemble_size * model.count_params())\n logging.info('Ensemble filenames: %s', str(ensemble_filenames))\n checkpoint = 
tf.train.Checkpoint(model=model)\n\n # Write model predictions to files.\n num_datasets = len(test_datasets)\n for m, ensemble_filename in enumerate(ensemble_filenames):\n checkpoint.restore(ensemble_filename)\n for n, (name, test_dataset) in enumerate(test_datasets.items()):\n filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)\n filename = os.path.join(FLAGS.output_dir, filename)\n if not tf.io.gfile.exists(filename):\n logits = []\n test_iterator = iter(test_dataset)\n for _ in range(steps_per_eval):\n features, _ = next(test_iterator) # pytype: disable=attribute-error\n logits.append(model(features, training=False))\n\n logits = tf.concat(logits, axis=0)\n with tf.io.gfile.GFile(filename, 'w') as f:\n np.save(f, logits.numpy())\n percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)\n message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '\n 'Dataset {:d}/{:d}'.format(percent,\n m + 1,\n ensemble_size,\n n + 1,\n num_datasets))\n logging.info(message)\n\n metrics = {\n 'test/negative_log_likelihood': tf.keras.metrics.Mean(),\n 'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),\n 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'test/ece': ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),\n }\n corrupt_metrics = {}\n for name in test_datasets:\n corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()\n corrupt_metrics['test/accuracy_{}'.format(name)] = (\n tf.keras.metrics.SparseCategoricalAccuracy())\n corrupt_metrics['test/ece_{}'.format(\n name)] = ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)\n\n # Evaluate model predictions.\n for n, (name, test_dataset) in enumerate(test_datasets.items()):\n logits_dataset = []\n for m in range(ensemble_size):\n filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)\n filename = os.path.join(FLAGS.output_dir, filename)\n with tf.io.gfile.GFile(filename, 'rb') as f:\n logits_dataset.append(np.load(f))\n\n logits_dataset = tf.convert_to_tensor(logits_dataset)\n test_iterator = iter(test_dataset)\n for step in range(steps_per_eval):\n _, labels = next(test_iterator) # pytype: disable=attribute-error\n logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]\n labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)\n negative_log_likelihood = tf.reduce_mean(\n ensemble_negative_log_likelihood(labels, logits))\n per_probs = tf.nn.softmax(logits)\n probs = tf.reduce_mean(per_probs, axis=0)\n if name == 'clean':\n gibbs_ce = tf.reduce_mean(gibbs_cross_entropy(labels, logits))\n metrics['test/negative_log_likelihood'].update_state(\n negative_log_likelihood)\n metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)\n metrics['test/accuracy'].update_state(labels, probs)\n metrics['test/ece'].update_state(labels, probs)\n else:\n corrupt_metrics['test/nll_{}'.format(name)].update_state(\n negative_log_likelihood)\n corrupt_metrics['test/accuracy_{}'.format(name)].update_state(\n labels, probs)\n corrupt_metrics['test/ece_{}'.format(name)].update_state(\n labels, probs)\n\n message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(\n (n + 1) / num_datasets, n + 1, num_datasets))\n logging.info(message)\n\n corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,\n corruption_types,\n max_intensity,\n FLAGS.alexnet_errors_path)\n total_results = {name: metric.result() for name, metric in metrics.items()}\n total_results.update(corrupt_results)\n logging.info('Metrics: %s', total_results)\n\n\nif __name__ 
== '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"EfficientNet model with BatchEnsemble.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport math\nimport edward2 as ed\nimport utils # local file import\nimport tensorflow.compat.v2 as tf\n\nBlockArgs = collections.namedtuple('BlockArgs', [\n 'kernel_size',\n 'num_repeat',\n 'input_filters',\n 'output_filters',\n 'expand_ratio',\n 'strides',\n 'se_ratio',\n])\n\n\ndef efficientnet_params(model_name):\n \"\"\"Get efficientnet params based on model name.\"\"\"\n params_dict = {\n # (width_coefficient, depth_coefficient, resolution, dropout_rate)\n 'efficientnet-b0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-b1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-b2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-b3': (1.2, 1.4, 300, 0.3),\n 'efficientnet-b4': (1.4, 1.8, 380, 0.4),\n 'efficientnet-b5': (1.6, 2.2, 456, 0.4),\n 'efficientnet-b6': (1.8, 2.6, 528, 0.5),\n 'efficientnet-b7': (2.0, 3.1, 600, 0.5),\n 'efficientnet-b8': (2.2, 3.6, 672, 0.5),\n 'efficientnet-l2': (4.3, 5.3, 800, 0.5),\n }\n return params_dict[model_name]\n\n\ndef round_filters(filters, width_coefficient, depth_divisor, min_depth):\n \"\"\"Round number of filters based on depth multiplier.\"\"\"\n filters *= width_coefficient\n min_depth = min_depth or depth_divisor\n new_filters = max(\n min_depth,\n int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += depth_divisor\n return int(new_filters)\n\n\ndef round_repeats(repeats, depth_coefficient):\n \"\"\"Round number of filters based on depth multiplier.\"\"\"\n return int(math.ceil(depth_coefficient * repeats))\n\n\ndef make_sign_initializer(random_sign_init):\n if random_sign_init > 0:\n initializer = ed.initializers.RandomSign(random_sign_init)\n else:\n initializer = tf.keras.initializers.RandomNormal(mean=1.0,\n stddev=-random_sign_init)\n return initializer\n\n\nclass MBConvBlock(tf.keras.layers.Layer):\n \"\"\"A class of MBConv: Mobile Inverted Residual Bottleneck.\"\"\"\n\n def __init__(self,\n block_args,\n ensemble_size,\n random_sign_init,\n batch_norm_momentum,\n batch_norm_epsilon,\n batch_norm,\n data_format,\n relu_fn,\n use_se,\n clip_projection_output):\n \"\"\"Initializes a MBConv block.\n\n Args:\n block_args: BlockArgs, arguments to create a Block.\n ensemble_size: Size of ensemble.\n random_sign_init: Probability/stddev for fast weight initialization.\n batch_norm_momentum: Momentum for batch normalization.\n batch_norm_epsilon: Epsilon for batch normalization.\n batch_norm: Batch norm layer.\n data_format: Image data format.\n relu_fn: Activation.\n use_se: Whether to use squeeze and excitation layers.\n clip_projection_output: Whether to clip projected conv outputs.\n \"\"\"\n super(MBConvBlock, self).__init__()\n 
self._block_args = block_args\n self._ensemble_size = ensemble_size\n self._random_sign_init = random_sign_init\n self._batch_norm_momentum = batch_norm_momentum\n self._batch_norm_epsilon = batch_norm_epsilon\n self._batch_norm = batch_norm\n self._data_format = data_format\n if self._data_format == 'channels_first':\n self._channel_axis = 1\n self._spatial_dims = [2, 3]\n else:\n self._channel_axis = -1\n self._spatial_dims = [1, 2]\n\n self._relu_fn = relu_fn\n self._has_se = (\n use_se and self._block_args.se_ratio is not None and\n 0 < self._block_args.se_ratio <= 1)\n self._clip_projection_output = clip_projection_output\n self._build()\n\n def _build(self):\n \"\"\"Builds block according to the arguments.\"\"\"\n filters = self._block_args.input_filters * self._block_args.expand_ratio\n kernel_size = self._block_args.kernel_size\n self._expand_conv = ed.layers.Conv2DBatchEnsemble(\n filters=filters,\n kernel_size=[1, 1],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[1, 1],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n self._bn0 = self._batch_norm(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon)\n self._depthwise_conv = ed.layers.DepthwiseConv2DBatchEnsemble(\n kernel_size=[kernel_size, kernel_size],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=self._block_args.strides,\n depthwise_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n self._bn1 = self._batch_norm(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon)\n if self._has_se:\n num_reduced_filters = max(\n 1, int(self._block_args.input_filters * self._block_args.se_ratio))\n self._se_reduce = ed.layers.Conv2DBatchEnsemble(\n num_reduced_filters,\n kernel_size=[1, 1],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[1, 1],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=True)\n self._se_expand = ed.layers.Conv2DBatchEnsemble(\n filters,\n kernel_size=[1, 1],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[1, 1],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=True)\n\n filters = self._block_args.output_filters\n self._project_conv = ed.layers.Conv2DBatchEnsemble(\n filters=filters,\n kernel_size=[1, 1],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[1, 1],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n self._bn2 = self._batch_norm(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon)\n\n def call(self, inputs, training=True, survival_prob=None):\n \"\"\"Implementation of 
call().\n\n Args:\n inputs: the inputs tensor.\n training: boolean, whether the model is constructed for training.\n survival_prob: float, between 0 to 1, drop connect rate.\n\n Returns:\n A output tensor.\n \"\"\"\n x = inputs\n if self._block_args.expand_ratio != 1:\n x = self._relu_fn(self._bn0(self._expand_conv(x), training=training))\n x = self._relu_fn(self._bn1(self._depthwise_conv(x), training=training))\n\n if self._has_se:\n se_tensor = tf.reduce_mean(\n x, self._spatial_dims, keepdims=True)\n se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))\n x = tf.sigmoid(se_tensor) * x\n\n x = self._bn2(self._project_conv(x), training=training)\n # Add identity so that quantization-aware training can insert quantization\n # ops correctly.\n x = tf.identity(x)\n if self._clip_projection_output:\n x = tf.clip_by_value(x, -6, 6)\n if all(\n s == 1 for s in self._block_args.strides\n ) and self._block_args.input_filters == self._block_args.output_filters:\n if survival_prob:\n x = utils.drop_connect(x, training, survival_prob)\n x = tf.add(x, inputs)\n return x\n\n\nclass Model(tf.keras.Model):\n \"\"\"EfficientNet.\"\"\"\n\n def __init__(self,\n width_coefficient,\n depth_coefficient,\n dropout_rate,\n ensemble_size,\n random_sign_init,\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n survival_prob=0.8,\n data_format='channels_last',\n num_classes=1000,\n depth_divisor=8,\n min_depth=None,\n relu_fn=tf.nn.swish,\n batch_norm=utils.SyncBatchNorm, # TPU-specific requirement.\n use_se=True,\n clip_projection_output=False):\n \"\"\"Initializes model instance.\n\n Args:\n width_coefficient: Coefficient to scale width.\n depth_coefficient: Coefficient to scale depth.\n dropout_rate: Dropout rate.\n ensemble_size: Size of ensemble.\n random_sign_init: Probability/stddev for fast weight initialization.\n batch_norm_momentum: Momentum for batch normalization.\n batch_norm_epsilon: Epsilon for batch normalization.\n survival_prob: float, survival probability for stochastic depth.\n data_format: Image data format.\n num_classes: Number of output classes.\n depth_divisor: Divisor to divide filters per conv when rounding.\n min_depth: Minimum depth per conv when rounding filters.\n relu_fn: Activation.\n batch_norm: Batch norm layer.\n use_se: Whether to use squeeze and excitation layers.\n clip_projection_output: Whether to clip projected conv outputs.\n \"\"\"\n super(Model, self).__init__()\n self._width_coefficient = width_coefficient\n self._depth_coefficient = depth_coefficient\n self._dropout_rate = dropout_rate\n self._ensemble_size = ensemble_size\n self._random_sign_init = random_sign_init\n self._batch_norm_momentum = batch_norm_momentum\n self._batch_norm_epsilon = batch_norm_epsilon\n self._survival_prob = survival_prob\n self._data_format = data_format\n self._num_classes = num_classes\n self._depth_divisor = depth_divisor\n self._min_depth = min_depth\n self._relu_fn = relu_fn\n self._batch_norm = batch_norm\n self._use_se = use_se\n self._clip_projection_output = clip_projection_output\n self._build()\n\n def _build(self):\n \"\"\"Builds a model.\"\"\"\n if self._data_format == 'channels_first':\n channel_axis = 1\n self._spatial_dims = [2, 3]\n else:\n channel_axis = -1\n self._spatial_dims = [1, 2]\n\n self._conv_stem = ed.layers.Conv2DBatchEnsemble(\n filters=round_filters(32,\n self._width_coefficient,\n self._depth_divisor,\n self._min_depth),\n kernel_size=[3, 3],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n 
gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[2, 2],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n self._bn0 = self._batch_norm(\n axis=channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon)\n\n Block = functools.partial( # pylint: disable=invalid-name\n MBConvBlock,\n ensemble_size=self._ensemble_size,\n random_sign_init=self._random_sign_init,\n batch_norm_momentum=self._batch_norm_momentum,\n batch_norm_epsilon=self._batch_norm_epsilon,\n batch_norm=self._batch_norm,\n data_format=self._data_format,\n relu_fn=self._relu_fn,\n use_se=self._use_se,\n clip_projection_output=self._clip_projection_output)\n self._blocks = []\n blocks_args = [\n BlockArgs(kernel_size=3,\n num_repeat=1,\n input_filters=32,\n output_filters=16,\n expand_ratio=1,\n strides=[1, 1],\n se_ratio=0.25),\n BlockArgs(kernel_size=3,\n num_repeat=2,\n input_filters=16,\n output_filters=24,\n expand_ratio=6,\n strides=[2, 2],\n se_ratio=0.25),\n BlockArgs(kernel_size=5,\n num_repeat=2,\n input_filters=24,\n output_filters=40,\n expand_ratio=6,\n strides=[2, 2],\n se_ratio=0.25),\n BlockArgs(kernel_size=3,\n num_repeat=3,\n input_filters=40,\n output_filters=80,\n expand_ratio=6,\n strides=[2, 2],\n se_ratio=0.25),\n BlockArgs(kernel_size=5,\n num_repeat=3,\n input_filters=80,\n output_filters=112,\n expand_ratio=6,\n strides=[1, 1],\n se_ratio=0.25),\n BlockArgs(kernel_size=5,\n num_repeat=4,\n input_filters=112,\n output_filters=192,\n expand_ratio=6,\n strides=[2, 2],\n se_ratio=0.25),\n BlockArgs(kernel_size=3,\n num_repeat=1,\n input_filters=192,\n output_filters=320,\n expand_ratio=6,\n strides=[1, 1],\n se_ratio=0.25),\n ]\n for block_args in blocks_args:\n # Update block input and output filters based on depth multiplier.\n input_filters = round_filters(block_args.input_filters,\n self._width_coefficient,\n self._depth_divisor,\n self._min_depth)\n output_filters = round_filters(block_args.output_filters,\n self._width_coefficient,\n self._depth_divisor,\n self._min_depth)\n repeats = round_repeats(block_args.num_repeat,\n self._depth_coefficient)\n block_args = block_args._replace(\n input_filters=input_filters,\n output_filters=output_filters,\n num_repeat=repeats)\n self._blocks.append(Block(block_args))\n\n if block_args.num_repeat > 1:\n # pylint: disable=protected-access\n block_args = block_args._replace(\n input_filters=block_args.output_filters, strides=[1, 1])\n # pylint: enable=protected-access\n for _ in range(block_args.num_repeat - 1):\n self._blocks.append(Block(block_args))\n\n self._conv_head = ed.layers.Conv2DBatchEnsemble(\n filters=round_filters(1280,\n self._width_coefficient,\n self._depth_divisor,\n self._min_depth),\n kernel_size=[1, 1],\n alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n strides=[1, 1],\n kernel_initializer=utils.conv_kernel_initializer,\n padding='same',\n use_bias=False)\n self._bn1 = self._batch_norm(\n axis=channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon)\n self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(\n data_format=self._data_format)\n if self._dropout_rate > 0:\n self._dropout = tf.keras.layers.Dropout(self._dropout_rate)\n else:\n self._dropout = None\n self._fc = ed.layers.DenseBatchEnsemble(\n self._num_classes,\n 
alpha_initializer=make_sign_initializer(self._random_sign_init),\n gamma_initializer=make_sign_initializer(self._random_sign_init),\n ensemble_size=self._ensemble_size,\n kernel_initializer=utils.dense_kernel_initializer)\n\n def call(self, inputs, training=True):\n \"\"\"Implementation of call().\n\n Args:\n inputs: input tensors.\n training: boolean, whether the model is constructed for training.\n\n Returns:\n output tensors.\n \"\"\"\n outputs = self._relu_fn(\n self._bn0(self._conv_stem(inputs), training=training))\n\n for idx, block in enumerate(self._blocks):\n survival_prob = self._survival_prob\n if survival_prob:\n drop_rate = 1.0 - survival_prob\n survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)\n outputs = block.call(\n outputs, training=training, survival_prob=survival_prob)\n\n outputs = self._relu_fn(\n self._bn1(self._conv_head(outputs), training=training))\n outputs = self._avg_pooling(outputs)\n if self._dropout:\n outputs = self._dropout(outputs, training=training)\n outputs = self._fc(outputs)\n return outputs\n"
] |
[
[
"tensorflow.compat.v2.io.gfile.makedirs",
"tensorflow.compat.v2.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.compat.v2.train.Checkpoint",
"tensorflow.compat.v2.reduce_logsumexp",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.cast",
"numpy.load",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.io.gfile.exists",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.random.set_seed",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.keras.metrics.Mean",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.nn.softmax",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.io.gfile.GFile"
],
[
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.keras.initializers.RandomNormal",
"tensorflow.compat.v2.add",
"tensorflow.compat.v2.sigmoid",
"tensorflow.compat.v2.keras.layers.Dropout",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.keras.layers.GlobalAveragePooling2D"
]
] |
awillats/brian2
|
[
"e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc",
"e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc",
"e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc",
"e1107ed0cc4a7d6c69c1e2634b675ba09edfd9fc"
] |
[
"brian2/core/functions.py",
"brian2/tests/test_subgroup.py",
"brian2/tests/test_morphology.py",
"brian2/tests/features/base.py"
] |
[
"from collections.abc import Mapping\nimport inspect\nimport types\nfrom typing import Callable\n\nimport numpy as np\nimport sympy\nfrom sympy.codegen import cfunctions as sympy_cfunctions\nfrom numpy.random import randn, rand\nfrom sympy import Function as sympy_Function\nfrom sympy import S\n\nimport brian2.units.unitsafefunctions as unitsafe\nfrom brian2.core.preferences import prefs\nfrom brian2.core.variables import Constant\nfrom brian2.units.fundamentalunits import (fail_for_dimension_mismatch,\n Quantity, get_dimensions,\n DIMENSIONLESS, is_dimensionless)\nfrom brian2.units.allunits import second\n\n__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']\n\n\nBRIAN_DTYPES = ['boolean', 'integer', 'float']\nVALID_ARG_TYPES = BRIAN_DTYPES+['any']\nVALID_RETURN_TYPES = BRIAN_DTYPES+['highest']\n\n\ndef declare_types(**types):\n '''\n Decorator to declare argument and result types for a function\n\n Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``\n and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument\n types are assumed to be ``'all'`` (i.e. anything is permitted), and an unspecified\n result type is assumed to be ``'float'``. Note that the ``'highest'`` option for\n result type will give the highest type of its argument, e.g. if the arguments\n were boolean and integer then the result would be integer, if the arguments were\n integer and float it would be float.\n '''\n def annotate_function_with_types(f):\n if hasattr(f, '_orig_arg_names'):\n arg_names = f._orig_arg_names\n else:\n arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]\n argtypes = []\n for name in arg_names:\n arg_type = types.get(name, 'any')\n if arg_type not in VALID_ARG_TYPES:\n raise ValueError(\"Argument type %s is not valid, must be one of %s, \"\n \"for argument %s\" % (arg_type, VALID_ARG_TYPES, name))\n argtypes.append(arg_type)\n for n in types:\n if n not in arg_names and n!='result':\n raise ValueError(\"Type specified for unknown argument \"+n)\n return_type = types.get('result', 'float')\n if return_type not in VALID_RETURN_TYPES:\n raise ValueError(\"Result type %s is not valid, \"\n \"must be one of %s\" % (return_type, VALID_RETURN_TYPES))\n f._arg_types = argtypes\n f._return_type = return_type\n f._orig_arg_names = arg_names\n f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']\n return f\n return annotate_function_with_types\n\n\nclass Function(object):\n '''\n An abstract specification of a function that can be used as part of\n model equations, etc.\n\n Parameters\n ----------\n pyfunc : function\n A Python function that is represented by this `Function` object.\n sympy_func : `sympy.Function`, optional\n A corresponding sympy function (if any). Allows functions to be\n interpreted by sympy and potentially make simplifications. For example,\n ``sqrt(x**2)`` could be replaced by ``abs(x)``.\n arg_units : list of `Unit`, optional\n If `pyfunc` does not provide unit information (which typically means\n that it was not annotated with a `check_units` decorator), the\n units of the arguments have to specified explicitly using this\n parameter.\n return_unit : `Unit` or callable, optional\n Same as for `arg_units`: if `pyfunc` does not provide unit information,\n this information has to be provided explictly here. `return_unit` can\n either be a specific `Unit`, if the function always returns the same\n unit, or a function of the input units, e.g. 
a \"square\" function would\n return the square of its input units, i.e. `return_unit` could be\n specified as ``lambda u: u**2``.\n arg_types : list of str, optional\n Similar to `arg_units`, but gives the type of the argument rather than\n its unit. In the current version of Brian arguments are specified\n by one of the following strings: 'boolean', 'integer', 'float', 'any'.\n If `arg_types` is not specified, 'any' will be assumed. In\n future versions, a more refined specification may be possible. Note that\n any argument with a type other than float should have no units. If\n return_type : str, optional\n Similar to `return_unit` and `arg_types`. In addition to 'boolean',\n 'integer' and 'float' you can also use 'highest' which will return the\n highest type of its arguments. You can also give a function, as for\n `return_unit`. If the return type is not specified, it is assumed to\n be 'float'.\n stateless : bool, optional\n Whether this function does not have an internal state, i.e. if it\n always returns the same output when called with the same arguments.\n This is true for mathematical functions but not true for ``rand()``, for\n example. Defaults to ``True``.\n auto_vectorise : bool, optional\n Whether the implementations of this function should get an additional\n argument (not specified in abstract code) that can be used to determine\n the number of values that should be returned (for the numpy target), or\n an index potentially useful for generating deterministic values\n independent of the order of vectorisation (for all other targets). The\n main use case are random number functions, e.g. equations refer to\n ``rand()``, but the generate code will actually call\n ``rand(_vectorisation_idx)``. Defaults to ``False``.\n\n Notes\n -----\n If a function should be usable for code generation targets other than\n Python/numpy, implementations for these target languages have to be added\n using the `~brian2.codegen.functions.implementation` decorator or using the\n `~brian2.codegen.functions.add_implementations` function.\n '''\n def __init__(self, pyfunc, sympy_func=None,\n arg_units=None, arg_names=None,\n return_unit=None,\n arg_types=None, return_type=None,\n stateless=True, auto_vectorise=False):\n self.pyfunc = pyfunc\n self.sympy_func = sympy_func\n self._arg_units = arg_units\n self._arg_names = arg_names\n self._return_unit = return_unit\n if return_unit == bool:\n self._returns_bool = True\n else:\n self._returns_bool = False\n self._arg_types = arg_types\n self._return_type = return_type\n self.stateless = stateless\n self.auto_vectorise = auto_vectorise\n if self._arg_units is None:\n if not hasattr(pyfunc, '_arg_units'):\n raise ValueError(('The Python function \"%s\" does not specify '\n 'how it deals with units, need to specify '\n '\"arg_units\" or use the \"@check_units\" '\n 'decorator.') % pyfunc.__name__)\n elif pyfunc._arg_units is None:\n # @check_units sets _arg_units to None if the units aren't\n # specified for all of its arguments\n raise ValueError(('The Python function \"%s\" does not specify '\n 'the units for all of its '\n 'arguments.') % pyfunc.__name__)\n else:\n self._arg_units = pyfunc._arg_units\n else:\n if any(isinstance(u, str) for u in self._arg_units):\n if self._arg_names is None:\n raise TypeError('Need to specify the names of the '\n 'arguments.')\n if len(self._arg_names) != len(self._arg_units):\n raise TypeError(f'arg_names and arg_units need to have the '\n f'same length ({len(self._arg_names)} != '\n f'({len(self._arg_units)})')\n\n 
if self._return_unit is None:\n if not hasattr(pyfunc, '_return_unit'):\n raise ValueError(('The Python function \"%s\" does not specify '\n 'how it deals with units, need to specify '\n '\"return_unit\" or use the \"@check_units\" '\n 'decorator.') % pyfunc.__name__)\n elif pyfunc._return_unit is None:\n # @check_units sets _return_unit to None if no \"result=...\"\n # keyword is specified.\n raise ValueError(('The Python function \"%s\" does not specify '\n 'the unit for its return '\n 'value.') % pyfunc.__name__)\n else:\n self._return_unit = pyfunc._return_unit\n\n if self._arg_types is None:\n if hasattr(pyfunc, '_arg_types'):\n self._arg_types = pyfunc._arg_types\n else:\n self._arg_types = ['any']*len(self._arg_units)\n\n if self._return_type is None:\n self._return_type = getattr(pyfunc, '_return_type', 'float')\n\n for argtype, u in zip(self._arg_types, self._arg_units):\n if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):\n raise TypeError(\"Non-float arguments must be dimensionless in function \"+pyfunc.__name__)\n if argtype not in VALID_ARG_TYPES:\n raise ValueError(\"Argument type %s is not valid, must be one of %s, \"\n \"in function %s\" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))\n\n if self._return_type not in VALID_RETURN_TYPES:\n raise ValueError(\"Return type %s is not valid, must be one of %s, \"\n \"in function %s\" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))\n\n #: Stores implementations for this function in a\n #: `FunctionImplementationContainer`\n self.implementations = FunctionImplementationContainer(self)\n\n def is_locally_constant(self, dt):\n '''\n Return whether this function (if interpreted as a function of time)\n should be considered constant over a timestep. This is most importantly\n used by `TimedArray` so that linear integration can be used. In its\n standard implementation, always returns ``False``.\n\n Parameters\n ----------\n dt : float\n The length of a timestep (without units).\n\n Returns\n -------\n constant : bool\n Whether the results of this function can be considered constant\n over one timestep of length `dt`.\n '''\n return False\n\n def __call__(self, *args):\n return self.pyfunc(*args)\n\n\nclass FunctionImplementation(object):\n '''\n A simple container object for function implementations.\n\n Parameters\n ----------\n name : str, optional\n The name of the function in the target language. Should only be\n specified if the function has to be renamed for the target language.\n code : language-dependent, optional\n A language dependent argument specifying the implementation in the\n target language, e.g. a code string or a dictionary of code strings.\n namespace : dict-like, optional\n A dictionary of mappings from names to values that should be added\n to the namespace of a `CodeObject` using the function.\n dependencies : dict-like, optional\n A mapping of names to `Function` objects, for additional functions\n needed by this function.\n availability_check : callable, optional\n A function that will be called to check whether the function should be\n made available (e.g. depending on whether it is supported by the\n compiler). The function should do nothing if the function is\n available, or raise a ``NotImplementedError`` with a message\n explaining why it isn't.\n dynamic : bool, optional\n Whether this `code`/`namespace` is dynamic, i.e. generated for each\n new context it is used in. 
If set to ``True``, `code` and `namespace`\n have to be callable with a `Group` as an argument and are expected\n to return the final `code` and `namespace`. Defaults to ``False``.\n '''\n def __init__(self, name=None, code=None, namespace=None,\n dependencies=None, availability_check=None,\n dynamic=False, compiler_kwds=None):\n if compiler_kwds is None:\n compiler_kwds = {}\n self.name = name\n if dependencies is None:\n dependencies = {}\n self.dependencies = dependencies\n self._code = code\n self._namespace = namespace\n self.dynamic = dynamic\n self.compiler_kwds = compiler_kwds\n self.availability_check = availability_check\n\n def get_code(self, owner):\n if self.availability_check is not None:\n self.availability_check()\n if self.dynamic:\n return self._code(owner)\n else:\n return self._code\n\n def get_namespace(self, owner):\n if self.dynamic:\n return self._namespace(owner)\n else:\n return self._namespace\n\n\nclass FunctionImplementationContainer(Mapping):\n '''\n Helper object to store implementations and give access in a dictionary-like\n fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`\n implementations.\n '''\n def __init__(self, function):\n self._function = function\n self._implementations = dict()\n\n def __getitem__(self, key):\n '''\n Find an implementation for this function that can be used by the\n `CodeObject` given as `key`. Will find implementations registered\n for `key` itself (or one of its parents), or for the `CodeGenerator`\n class that `key` uses (or one of its parents). In all cases,\n implementations registered for the corresponding names qualify as well.\n\n Parameters\n ----------\n key : `CodeObject`\n The `CodeObject` that will use the `Function`\n\n Returns\n -------\n implementation : `FunctionImplementation`\n An implementation suitable for `key`.\n '''\n fallback = getattr(key, 'generator_class', None)\n # in some cases we do the code generation with original_generator_class instead (e.g. GSL)\n fallback_parent = getattr(key, 'original_generator_class', None)\n\n for K in [key, fallback, fallback_parent]:\n name = getattr(K, 'class_name',\n 'no class name for key')\n for impl_key, impl in self._implementations.items():\n impl_key_name = getattr(impl_key, 'class_name',\n 'no class name for implementation')\n if ((impl_key_name is not None and impl_key_name in [K, name]) or\n (impl_key is not None and impl_key in [K, name])):\n return impl\n if hasattr(K, '__bases__'):\n for cls in inspect.getmro(K):\n if cls in self._implementations:\n return self._implementations[cls]\n name = getattr(cls, 'class_name', None)\n if name in self._implementations:\n return self._implementations[name]\n\n # Give a nicer error message if possible\n if getattr(key, 'class_name', None) is not None:\n key = key.class_name\n elif getattr(fallback, 'class_name', None) is not None:\n key = fallback.class_name\n keys = ', '.join([getattr(k, 'class_name', str(k))\n for k in self._implementations])\n raise KeyError(('No implementation available for target {key}. 
'\n 'Available implementations: {keys}').format(key=key,\n keys=keys))\n\n def add_numpy_implementation(self, wrapped_func, dependencies=None,\n discard_units=None, compiler_kwds=None):\n '''\n Add a numpy implementation to a `Function`.\n\n Parameters\n ----------\n function : `Function`\n The function description for which an implementation should be added.\n wrapped_func : callable\n The original function (that will be used for the numpy implementation)\n dependencies : list of `Function`, optional\n A list of functions this function needs.\n discard_units : bool, optional\n See `implementation`.\n '''\n if discard_units is None:\n discard_units = prefs['codegen.runtime.numpy.discard_units']\n\n # Get the original function inside the check_units decorator\n if hasattr(wrapped_func, '_orig_func'):\n orig_func = wrapped_func._orig_func\n else:\n orig_func = wrapped_func\n\n if discard_units:\n new_globals = dict(orig_func.__globals__)\n # strip away units in the function by changing its namespace\n for key, value in new_globals.items():\n if isinstance(value, Quantity):\n new_globals[key] = np.asarray(value)\n unitless_func = types.FunctionType(orig_func.__code__, new_globals,\n orig_func.__name__,\n orig_func.__defaults__,\n orig_func.__closure__)\n self._implementations['numpy'] = FunctionImplementation(name=None,\n code=unitless_func,\n dependencies=dependencies,\n compiler_kwds=None)\n else:\n def wrapper_function(*args):\n arg_units = list(self._function._arg_units)\n\n if self._function.auto_vectorise:\n arg_units += [DIMENSIONLESS]\n if not len(args) == len(arg_units):\n raise ValueError(('Function %s got %d arguments, '\n 'expected %d') % (self._function.pyfunc.__name__, len(args),\n len(arg_units)))\n new_args = []\n for arg, arg_unit in zip(args, arg_units):\n if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):\n new_args.append(arg)\n else:\n new_args.append(Quantity.with_dimensions(arg,\n get_dimensions(arg_unit)))\n result = orig_func(*new_args)\n if isinstance(self._function._return_unit, Callable):\n return_unit = self._function._return_unit(*[get_dimensions(a)\n for a in args])\n else:\n return_unit = self._function._return_unit\n if return_unit == bool:\n if not (isinstance(result, bool) or\n np.asarray(result).dtype == bool):\n raise TypeError('The function %s returned '\n '%s, but it was expected '\n 'to return a boolean '\n 'value ' % (orig_func.__name__,\n result))\n elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:\n fail_for_dimension_mismatch(result,\n return_unit,\n 'The function %s returned '\n '{value}, but it was expected '\n 'to return a dimensionless '\n 'quantity' % orig_func.__name__,\n value=result)\n else:\n fail_for_dimension_mismatch(result,\n return_unit,\n ('The function %s returned '\n '{value}, but it was expected '\n 'to return a quantity with '\n 'units %r') % (orig_func.__name__,\n return_unit),\n value=result)\n return np.asarray(result)\n\n self._implementations['numpy'] = FunctionImplementation(name=None,\n code=wrapper_function,\n dependencies=dependencies)\n\n def add_implementation(self, target, code, namespace=None,\n dependencies=None, availability_check=None,\n name=None, compiler_kwds=None):\n self._implementations[target] = FunctionImplementation(name=name,\n code=code,\n dependencies=dependencies,\n availability_check=availability_check,\n namespace=namespace,\n compiler_kwds=compiler_kwds)\n\n def add_dynamic_implementation(self, target, code, namespace=None,\n 
dependencies=None, availability_check=None,\n name=None, compiler_kwds=None):\n '''\n Adds an \"dynamic implementation\" for this function. `code` and `namespace`\n arguments are expected to be callables that will be called in\n `Network.before_run` with the owner of the `CodeObject` as an argument.\n This allows to generate code that depends on details of the context it\n is run in, e.g. the ``dt`` of a clock.\n '''\n if not callable(code):\n raise TypeError('code argument has to be a callable, is type %s instead' % type(code))\n if namespace is not None and not callable(namespace):\n raise TypeError('namespace argument has to be a callable, is type %s instead' % type(code))\n self._implementations[target] = FunctionImplementation(name=name,\n code=code,\n namespace=namespace,\n dependencies=dependencies,\n availability_check=availability_check,\n dynamic=True,\n compiler_kwds=compiler_kwds)\n\n def __len__(self):\n return len(self._implementations)\n\n def __iter__(self):\n return iter(self._implementations)\n\n\ndef implementation(target, code=None, namespace=None, dependencies=None,\n discard_units=None, name=None, **compiler_kwds):\n '''\n A simple decorator to extend user-written Python functions to work with code\n generation in other languages.\n\n Parameters\n ----------\n target : str\n Name of the code generation target (e.g. ``'cython'``) for which to add\n an implementation.\n code : str or dict-like, optional\n What kind of code the target language expects is language-specific,\n e.g. C++ code allows for a dictionary of code blocks instead of a\n single string.\n namespaces : dict-like, optional\n A namespace dictionary (i.e. a mapping of names to values) that\n should be added to a `CodeObject` namespace when using this function.\n dependencies : dict-like, optional\n A mapping of names to `Function` objects, for additional functions\n needed by this function.\n discard_units: bool, optional\n Numpy functions can internally make use of the unit system. However,\n during a simulation run, state variables are passed around as unitless\n values for efficiency. If `discard_units` is set to ``False``, input\n arguments will have units added to them so that the function can still\n use units internally (the units will be stripped away from the return\n value as well). Alternatively, if `discard_units` is set to ``True``,\n the function will receive unitless values as its input. The namespace\n of the function will be altered to make references to units (e.g.\n ``ms``) refer to the corresponding floating point values so that no\n unit mismatch errors are raised. Note that this system cannot work in\n all cases, e.g. it does not work with functions that internally imports\n values (e.g. does ``from brian2 import ms``) or access values with\n units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no\n value is given, defaults to the preference setting\n `codegen.runtime.numpy.discard_units`.\n name : str, optional\n The name of the function in the target language. Should only be\n specified if the function has to be renamed for the target language.\n compiler_kwds : dict, optional\n Additional keyword arguments will be transferred to the code generation\n stage, e.g. 
for C++-based targets, the code can make use of additional\n header files by providing a list of strings as the ``headers`` argument.\n\n Notes\n -----\n While it is in principle possible to provide a numpy implementation\n as an argument for this decorator, this is normally not necessary -- the\n numpy implementation should be provided in the decorated function.\n\n If this decorator is used with other decorators such as `check_units` or\n `declare_types`, it should be the uppermost decorator (that is, the\n last one to be applied).\n\n Examples\n --------\n Sample usage::\n\n @implementation('cpp',\"\"\"\n #include<math.h>\n inline double usersin(double x)\n {\n return sin(x);\n }\n \"\"\")\n def usersin(x):\n return sin(x)\n '''\n\n def do_user_implementation(func):\n # Allow nesting of decorators\n if isinstance(func, Function):\n function = func\n else:\n function = Function(func)\n\n if discard_units: # Add a numpy implementation that discards units\n if not (target == 'numpy' and code is None):\n raise TypeError((\"'discard_units' can only be set for code \"\n \"generation target 'numpy', without providing \"\n \"any code.\"))\n function.implementations.add_numpy_implementation(wrapped_func=func,\n dependencies=dependencies,\n discard_units=discard_units,\n compiler_kwds=compiler_kwds)\n else:\n function.implementations.add_implementation(target, code=code,\n dependencies=dependencies,\n namespace=namespace,\n name=name,\n compiler_kwds=compiler_kwds)\n # # copy any annotation attributes\n # if hasattr(func, '_annotation_attributes'):\n # for attrname in func._annotation_attributes:\n # setattr(function, attrname, getattr(func, attrname))\n # function._annotation_attributes = getattr(func, '_annotation_attributes', [])\n return function\n return do_user_implementation\n\n\nclass SymbolicConstant(Constant):\n '''\n Class for representing constants (e.g. 
pi) that are understood by sympy.\n '''\n def __init__(self, name, sympy_obj, value):\n super(SymbolicConstant, self).__init__(name, value=value)\n self.sympy_obj = sympy_obj\n\n\n################################################################################\n# Standard functions and constants\n################################################################################\n\ndef _exprel(x):\n if x.is_zero:\n return S.One\n else:\n return (sympy.exp(x) - S.One)/x\n\nclass exprel(sympy_Function):\n \"\"\"\n Represents ``(exp(x) - 1)/x``.\n\n The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``\n is that the latter is prone to cancellation under finite precision\n arithmetic when x is close to zero, and cannot be evaluated when x is\n equal to zero.\n \"\"\"\n nargs = 1\n\n def fdiff(self, argindex=1):\n \"\"\"\n Returns the first derivative of this function.\n \"\"\"\n if argindex == 1:\n return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2\n else:\n raise sympy.ArgumentIndexError(self, argindex)\n\n\n def _eval_expand_func(self, **hints):\n return _exprel(*self.args)\n\n def _eval_rewrite_as_exp(self, arg, **kwargs):\n if arg.is_zero:\n return S.One\n else:\n return (sympy.exp(arg) - S.One)/arg\n\n _eval_rewrite_as_tractable = _eval_rewrite_as_exp\n\n @classmethod\n def eval(cls, arg):\n if arg is None:\n return None\n if arg.is_zero:\n return S.One\n\n exp_arg = sympy.exp.eval(arg)\n if exp_arg is not None:\n return (exp_arg - S.One)/arg\n\n def _eval_is_real(self):\n return self.args[0].is_real\n\n def _eval_is_finite(self):\n return self.args[0].is_finite\n\n_infinity_int = 1073741823 # maximum 32bit integer divided by 2\n\n\ndef timestep(t, dt):\n '''\n Converts a given time to an integer time step. This function slightly shifts\n the time before dividing it by ``dt`` to make sure that multiples of ``dt``\n do not end up in the preceding time step due to floating point issues. This\n function is used in the refractoriness calculation.\n\n .. 
versionadded:: 2.1.3\n\n Parameters\n ----------\n t : np.ndarray, float, Quantity\n The time to convert.\n dt : float or Quantity\n The length of a simulation time step.\n\n Returns\n -------\n ts : np.ndarray, np.int64\n The time step corresponding to the given time.\n\n Notes\n -----\n This function cannot handle infinity values, use big values instead (e.g.\n a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``\n variable for neurons that never spiked).\n '''\n elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)\n if elapsed_steps.shape == ():\n elapsed_steps = elapsed_steps.item()\n return elapsed_steps\n\n\nDEFAULT_FUNCTIONS = {\n # numpy functions that have the same name in numpy and math.h\n 'cos': Function(unitsafe.cos,\n sympy_func=sympy.functions.elementary.trigonometric.cos),\n 'sin': Function(unitsafe.sin,\n sympy_func=sympy.functions.elementary.trigonometric.sin),\n 'tan': Function(unitsafe.tan,\n sympy_func=sympy.functions.elementary.trigonometric.tan),\n 'cosh': Function(unitsafe.cosh,\n sympy_func=sympy.functions.elementary.hyperbolic.cosh),\n 'sinh': Function(unitsafe.sinh,\n sympy_func=sympy.functions.elementary.hyperbolic.sinh),\n 'tanh': Function(unitsafe.tanh,\n sympy_func=sympy.functions.elementary.hyperbolic.tanh),\n 'exp': Function(unitsafe.exp,\n sympy_func=sympy.functions.elementary.exponential.exp),\n 'log': Function(unitsafe.log,\n sympy_func=sympy.functions.elementary.exponential.log),\n 'log10': Function(unitsafe.log10,\n sympy_func=sympy_cfunctions.log10),\n 'expm1': Function(unitsafe.expm1,\n sympy_func=sympy_cfunctions.expm1),\n 'exprel': Function(unitsafe.exprel,\n sympy_func=exprel),\n 'log1p': Function(unitsafe.log1p,\n sympy_func=sympy_cfunctions.log1p),\n 'sqrt': Function(np.sqrt,\n sympy_func=sympy.functions.elementary.miscellaneous.sqrt,\n arg_units=[None], return_unit=lambda u: u**0.5),\n 'ceil': Function(np.ceil,\n sympy_func=sympy.functions.elementary.integers.ceiling,\n arg_units=[None], return_unit=lambda u: u),\n 'floor': Function(np.floor,\n sympy_func=sympy.functions.elementary.integers.floor,\n arg_units=[None], return_unit=lambda u: u),\n # numpy functions that have a different name in numpy and math.h\n 'arccos': Function(unitsafe.arccos,\n sympy_func=sympy.functions.elementary.trigonometric.acos),\n 'arcsin': Function(unitsafe.arcsin,\n sympy_func=sympy.functions.elementary.trigonometric.asin),\n 'arctan': Function(unitsafe.arctan,\n sympy_func=sympy.functions.elementary.trigonometric.atan),\n 'abs': Function(np.abs, return_type='highest',\n sympy_func=sympy.functions.elementary.complexes.Abs,\n arg_units=[None], return_unit=lambda u: u),\n 'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',\n arg_units=[None], return_unit=1),\n # functions that need special treatment\n 'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),\n 'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),\n 'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',\n stateless=False, auto_vectorise=True),\n 'clip': Function(pyfunc=np.clip,\n arg_units=[None, 'a', 'a'],\n arg_names=['a', 'a_min', 'a_max'],\n return_type='highest',\n return_unit=lambda u1, u2, u3: u1),\n 'int': Function(pyfunc=np.int_, return_type='integer',\n arg_units=[1], return_unit=1),\n 'timestep': Function(pyfunc=timestep, return_type='integer',\n arg_units=[second, second], return_unit=1)\n 
}\n\nDEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),\n 'e': SymbolicConstant('e', sympy.E, value=np.e),\n 'inf': SymbolicConstant('inf', S.Infinity,\n value=np.inf),\n '-inf': SymbolicConstant('-inf', S.NegativeInfinity,\n value=-np.inf)}\n",
"\nfrom brian2.core.network import schedule_propagation_offset\n\nimport pytest\nfrom numpy.testing import assert_equal, assert_array_equal\n\nfrom brian2 import *\nfrom brian2.utils.logger import catch_logs\nfrom brian2.devices.device import reinit_and_delete\nfrom brian2.tests.utils import assert_allclose\n\n\n@pytest.mark.codegen_independent\ndef test_str_repr():\n '''\n Test the string representation of a subgroup.\n '''\n G = NeuronGroup(10, 'v:1')\n SG = G[5:8]\n # very basic test, only make sure no error is raised\n assert len(str(SG))\n assert len(repr(SG))\n\n\ndef test_state_variables():\n '''\n Test the setting and accessing of state variables in subgroups.\n '''\n G = NeuronGroup(10, 'v : volt')\n SG = G[4:9]\n with pytest.raises(DimensionMismatchError):\n SG.__setattr__('v', -70)\n SG.v_ = float(-80*mV)\n assert_allclose(G.v,\n np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0])*mV)\n assert_allclose(SG.v,\n np.array([-80, -80, -80, -80, -80])*mV)\n assert_allclose(G.v_,\n np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0])*float(mV))\n assert_allclose(SG.v_,\n np.array([-80, -80, -80, -80, -80])*float(mV))\n # You should also be able to set variables with a string\n SG.v = 'v + i*mV'\n assert_allclose(SG.v[0], -80*mV)\n assert_allclose(SG.v[4], -76*mV)\n assert_allclose(G.v[4:9], -80*mV + np.arange(5)*mV)\n\n # Calculating with state variables should work too\n assert all(G.v[4:9] - SG.v == 0)\n\n # And in-place modification should work as well\n SG.v += 10*mV\n assert_allclose(G.v[4:9], -70*mV + np.arange(5)*mV)\n SG.v *= 2\n assert_allclose(G.v[4:9], 2*(-70*mV + np.arange(5)*mV))\n # with unit checking\n with pytest.raises(DimensionMismatchError):\n SG.v.__iadd__(3*second)\n with pytest.raises(DimensionMismatchError):\n SG.v.__iadd__(3)\n with pytest.raises(DimensionMismatchError):\n SG.v.__imul__(3*second)\n\n # Indexing with subgroups\n assert_equal(G.v[SG], SG.v[:])\n\n@pytest.mark.standalone_compatible\ndef test_state_variables_simple():\n G = NeuronGroup(10, '''a : 1\n b : 1\n c : 1\n d : 1\n ''')\n SG = G[3:7]\n SG.a = 1\n SG.a['i == 0'] = 2\n SG.b = 'i'\n SG.b['i == 3'] = 'i * 2'\n SG.c = np.arange(3, 7)\n SG.d[1:2] = 4\n SG.d[2:4] = [1, 2]\n run(0*ms)\n assert_equal(G.a[:], [0, 0, 0, 2, 1, 1, 1, 0, 0, 0])\n assert_equal(G.b[:], [0, 0, 0, 0, 1, 2, 6, 0, 0, 0])\n assert_equal(G.c[:], [0, 0, 0, 3, 4, 5, 6, 0, 0, 0])\n assert_equal(G.d[:], [0, 0, 0, 0, 4, 1, 2, 0, 0, 0])\n\n\ndef test_state_variables_string_indices():\n '''\n Test accessing subgroups with string indices.\n '''\n G = NeuronGroup(10, 'v : volt')\n SG = G[4:9]\n assert len(SG.v['i>3']) == 1\n\n G.v = np.arange(10) * mV\n assert len(SG.v['v>7.5*mV']) == 1\n\n # Combined string indexing and assignment\n SG.v['i > 3'] = 'i*10*mV'\n\n assert_allclose(G.v[:], [0, 1, 2, 3, 4, 5, 6, 7, 40, 9] * mV)\n\n@pytest.mark.codegen_independent\ndef test_state_variables_group_as_index():\n G = NeuronGroup(10, 'v : 1')\n SG = G[4:9]\n G.v[SG] = 1\n assert_equal(G.v[:], np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0]))\n G.v = 1\n G.v[SG] = '2*v'\n assert_equal(G.v[:], np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 1]))\n\n\n@pytest.mark.codegen_independent\ndef test_state_variables_group_as_index_problematic():\n G = NeuronGroup(10, 'v : 1')\n SG = G[4:9]\n G.v = 1\n tests = [('i', 1),\n ('N', 1),\n ('N + i', 2),\n ('v', 0)]\n for value, n_warnings in tests:\n with catch_logs() as l:\n G.v.__setitem__(SG, value)\n assert len(l) == n_warnings, 'expected %d, got %d warnings' % (n_warnings, len(l))\n assert 
all([entry[1].endswith('ambiguous_string_expression')\n for entry in l])\n\n@pytest.mark.standalone_compatible\ndef test_state_monitor():\n G = NeuronGroup(10, 'v : volt')\n G.v = np.arange(10) * volt\n SG = G[5:]\n mon_all = StateMonitor(SG, 'v', record=True)\n mon_0 = StateMonitor(SG, 'v', record=0)\n run(defaultclock.dt)\n\n assert_allclose(mon_0[0].v, mon_all[0].v)\n assert_allclose(mon_0[0].v, np.array([5]) * volt)\n assert_allclose(mon_all.v.flatten(), np.arange(5, 10) * volt)\n\n with pytest.raises(IndexError):\n mon_all[5]\n\n\ndef test_shared_variable():\n '''Make sure that shared variables work with subgroups'''\n G = NeuronGroup(10, 'v : volt (shared)')\n G.v = 1*volt\n SG = G[5:]\n assert SG.v == 1*volt\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation():\n G1 = NeuronGroup(10, '')\n G2 = NeuronGroup(20, '')\n SG1 = G1[:5]\n SG2 = G2[10:]\n S = Synapses(SG1, SG2)\n S.connect(i=2, j=2) # Should correspond to (2, 12)\n S.connect('i==2 and j==5') # Should correspond to (2, 15)\n\n run(0*ms) # for standalone\n\n # Internally, the \"real\" neuron indices should be used\n assert_equal(S._synaptic_pre[:], np.array([2, 2]))\n assert_equal(S._synaptic_post[:], np.array([12, 15]))\n # For the user, the subgroup-relative indices should be presented\n assert_equal(S.i[:], np.array([2, 2]))\n assert_equal(S.j[:], np.array([2, 5]))\n # N_incoming and N_outgoing should also be correct\n assert all(S.N_outgoing[2, :] == 2)\n assert all(S.N_incoming[:, 2] == 1)\n assert all(S.N_incoming[:, 5] == 1)\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation_state_vars():\n G1 = NeuronGroup(10, 'v : 1')\n G2 = NeuronGroup(20, 'v : 1')\n G1.v = 'i'\n G2.v = '10 + i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n\n # connect based on pre-/postsynaptic state variables\n S2 = Synapses(SG1, SG2, 'w:1')\n S2.connect('v_pre > 2')\n\n S3 = Synapses(SG1, SG2, 'w:1')\n S3.connect('v_post < 25')\n\n S4 = Synapses(SG2, SG1, 'w:1')\n S4.connect('v_post > 2')\n\n S5 = Synapses(SG2, SG1, 'w:1')\n S5.connect('v_pre < 25')\n\n run(0*ms) # for standalone\n\n assert len(S2) == 2 * len(SG2), str(len(S2))\n assert all(S2.v_pre[:] > 2)\n assert len(S3) == 5 * len(SG1), '%s != %s ' % (len(S3), 5 * len(SG1))\n assert all(S3.v_post[:] < 25)\n\n assert len(S4) == 2 * len(SG2), str(len(S4))\n assert all(S4.v_post[:] > 2)\n assert len(S5) == 5 * len(SG1), '%s != %s ' % (len(53), 5 * len(SG1))\n assert all(S5.v_pre[:] < 25)\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation_generator():\n G1 = NeuronGroup(10, 'v:1')\n G2 = NeuronGroup(20, 'v:1')\n G1.v = 'i'\n G2.v = '10 + i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n S = Synapses(SG1, SG2, 'w:1')\n S.connect(j='i*2 + k for k in range(2)') # diverging connections\n\n # connect based on pre-/postsynaptic state variables\n S2 = Synapses(SG1, SG2, 'w:1')\n S2.connect(j='k for k in range(N_post) if v_pre > 2')\n\n S3 = Synapses(SG1, SG2, 'w:1')\n S3.connect(j='k for k in range(N_post) if v_post < 25')\n\n S4 = Synapses(SG2, SG1, 'w:1')\n S4.connect(j='k for k in range(N_post) if v_post > 2')\n\n S5 = Synapses(SG2, SG1, 'w:1')\n S5.connect(j='k for k in range(N_post) if v_pre < 25')\n\n run(0*ms) # for standalone\n\n # Internally, the \"real\" neuron indices should be used\n assert_equal(S._synaptic_pre[:], np.arange(5).repeat(2))\n assert_equal(S._synaptic_post[:], np.arange(10)+10)\n # For the user, the subgroup-relative indices should be presented\n assert_equal(S.i[:], np.arange(5).repeat(2))\n assert_equal(S.j[:], np.arange(10))\n\n # N_incoming and 
N_outgoing should also be correct\n assert all(S.N_outgoing[:] == 2)\n assert all(S.N_incoming[:] == 1)\n\n assert len(S2) == 2 * len(SG2), str(len(S2))\n assert all(S2.v_pre[:] > 2)\n assert len(S3) == 5 * len(SG1), '%s != %s ' % (len(S3), 5 * len(SG1))\n assert all(S3.v_post[:] < 25)\n\n assert len(S4) == 2 * len(SG2), str(len(S4))\n assert all(S4.v_post[:] > 2)\n assert len(S5) == 5 * len(SG1), '%s != %s ' % (len(S5), 5 * len(SG1))\n assert all(S5.v_pre[:] < 25)\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation_generator_multiple_synapses():\n G1 = NeuronGroup(10, 'v:1')\n G2 = NeuronGroup(20, 'v:1')\n G1.v = 'i'\n G2.v = '10 + i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n S1 = Synapses(SG1, SG2)\n S1.connect(j='k for k in range(N_post)', n='i')\n\n S2 = Synapses(SG1, SG2)\n S2.connect(j='k for k in range(N_post)', n='j')\n\n S3 = Synapses(SG2, SG1)\n S3.connect(j='k for k in range(N_post)', n='i')\n\n S4 = Synapses(SG2, SG1)\n S4.connect(j='k for k in range(N_post)', n='j')\n\n S5 = Synapses(SG1, SG2)\n S5.connect(j='k for k in range(N_post)', n='i+j')\n\n S6 = Synapses(SG2, SG1)\n S6.connect(j='k for k in range(N_post)', n='i+j')\n\n S7 = Synapses(SG1, SG2)\n S7.connect(j='k for k in range(N_post)', n='int(v_pre>2)*2')\n\n S8 = Synapses(SG2, SG1)\n S8.connect(j='k for k in range(N_post)', n='int(v_post>2)*2')\n\n S9 = Synapses(SG1, SG2)\n S9.connect(j='k for k in range(N_post)', n='int(v_post>22)*2')\n\n S10 = Synapses(SG2, SG1)\n S10.connect(j='k for k in range(N_post)', n='int(v_pre>22)*2')\n\n run(0*ms) # for standalone\n\n # straightforward loop instead of doing something clever...\n for source in range(len(SG1)):\n assert_equal(S1.j[source, :], np.arange(len(SG2)).repeat(source))\n assert_equal(S2.j[source, :], np.arange(len(SG2)).repeat(np.arange(len(SG2))))\n assert_equal(S3.i[:, source], np.arange(len(SG2)).repeat(np.arange(len(SG2))))\n assert_equal(S4.i[:, source], np.arange(len(SG2)).repeat(source))\n assert_equal(S5.j[source, :], np.arange(len(SG2)).repeat(np.arange(len(SG2))+source))\n assert_equal(S6.i[:, source], np.arange(len(SG2)).repeat(np.arange(len(SG2)) + source))\n if source > 2:\n assert_equal(S7.j[source, :], np.arange(len(SG2)).repeat(2))\n assert_equal(S8.i[:, source], np.arange(len(SG2)).repeat(2))\n else:\n assert len(S7.j[source, :]) == 0\n assert len(S8.i[:, source]) == 0\n assert_equal(S9.j[source, :], np.arange(3, len(SG2)).repeat(2))\n assert_equal(S10.i[:, source], np.arange(3, len(SG2)).repeat(2))\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation_generator_complex_ranges():\n G1 = NeuronGroup(10, 'v:1')\n G2 = NeuronGroup(20, 'v:1')\n G1.v = 'i'\n G2.v = '10 + i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n S = Synapses(SG1, SG2)\n S.connect(j='i+k for k in range(N_post-i)') # Connect to all j>i\n\n # connect based on pre-/postsynaptic state variables\n S2 = Synapses(SG1, SG2)\n S2.connect(j='k for k in range(N_post * int(v_pre > 2))')\n\n # connect based on pre-/postsynaptic state variables\n S3 = Synapses(SG2, SG1)\n S3.connect(j='k for k in range(N_post * int(v_pre > 22))')\n\n run(0*ms) # for standalone\n\n for syn_source in range(5):\n # Internally, the \"real\" neuron indices should be used\n assert_equal(S._synaptic_post[syn_source, :],\n 10 + syn_source + np.arange(10 - syn_source))\n # For the user, the subgroup-relative indices should be presented\n assert_equal(S.j[syn_source, :], syn_source + np.arange(10-syn_source))\n\n assert len(S2) == 2 * len(SG2), str(len(S2))\n assert all(S2.v_pre[:] > 2)\n assert len(S3) == 7 * 
len(SG1), str(len(S3))\n assert all(S3.v_pre[:] > 22)\n\n\n@pytest.mark.standalone_compatible\ndef test_synapse_creation_generator_random():\n G1 = NeuronGroup(10, 'v:1')\n G2 = NeuronGroup(20, 'v:1')\n G1.v = 'i'\n G2.v = '10 + i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n\n # connect based on pre-/postsynaptic state variables\n S2 = Synapses(SG1, SG2)\n S2.connect(j='k for k in sample(N_post, p=1.0*int(v_pre > 2))')\n\n S3 = Synapses(SG2, SG1)\n S3.connect(j='k for k in sample(N_post, p=1.0*int(v_pre > 22))')\n\n run(0*ms) # for standalone\n\n assert len(S2) == 2 * len(SG2), str(len(S2))\n assert all(S2.v_pre[:] > 2)\n assert len(S3) == 7 * len(SG1), str(len(S3))\n assert all(S3.v_pre[:] > 22)\n\n\ndef test_synapse_access():\n G1 = NeuronGroup(10, 'v:1')\n G1.v = 'i'\n G2 = NeuronGroup(20, 'v:1')\n G2.v = 'i'\n SG1 = G1[:5]\n SG2 = G2[10:]\n S = Synapses(SG1, SG2, 'w:1')\n S.connect(True)\n S.w['j == 0'] = 5\n assert all(S.w['j==0'] == 5)\n S.w[2, 2] = 7\n assert all(S.w['i==2 and j==2'] == 7)\n S.w = '2*j'\n assert all(S.w[:, 1] == 2)\n\n assert len(S.w[:, 10]) == 0\n assert len(S.w['j==10']) == 0\n\n # Test referencing pre- and postsynaptic variables\n assert_equal(S.w[2:, :], S.w['v_pre >= 2'])\n assert_equal(S.w[:, :5], S.w['v_post < 15'])\n S.w = 'v_post'\n assert_equal(S.w[:], S.j[:] + 10)\n S.w = 'v_post + v_pre'\n assert_equal(S.w[:], S.j[:] + 10 + S.i[:])\n\n # Test using subgroups as indices\n assert len(S) == len(S.w[SG1, SG2])\n assert_equal(S.w[SG1, 1], S.w[:, 1])\n assert_equal(S.w[1, SG2], S.w[1, :])\n assert len(S.w[SG1, 10]) == 0\n\n\ndef test_synapses_access_subgroups():\n G1 = NeuronGroup(5, 'x:1')\n G2 = NeuronGroup(10, 'y:1')\n SG1 = G1[2:5]\n SG2 = G2[4:9]\n S = Synapses(G1, G2, 'w:1')\n S.connect()\n S.w[SG1, SG2] = 1\n assert_equal(S.w['(i>=2 and i<5) and (j>=4 and j<9)'], 1)\n assert_equal(S.w['not ((i>=2 and i<5) and (j>=4 and j<9))'], 0)\n S.w = 0\n S.w[SG1, :] = 1\n assert_equal(S.w['i>=2 and i<5'], 1)\n assert_equal(S.w['not (i>=2 and i<5)'], 0)\n S.w = 0\n S.w[:, SG2] = 1\n assert_equal(S.w['j>=4 and j<9'], 1)\n assert_equal(S.w['not (j>=4 and j<9)'], 0)\n\n\n@pytest.mark.codegen_independent\ndef test_synapses_access_subgroups_problematic():\n G1 = NeuronGroup(5, 'x:1')\n G2 = NeuronGroup(10, 'y:1')\n SG1 = G1[2:5]\n SG2 = G2[4:9]\n S = Synapses(G1, G2, 'w:1')\n S.connect()\n\n # Note that \"j\" is not ambiguous, because the equivalent in the target group\n # is called \"i\" (this previously raised a warning)\n tests = [\n ((SG1, slice(None)), 'i', 1),\n ((SG1, slice(None)), 'i + N_pre', 2),\n ((SG1, slice(None)), 'N_pre', 1),\n ((slice(None), SG2), 'j', 0),\n ((slice(None), SG2), 'N_post', 1),\n ((slice(None), SG2), 'N', 1),\n ((SG1, SG2), 'i', 1),\n ((SG1, SG2), 'i + j', 1),\n ((SG1, SG2), 'N_pre', 1),\n ((SG1, SG2), 'j', 0),\n ((SG1, SG2), 'N_post', 1),\n ((SG1, SG2), 'N', 1),\n # These should not raise a warning\n ((SG1, SG2), 'w', 0),\n ((SG1, SG2), 'x_pre', 0),\n ((SG1, SG2), 'y_post', 0),\n ((SG1, SG2), 'y', 0)\n ]\n for item, value, n_warnings in tests:\n with catch_logs() as l:\n S.w.__setitem__(item, value)\n assert len(l) == n_warnings, 'expected %d, got %d warnings' % (n_warnings, len(l))\n assert all([entry[1].endswith('ambiguous_string_expression')\n for entry in l])\n\n\n@pytest.mark.standalone_compatible\ndef test_subgroup_summed_variable():\n # Check in particular that only neurons targeted are reset to 0 (see github issue #925)\n source = NeuronGroup(1, \"\")\n target = NeuronGroup(5, \"Iin : 1\")\n target.Iin = 10\n target1 = target[1:2]\n target2 = 
target[3:]\n\n syn1 = Synapses(source, target1, \"Iin_post = 5 : 1 (summed)\")\n syn1.connect(True)\n syn2 = Synapses(source, target2, \"Iin_post = 1 : 1 (summed)\")\n syn2.connect(True)\n\n run(2 * defaultclock.dt)\n\n assert_array_equal(target.Iin, [10, 5, 10, 1, 1])\n\n\ndef test_subexpression_references():\n '''\n Assure that subexpressions in targeted groups are handled correctly.\n '''\n G = NeuronGroup(10, '''v : 1\n v2 = 2*v : 1''')\n G.v = np.arange(10)\n SG1 = G[:5]\n SG2 = G[5:]\n\n S1 = Synapses(SG1, SG2, '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S1.connect('i==(5-1-j)')\n assert_equal(S1.i[:], np.arange(5))\n assert_equal(S1.j[:], np.arange(5)[::-1])\n assert_equal(S1.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S1.x[:], np.arange(5)*2+1)\n\n S2 = Synapses(G, SG2, '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S2.connect('i==(5-1-j)')\n assert_equal(S2.i[:], np.arange(5))\n assert_equal(S2.j[:], np.arange(5)[::-1])\n assert_equal(S2.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S2.x[:], np.arange(5)*2+1)\n\n S3 = Synapses(SG1, G, '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S3.connect('i==(10-1-j)')\n assert_equal(S3.i[:], np.arange(5))\n assert_equal(S3.j[:], np.arange(10)[:-6:-1])\n assert_equal(S3.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S3.x[:], np.arange(5)*2+1)\n\n\ndef test_subexpression_no_references():\n '''\n Assure that subexpressions are handled correctly, even\n when the subgroups are created on-the-fly.\n '''\n G = NeuronGroup(10, '''v : 1\n v2 = 2*v : 1''')\n G.v = np.arange(10)\n\n assert_equal(G[5:].v2, np.arange(5, 10)*2)\n\n S1 = Synapses(G[:5], G[5:], '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S1.connect('i==(5-1-j)')\n assert_equal(S1.i[:], np.arange(5))\n assert_equal(S1.j[:], np.arange(5)[::-1])\n assert_equal(S1.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S1.x[:], np.arange(5)*2+1)\n\n S2 = Synapses(G, G[5:], '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S2.connect('i==(5-1-j)')\n assert_equal(S2.i[:], np.arange(5))\n assert_equal(S2.j[:], np.arange(5)[::-1])\n assert_equal(S2.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S2.x[:], np.arange(5)*2+1)\n\n S3 = Synapses(G[:5], G, '''w : 1\n u = v2_post + 1 : 1\n x = v2_pre + 1 : 1''')\n S3.connect('i==(10-1-j)')\n assert_equal(S3.i[:], np.arange(5))\n assert_equal(S3.j[:], np.arange(10)[:-6:-1])\n assert_equal(S3.u[:], np.arange(10)[:-6:-1]*2+1)\n assert_equal(S3.x[:], np.arange(5)*2+1)\n\n\n@pytest.mark.standalone_compatible\ndef test_synaptic_propagation():\n G1 = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0')\n G1.v['i%2==1'] = 1.1 # odd numbers should spike\n G2 = NeuronGroup(20, 'v:1')\n SG1 = G1[1:6]\n SG2 = G2[10:]\n S = Synapses(SG1, SG2, on_pre='v+=1')\n S.connect('i==j')\n run(defaultclock.dt + schedule_propagation_offset())\n expected = np.zeros(len(G2))\n # Neurons 1, 3, 5 spiked and are connected to 10, 12, 14\n expected[[10, 12, 14]] = 1\n assert_equal(np.asarray(G2.v).flatten(), expected)\n\n\n@pytest.mark.standalone_compatible\ndef test_synaptic_propagation_2():\n # This tests for the bug in github issue #461\n source = NeuronGroup(100, '', threshold='True')\n sub_source = source[99:]\n target = NeuronGroup(1, 'v:1')\n syn = Synapses(sub_source, target, on_pre='v+=1')\n syn.connect()\n run(defaultclock.dt + schedule_propagation_offset())\n assert target.v[0] == 1.0\n\n\n@pytest.mark.standalone_compatible\ndef test_run_regularly():\n # See github issue #922\n\n group = NeuronGroup(10, 'v: integer')\n # 
Full group\n group.run_regularly('v += 16')\n # Subgroup with explicit reference\n subgroup = group[:2]\n subgroup.run_regularly('v += 8')\n # Subgroup with explicit reference and reference for run_regularly operation\n subgroup2 = group[2:4]\n updater = subgroup2.run_regularly('v += 4')\n # Subgroup without reference\n group[4:6].run_regularly('v += 2')\n # Subgroup without reference, with reference for run_regularly operation\n updater2 = group[6:8].run_regularly('v += 1')\n\n run(defaultclock.dt)\n assert_array_equal(group.v, [24, 24, 20, 20, 18, 18, 17, 17, 16, 16])\n\n\n@pytest.mark.standalone_compatible\ndef test_spike_monitor():\n G = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0')\n G.v[0] = 1.1\n G.v[2] = 1.1\n G.v[5] = 1.1\n SG = G[3:]\n SG2 = G[:3]\n s_mon = SpikeMonitor(G)\n sub_s_mon = SpikeMonitor(SG)\n sub_s_mon2 = SpikeMonitor(SG2)\n run(defaultclock.dt)\n assert_equal(s_mon.i, np.array([0, 2, 5]))\n assert_equal(s_mon.t_, np.zeros(3))\n assert_equal(sub_s_mon.i, np.array([2]))\n assert_equal(sub_s_mon.t_, np.zeros(1))\n assert_equal(sub_s_mon2.i, np.array([0, 2]))\n assert_equal(sub_s_mon2.t_, np.zeros(2))\n expected = np.zeros(10, dtype=int)\n expected[[0, 2, 5]] = 1\n assert_equal(s_mon.count, expected)\n expected = np.zeros(7, dtype=int)\n expected[[2]] = 1\n assert_equal(sub_s_mon.count, expected)\n assert_equal(sub_s_mon2.count, np.array([1, 0, 1]))\n\n\n@pytest.mark.codegen_independent\ndef test_wrong_indexing():\n G = NeuronGroup(10, 'v:1')\n with pytest.raises(TypeError):\n G['string']\n\n with pytest.raises(IndexError):\n G[10]\n with pytest.raises(IndexError):\n G[10:]\n with pytest.raises(IndexError):\n G[::2]\n with pytest.raises(IndexError):\n G[3:2]\n with pytest.raises(IndexError):\n G[[5, 4, 3]]\n with pytest.raises(IndexError):\n G[[2, 4, 6]]\n with pytest.raises(IndexError):\n G[[-1, 0, 1]]\n with pytest.raises(IndexError):\n G[[9, 10, 11]]\n with pytest.raises(IndexError):\n G[[9, 10]]\n with pytest.raises(IndexError):\n G[[10, 11]]\n with pytest.raises(TypeError):\n G[[2.5, 3.5, 4.5]]\n\n\n@pytest.mark.codegen_independent\ndef test_alternative_indexing():\n G = NeuronGroup(10, 'v : integer')\n G.v = 'i'\n assert_equal(G[-3:].v, np.array([7, 8, 9]))\n assert_equal(G[3].v, np.array([3]))\n assert_equal(G[[3, 4, 5]].v, np.array([3, 4, 5]))\n\n\ndef test_no_reference_1():\n '''\n Using subgroups without keeping an explicit reference. Basic access.\n '''\n G = NeuronGroup(10, 'v:1')\n G.v = np.arange(10)\n assert_equal(G[:5].v[:], G.v[:5])\n\n\n@pytest.mark.standalone_compatible\ndef test_no_reference_2():\n '''\n Using subgroups without keeping an explicit reference. Monitors\n '''\n G = NeuronGroup(2, 'v:1', threshold='v>1', reset='v=0')\n G.v = [0, 1.1]\n state_mon = StateMonitor(G[:1], 'v', record=True)\n spike_mon = SpikeMonitor(G[1:])\n rate_mon = PopulationRateMonitor(G[:2])\n run(2*defaultclock.dt)\n assert_equal(state_mon[0].v[:], np.zeros(2))\n assert_equal(spike_mon.i[:], np.array([0]))\n assert_equal(spike_mon.t[:], np.array([0])*second)\n assert_equal(rate_mon.rate[:], np.array([0.5, 0])/defaultclock.dt)\n\n\n@pytest.mark.standalone_compatible\ndef test_no_reference_3():\n '''\n Using subgroups without keeping an explicit reference. 
Monitors\n '''\n G = NeuronGroup(2, 'v:1', threshold='v>1', reset='v=0')\n G.v = [1.1, 0]\n S = Synapses(G[:1], G[1:], on_pre='v+=1')\n S.connect()\n run(defaultclock.dt + schedule_propagation_offset())\n assert_equal(G.v[:], np.array([0, 1]))\n\n\n@pytest.mark.standalone_compatible\ndef test_no_reference_4():\n '''\n Using subgroups without keeping an explicit reference. Synapses\n '''\n G1 = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0')\n G1.v['i%2==1'] = 1.1 # odd numbers should spike\n G2 = NeuronGroup(20, 'v:1')\n S = Synapses(G1[1:6], G2[10:], on_pre='v+=1')\n S.connect('i==j')\n run(defaultclock.dt + schedule_propagation_offset())\n expected = np.zeros(len(G2))\n # Neurons 1, 3, 5 spiked and are connected to 10, 12, 14\n expected[[10, 12, 14]] = 1\n assert_equal(np.asarray(G2.v).flatten(), expected)\n\n\ndef test_recursive_subgroup():\n '''\n Create a subgroup of a subgroup\n '''\n G = NeuronGroup(10, 'v : 1')\n G.v = 'i'\n SG = G[3:8]\n SG2 = SG[2:4]\n assert_equal(SG2.v[:], np.array([5, 6]))\n assert_equal(SG2.v[:], SG.v[2:4])\n assert SG2.source.name == G.name\n\nif __name__ == '__main__':\n test_str_repr()\n test_state_variables()\n test_state_variables_simple()\n test_state_variables_string_indices()\n test_state_variables_group_as_index()\n test_state_variables_group_as_index_problematic()\n test_state_monitor()\n test_shared_variable()\n test_synapse_creation()\n test_synapse_creation_state_vars()\n test_synapse_creation_generator()\n test_synapse_creation_generator_complex_ranges()\n test_synapse_creation_generator_random()\n test_synapse_creation_generator_multiple_synapses()\n test_synapse_access()\n test_synapses_access_subgroups()\n test_synapses_access_subgroups_problematic()\n test_subgroup_summed_variable()\n test_subexpression_references()\n test_subexpression_no_references()\n test_synaptic_propagation()\n test_synaptic_propagation_2()\n test_run_regularly()\n test_spike_monitor()\n test_wrong_indexing()\n test_no_reference_1()\n test_no_reference_2()\n test_no_reference_3()\n test_no_reference_4()\n test_recursive_subgroup()\n",
"\nimport pytest\nfrom numpy.testing import assert_equal\nimport tempfile\nimport os\n\nfrom brian2.spatialneuron import *\nfrom brian2.units import um, cm, second, DimensionMismatchError\nfrom brian2 import numpy as np\nfrom brian2.tests.utils import assert_allclose\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_soma():\n soma = Soma(diameter=10*um)\n assert isinstance(soma, Morphology)\n # Single compartment\n assert soma.n == 1\n assert soma.total_sections == 1\n assert soma.total_compartments == 1\n with pytest.raises(TypeError):\n len(soma) # ambiguous\n # Compartment attributes\n assert_equal(soma.diameter, [10]*um)\n assert_equal(soma.length, [10]*um)\n assert_equal(soma.distance, [0]*um)\n assert_equal(soma.end_distance, 0 * um)\n assert soma.r_length_1 > 1*cm\n assert soma.r_length_2 > 1*cm\n assert_equal(soma.area, np.pi*soma.diameter**2)\n assert_allclose(soma.volume, 1.0/6.0*np.pi*(10*um)**3)\n\n # No coordinates were specified\n assert soma.start_x is None\n assert soma.start_y is None\n assert soma.start_z is None\n assert soma.x is None\n assert soma.y is None\n assert soma.z is None\n assert soma.end_x is None\n assert soma.end_y is None\n assert soma.end_z is None\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_soma_coordinates():\n # Specify only one of the coordinates\n xyz = {'x', 'y', 'z'}\n for coord in xyz:\n kwds = {coord: 5*um}\n soma = Soma(diameter=10*um, **kwds)\n # Length shouldn't change (not defined by coordinates but by the diameter)\n assert_equal(soma.length, [10]*um)\n assert_equal(soma.distance, [0]*um)\n\n # Coordinates should be specified now, with 0 values for the other\n # coordinates\n for other_coord in xyz - {coord}:\n assert_equal(getattr(soma, 'start_' + other_coord), [0]*um)\n assert_equal(getattr(soma, other_coord), [0]*um)\n assert_equal(getattr(soma, 'end_' + other_coord), [0]*um)\n\n assert_equal(getattr(soma, 'start_' + coord), [5]*um)\n assert_equal(getattr(soma, coord), [5]*um)\n assert_equal(getattr(soma, 'end_' + coord), [5]*um)\n\n # Specify all coordinates\n soma = Soma(diameter=10*um, x=1*um, y=2*um, z=3*um)\n # Length shouldn't change (not defined by coordinates but by the diameter)\n assert_equal(soma.length, [10]*um)\n assert_equal(soma.distance, [0]*um)\n\n assert_equal(soma.start_x, 1*um)\n assert_equal(soma.x, 1*um)\n assert_equal(soma.end_x, 1*um)\n assert_equal(soma.start_y, 2*um)\n assert_equal(soma.y, 2*um)\n assert_equal(soma.end_y, 2*um)\n assert_equal(soma.start_z, 3*um)\n assert_equal(soma.z, 3*um)\n assert_equal(soma.end_z, 3*um)\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_cylinder():\n n = 10\n cylinder = Cylinder(n=n, diameter=10*um, length=200*um)\n assert isinstance(cylinder, Morphology)\n # Single section with 10 compartments\n assert cylinder.n == n\n assert cylinder.total_sections == 1\n assert cylinder.total_compartments == n\n with pytest.raises(TypeError):\n len(cylinder) # ambiguous\n\n # Compartment attributes\n assert_equal(cylinder.diameter, np.ones(n)*10*um)\n assert_equal(cylinder.length, np.ones(n)*20*um)\n assert_equal(cylinder.distance, np.arange(n)*20*um + 10*um)\n assert_equal(cylinder.end_distance, 200 * um)\n # TODO: r_length\n assert_allclose(cylinder.area, np.pi*cylinder.diameter*cylinder.length)\n assert_allclose(cylinder.volume, 1.0/4.0*np.pi*cylinder.diameter**2*cylinder.length)\n\n # No coordinates were specified\n assert cylinder.start_x is None\n assert cylinder.start_y is None\n assert cylinder.start_z is None\n assert cylinder.x is None\n assert 
cylinder.y is None\n assert cylinder.z is None\n assert cylinder.end_x is None\n assert cylinder.end_y is None\n assert cylinder.end_z is None\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_cylinder_coordinates():\n # Specify only the end-point of the section\n n = 10\n # Specify only one of the coordinates\n xyz = {'x', 'y', 'z'}\n for coord in xyz:\n kwds = {coord: [0, 200]*um}\n cylinder = Cylinder(n=n, diameter=10*um, **kwds)\n assert_equal(cylinder.diameter, np.ones(n)*10*um)\n assert_equal(cylinder.length, np.ones(n)*20*um)\n assert_equal(cylinder.distance, np.arange(n)*20*um + 10*um)\n assert_equal(cylinder.end_distance, 200 * um)\n\n # Coordinates should be specified now, with 0 values for the other\n # coordinates\n for other_coord in xyz - {coord}:\n assert_equal(getattr(cylinder, 'start_' + other_coord), np.zeros(n)*um)\n assert_equal(getattr(cylinder, other_coord), np.zeros(n)*um)\n assert_equal(getattr(cylinder, 'end_' + other_coord), np.zeros(n)*um)\n\n assert_equal(getattr(cylinder, 'start_' + coord), np.arange(n)*20*um)\n assert_equal(getattr(cylinder, coord), np.arange(n)*20*um + 10*um)\n assert_equal(getattr(cylinder, 'end_' + coord), np.arange(n)*20*um + 20*um)\n\n # Specify all coordinates\n val = [0, 200.0/np.sqrt(3.0)]*um\n cylinder = Cylinder(n=n, diameter=10*um, x=val, y=val, z=val)\n\n assert_equal(cylinder.diameter, np.ones(n)*10*um)\n assert_allclose(cylinder.length, np.ones(n)*20*um)\n assert_allclose(cylinder.distance, np.arange(n)*20*um + 10*um)\n assert_allclose(cylinder.end_distance, 200 * um)\n\n for coord in ['x', 'y', 'z']:\n assert_allclose(getattr(cylinder, 'start_' + coord), np.arange(n)*val[1]/n)\n assert_allclose(getattr(cylinder, coord), np.arange(n)*val[1]/n + 0.5*val[1]/n)\n assert_allclose(getattr(cylinder, 'end_' + coord), np.arange(n)*val[1]/n + val[1]/n)\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_section():\n n = 10\n # No difference to a cylinder\n sec = Section(n=n, diameter=np.ones(n+1)*10*um, length=np.ones(n)*20*um)\n cyl = Cylinder(n=1, diameter=10*um, length=0*um) # dummy cylinder\n cyl.child = sec\n assert isinstance(sec, Morphology)\n # Single section with 10 compartments\n assert sec.n == n\n assert sec.total_sections == 1\n assert sec.total_compartments == n\n with pytest.raises(TypeError):\n len(sec) # ambiguous\n\n # Compartment attributes\n assert_allclose(sec.diameter, np.ones(n)*10*um)\n assert_allclose(sec.length, np.ones(n)*20*um)\n assert_allclose(sec.distance, np.arange(n)*20*um + 10*um)\n assert_allclose(sec.end_distance, 200 * um)\n # TODO: r_length\n assert_allclose(sec.area,\n np.pi*0.5*(sec.start_diameter + sec.end_diameter)*sec.length)\n assert_allclose(sec.volume, 1.0/4.0*np.pi*sec.diameter**2*sec.length)\n\n # No coordinates were specified\n assert sec.start_x is None\n assert sec.start_y is None\n assert sec.start_z is None\n assert sec.x is None\n assert sec.y is None\n assert sec.z is None\n assert sec.end_x is None\n assert sec.end_y is None\n assert sec.end_z is None\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_section_coordinates_single():\n # Specify only the end-point of the section (no difference to cylinder)\n n = 10\n # Specify only one of the coordinates\n xyz = {'x', 'y', 'z'}\n for coord in xyz:\n kwds = {coord: np.linspace(0*um, 200*um, n+1)}\n sec = Section(n=n, diameter=np.ones(n+1)*10*um, **kwds)\n cyl = Cylinder(n=1, diameter=10*um, length=0*um) # dummy cylinder\n cyl.child = sec\n assert_equal(sec.diameter, np.ones(n)*10*um)\n assert_equal(sec.length, 
np.ones(n)*20*um)\n assert_equal(sec.distance, np.arange(n)*20*um + 10*um)\n assert_equal(sec.end_distance, 200 * um)\n\n # Coordinates should be specified now, with 0 values for the other\n # coordinates\n for other_coord in xyz - {coord}:\n assert_equal(getattr(sec, 'start_' + other_coord), np.zeros(n)*um)\n assert_equal(getattr(sec, other_coord), np.zeros(n)*um)\n assert_equal(getattr(sec, 'end_' + other_coord), np.zeros(n)*um)\n\n assert_equal(getattr(sec, 'start_' + coord), np.arange(n)*20*um)\n assert_equal(getattr(sec, coord), np.arange(n)*20*um + 10*um)\n assert_equal(getattr(sec, 'end_' + coord), np.arange(n)*20*um + 20*um)\n\n # Specify all coordinates\n val = 200.0/np.sqrt(3.0)*um\n sec = Section(n=n, diameter=np.ones(n+1)*10*um,\n x=np.linspace(0*um, val, n+1),\n y=np.linspace(0*um, val, n+1),\n z=np.linspace(0*um, val, n+1))\n cyl = Cylinder(n=1, diameter=10*um, length=0*um)\n cyl.child = sec\n assert_equal(sec.diameter, np.ones(n)*10*um)\n assert_allclose(sec.length, np.ones(n)*20*um)\n assert_allclose(sec.distance, np.arange(n)*20*um + 10*um)\n assert_allclose(sec.end_distance, 200 * um)\n\n for coord in ['x', 'y', 'z']:\n assert_allclose(getattr(sec, 'start_' + coord), np.arange(n)*val/n)\n assert_allclose(getattr(sec, coord), np.arange(n)*val/n + 0.5*val/n)\n assert_allclose(getattr(sec, 'end_' + coord), np.arange(n)*val/n + val/n)\n\n\n@pytest.mark.codegen_independent\ndef test_attributes_section_coordinates_all():\n n = 3\n # Specify all coordinates\n sec = Section(n=n, diameter=[10, 10, 10, 10]*um,\n x=[10, 11, 11, 11]*um,\n y=[100, 100, 101, 101]*um,\n z=[1000, 1000, 1000, 1001]*um)\n\n assert_equal(sec.diameter, np.ones(n)*10*um)\n assert_allclose(sec.length, np.ones(n)*um)\n assert_allclose(sec.distance, np.arange(n)*um + .5*um)\n assert_allclose(sec.end_distance, 3 * um)\n\n assert_allclose(sec.start_x, [10, 11, 11]*um)\n assert_allclose(sec.x, [10.5, 11, 11]*um)\n assert_allclose(sec.end_x, [11, 11, 11]*um)\n assert_allclose(sec.start_y, [100, 100, 101]*um)\n assert_allclose(sec.y, [100, 100.5, 101]*um)\n assert_allclose(sec.end_y, [100, 101, 101]*um)\n assert_allclose(sec.start_z, [1000, 1000, 1000]*um)\n assert_allclose(sec.z, [1000, 1000, 1000.5]*um)\n assert_allclose(sec.end_z, [1000, 1000, 1001]*um)\n\n # Specify varying diameters\n sec = Section(n=n, diameter=[20, 10, 5, 2.5]*um,\n x=[0, 1, 1, 1]*um, y=[0, 0, 1, 1]*um, z=[0, 0, 0, 1]*um)\n assert_allclose(sec.start_diameter, [20, 10, 5]*um)\n # diameter at midpoint\n assert_allclose(sec.diameter, 0.5*(sec.start_diameter + sec.end_diameter))\n assert_allclose(sec.end_diameter, [10, 5, 2.5]*um)\n # TODO: Check area and volume\n\n\ndef _check_tree_cables(morphology, coordinates=False):\n # number of compartments per section\n assert morphology.n == 10\n assert morphology['1'].n == 5\n assert morphology['2'].n == 5\n assert morphology['21'].n == 5\n assert morphology['22'].n == 5\n # number of compartments per subtree\n assert morphology.total_compartments == 30\n assert morphology['1'].total_compartments == 5\n assert morphology['2'].total_compartments == 15\n assert morphology['21'].total_compartments == 5\n assert morphology['22'].total_compartments == 5\n # number of sections per subtree\n assert morphology.total_sections == 5\n assert morphology['1'].total_sections == 1\n assert morphology['2'].total_sections == 3\n assert morphology['21'].total_sections == 1\n assert morphology['22'].total_sections == 1\n # Check that distances (= distance to root at electrical midpoint)\n # correctly follow the tree 
structure\n assert_allclose(morphology.distance, np.arange(10) * 10 * um + 5 * um)\n assert_allclose(morphology['2'].distance,\n 100 * um + np.arange(5) * 10 * um + 5 * um)\n assert_allclose(morphology['21'].distance,\n 150 * um + np.arange(5) * 10 * um + 5 * um)\n assert_allclose(morphology.end_distance, 100 * um)\n assert_allclose(morphology['1'].end_distance, 200 * um)\n assert_allclose(morphology['2'].end_distance, 150 * um)\n assert_allclose(morphology['21'].end_distance, 200 * um)\n assert_allclose(morphology['22'].end_distance, 200 * um)\n # Check that section diameters are correctly inherited from the parent\n # sections\n assert_allclose(morphology['1'].start_diameter, [10, 8, 6, 4, 2] * um)\n assert_allclose(morphology['22'].start_diameter, [5, 4, 3, 2, 1] * um)\n\n if coordinates:\n # Coordinates should be absolute\n # section: cable\n assert_allclose(morphology.start_x, np.arange(10) * 10 * um)\n assert_allclose(morphology.x, np.arange(10) * 10 * um + 5 * um)\n assert_allclose(morphology.end_x, np.arange(10) * 10 * um + 10 * um)\n assert_allclose(morphology.y, np.zeros(10) * um)\n assert_allclose(morphology.z, np.zeros(10) * um)\n # section: cable['1']\n step = 20 / np.sqrt(2) * um\n assert_allclose(morphology['1'].start_x, 100 * um + np.arange(5) * step)\n assert_allclose(morphology['1'].x, 100 * um + np.arange(5) * step + step/2)\n assert_allclose(morphology['1'].end_x, 100 * um + np.arange(5) * step + step)\n assert_allclose(morphology['1'].start_y, np.arange(5) * step)\n assert_allclose(morphology['1'].y, np.arange(5) * step + step/2)\n assert_allclose(morphology['1'].end_y, np.arange(5) * step + step)\n assert_allclose(morphology['1'].z, np.zeros(5) * um)\n # section: cable['2']\n step = 10 / np.sqrt(2) * um\n assert_allclose(morphology['2'].start_x, 100 * um + np.arange(5) * step)\n assert_allclose(morphology['2'].x, 100 * um + np.arange(5) * step + step / 2)\n assert_allclose(morphology['2'].end_x, 100 * um + np.arange(5) * step + step)\n assert_allclose(morphology['2'].start_y, -np.arange(5) * step)\n assert_allclose(morphology['2'].y, -(np.arange(5) * step + step / 2))\n assert_allclose(morphology['2'].end_y, -(np.arange(5) * step + step))\n assert_allclose(morphology['2'].z, np.zeros(5) * um)\n # section: cable ['21']\n step = 10 / np.sqrt(2) * um\n assert_allclose(morphology['21'].start_x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step)\n assert_allclose(morphology['21'].x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(\n 5) * step + step / 2)\n assert_allclose(morphology ['21'].end_x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(\n 5) * step + step)\n assert_allclose(morphology['21'].start_y, -np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['21'].y, -np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['21'].end_y, -np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['21'].start_z, np.arange(5) * step)\n assert_allclose(morphology['21'].z, np.arange(5) * step + step / 2)\n assert_allclose(morphology['21'].end_z, np.arange(5) * step + step)\n # section: cable['22']\n step = 10 / np.sqrt(2) * um\n assert_allclose(morphology['22'].start_x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step)\n assert_allclose(morphology['22'].x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step/2)\n assert_allclose(morphology['22'].end_x,\n 100 * um + 50 / np.sqrt(2) * um + np.arange(\n 5) * step + step)\n assert_allclose(morphology['22'].start_y, -np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['22'].y, 
-np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['22'].end_y, -np.ones(5) * 50 / np.sqrt(2) * um)\n assert_allclose(morphology['22'].start_z, -np.arange(5) * step)\n assert_allclose(morphology['22'].z, -(np.arange(5) * step + step/2))\n assert_allclose(morphology['22'].end_z, -(np.arange(5) * step + step))\n\n\n@pytest.mark.codegen_independent\ndef test_tree_cables_schematic():\n cable = Cylinder(n=10, diameter=10*um, length=100*um)\n cable.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um, length=np.ones(5)*20*um) # tapering truncated cones\n cable.R = Cylinder(n=5, diameter=5*um, length=50*um)\n cable.RL = Cylinder(n=5, diameter=2.5*um, length=50*um)\n cable.RR = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um, length=np.ones(5)*10*um)\n\n _check_tree_cables(cable)\n\n@pytest.mark.codegen_independent\ndef test_tree_cables_coordinates():\n # The lengths of the sections should be identical to the previous test\n cable = Cylinder(n=10, x=[0, 100]*um, diameter=10*um)\n cable.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n x=np.linspace(0, 100, 6)/np.sqrt(2)*um,\n y=np.linspace(0, 100, 6)/np.sqrt(2)*um)\n cable.R = Cylinder(n=5, diameter=5*um, x=[0, 50]*um/np.sqrt(2),\n y=[0, -50]*um/np.sqrt(2))\n cable.RL = Cylinder(n=5, diameter=2.5*um,\n x=[0, 50]*um/np.sqrt(2),\n z=[0, 50]*um/np.sqrt(2))\n cable.RR = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n x=np.linspace(0, 50, 6)*um/np.sqrt(2),\n z=np.linspace(0, -50, 6)*um/np.sqrt(2))\n\n _check_tree_cables(cable, coordinates=True)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_cables_from_points():\n # The coordinates should be identical to the previous test\n points = [ # cable\n (1, None, 0, 0, 0, 10, -1),\n (2, None, 10, 0, 0, 10, 1),\n (3, None, 20, 0, 0, 10, 2),\n (4, None, 30, 0, 0, 10, 3),\n (5, None, 40, 0, 0, 10, 4),\n (6, None, 50, 0, 0, 10, 5),\n (7, None, 60, 0, 0, 10, 6),\n (8, None, 70, 0, 0, 10, 7),\n (9, None, 80, 0, 0, 10, 8),\n (10, None, 90, 0, 0, 10, 9),\n (11, None, 100, 0, 0, 10, 10),\n # cable.L (using automatic names)\n (12, None, 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 11),\n (13, None, 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 12),\n (14, None, 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 13),\n (15, None, 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 14),\n (16, None, 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 15),\n # cable.R (using automatic names)\n (17, None, 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5 , 11),\n (18, None, 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5 , 17),\n (19, None, 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5 , 18),\n (20, None, 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5 , 19),\n (21, None, 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5 , 20),\n # cable.RL (using explicit names)\n (22, 'L' , 100+60/np.sqrt(2), -50/np.sqrt(2), 10/np.sqrt(2), 2.5, 21),\n (23, 'L' , 100+70/np.sqrt(2), -50/np.sqrt(2), 20/np.sqrt(2), 2.5, 22),\n (24, 'L' , 100+80/np.sqrt(2), -50/np.sqrt(2), 30/np.sqrt(2), 2.5, 23),\n (25, 'L' , 100+90/np.sqrt(2), -50/np.sqrt(2), 40/np.sqrt(2), 2.5, 24),\n (26, 'L' , 100+100/np.sqrt(2), -50/np.sqrt(2), 50/np.sqrt(2), 2.5, 25),\n # cable.RR (using explicit names)\n (27, 'R' , 100+60/np.sqrt(2), -50/np.sqrt(2), -10/np.sqrt(2), 4, 21),\n (28, 'R' , 100+70/np.sqrt(2), -50/np.sqrt(2), -20/np.sqrt(2), 3, 27),\n (29, 'R' , 100+80/np.sqrt(2), -50/np.sqrt(2), -30/np.sqrt(2), 2, 28),\n (30, 'R' , 100+90/np.sqrt(2), -50/np.sqrt(2), -40/np.sqrt(2), 1, 29),\n (31, 'R' , 100+100/np.sqrt(2), -50/np.sqrt(2), -50/np.sqrt(2), 0, 30),\n ]\n cable = Morphology.from_points(points)\n\n # Check that the names are 
used\n assert cable.L.n == 5\n assert cable.R.n == 5\n assert cable.RL.n == 5\n assert cable.RR.n == 5\n _check_tree_cables(cable, coordinates=True)\n\ndef test_tree_cables_from_swc():\n swc_content = '''\n# Test file\n1 0 0 0 0 5 -1\n2 0 10 0 0 5 1\n3 0 20 0 0 5 2\n4 0 30 0 0 5 3\n5 0 40 0 0 5 4\n6 0 50 0 0 5 5\n7 0 60 0 0 5 6\n8 0 70 0 0 5 7\n9 0 80 0 0 5 8\n10 0 90 0 0 5 9\n11 0 100 0 0 5 10\n12 2 114.14213562373095 14.142135623730949 0 4 11\n13 2 128.2842712474619 28.284271247461898 0 3 12\n14 2 142.42640687119285 42.426406871192846 0 2 13\n15 2 156.5685424949238 56.568542494923797 0 1 14\n16 2 170.71067811865476 70.710678118654741 0 0 15\n17 2 107.07106781186548 -7.0710678118654746 0 2.5 11\n18 2 114.14213562373095 -14.142135623730949 0 2.5 17\n19 2 121.21320343559643 -21.213203435596423 0 2.5 18\n20 2 128.2842712474619 -28.284271247461898 0 2.5 19\n21 2 135.35533905932738 -35.35533905932737 0 2.5 20\n22 2 142.42640687119285 -35.35533905932737 7.0710678118654746 1.25 21\n23 2 149.49747468305833 -35.35533905932737 14.142135623730949 1.25 22\n24 2 156.5685424949238 -35.35533905932737 21.213203435596423 1.25 23\n25 2 163.63961030678928 -35.35533905932737 28.284271247461898 1.25 24\n26 2 170.71067811865476 -35.35533905932737 35.35533905932737 1.25 25\n27 2 142.42640687119285 -35.35533905932737 -7.0710678118654746 2 21\n28 2 149.49747468305833 -35.35533905932737 -14.142135623730949 1.5 27\n29 2 156.5685424949238 -35.35533905932737 -21.213203435596423 1 28\n30 2 163.63961030678928 -35.35533905932737 -28.284271247461898 0.5 29\n31 2 170.71067811865476 -35.35533905932737 -35.35533905932737 0 30\n'''\n tmp_filename = tempfile.mktemp('cable_morphology.swc')\n with open(tmp_filename, 'w') as f:\n f.write(swc_content)\n cable = Morphology.from_file(tmp_filename)\n os.remove(tmp_filename)\n _check_tree_cables(cable, coordinates=True)\n\ndef _check_tree_soma(morphology, coordinates=False, use_cylinders=True):\n\n # number of compartments per section\n assert morphology.n == 1\n assert morphology['1'].n == 5\n assert morphology['2'].n == 5\n\n # number of compartments per subtree\n assert morphology.total_compartments == 11\n assert morphology['1'].total_compartments == 5\n assert morphology['2'].total_compartments == 5\n\n # number of sections per subtree\n assert morphology.total_sections == 3\n assert morphology['1'].total_sections == 1\n assert morphology['2'].total_sections == 1\n\n assert_allclose(morphology.diameter, [30]*um)\n\n # Check that distances (= distance to root at midpoint)\n # correctly follow the tree structure\n # Note that the soma does add nothing to the distance\n assert_equal(morphology.distance, 0 * um)\n assert_allclose(morphology['1'].distance, np.arange(5)*20*um + 10*um)\n assert_allclose(morphology['2'].distance, np.arange(5)*10*um + 5*um)\n assert_allclose(morphology.end_distance, 0 * um)\n assert_allclose(morphology['1'].end_distance, 100 * um)\n assert_allclose(morphology['2'].end_distance, 50 * um)\n\n assert_allclose(morphology.diameter, 30*um)\n assert_allclose(morphology['1'].start_diameter, [8, 8, 6, 4, 2]*um)\n assert_allclose(morphology['1'].diameter, [8, 7, 5, 3, 1]*um)\n assert_allclose(morphology['1'].end_diameter, [8, 6, 4, 2, 0]*um)\n assert_allclose(morphology['2'].start_diameter, np.ones(5) * 5*um)\n assert_allclose(morphology['2'].diameter, np.ones(5) * 5*um)\n assert_allclose(morphology['2'].end_diameter, np.ones(5) * 5*um)\n\n if coordinates:\n # Coordinates should be absolute\n # section: soma\n assert_allclose(morphology.start_x, 100*um)\n 
assert_allclose(morphology.x, 100*um)\n assert_allclose(morphology.end_x, 100*um)\n assert_allclose(morphology.y, 0*um)\n assert_allclose(morphology.z, 0*um)\n # section: cable['1']\n step = 20 / np.sqrt(2) * um\n assert_allclose(morphology['1'].start_x, 100 * um + np.arange(5) * step)\n assert_allclose(morphology['1'].x, 100 * um + np.arange(5) * step + step/2)\n assert_allclose(morphology['1'].end_x, 100 * um + np.arange(5) * step + step)\n assert_allclose(morphology['1'].start_y, np.arange(5) * step)\n assert_allclose(morphology['1'].y, np.arange(5) * step + step/2)\n assert_allclose(morphology['1'].end_y, np.arange(5) * step + step)\n assert_allclose(morphology['1'].z, np.zeros(5) * um)\n # section: cable['2']\n step = 10 / np.sqrt(2) * um\n assert_allclose(morphology['2'].start_x, 100 * um + np.arange(5) * step)\n if use_cylinders:\n assert_allclose(morphology['2'].x, 100 * um + np.arange(5) * step + step / 2)\n assert_allclose(morphology['2'].end_x, 100 * um + np.arange(5) * step + step)\n assert_allclose(morphology['2'].start_y, -np.arange(5) * step)\n if use_cylinders:\n assert_allclose(morphology['2'].y, -(np.arange(5) * step + step / 2))\n assert_allclose(morphology['2'].end_y, -(np.arange(5) * step + step))\n if use_cylinders:\n assert_allclose(morphology['2'].z, np.zeros(5) * um)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_schematic():\n soma = Soma(diameter=30*um)\n soma.L = Section(n=5, diameter=[8, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n soma.R = Cylinder(n=5, diameter=5*um, length=50*um)\n\n _check_tree_soma(soma)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_coordinates():\n soma = Soma(diameter=30*um, x=100*um)\n soma.L = Section(n=5, diameter=[8, 8, 6, 4, 2, 0]*um,\n x=np.linspace(0, 100, 6)/np.sqrt(2)*um,\n y=np.linspace(0, 100, 6)/np.sqrt(2)*um) # tapering truncated cones\n soma.R = Cylinder(n=5, diameter=5*um,\n x=[0, 50]*um/np.sqrt(2), y=[0, -50]*um/np.sqrt(2))\n\n _check_tree_soma(soma, coordinates=True)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_from_points():\n # The coordinates should be identical to the previous test\n points = [ # soma\n (1, 'soma', 100, 0, 0, 30, -1),\n # soma.L\n (2, 'L' , 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 1),\n (3, 'L' , 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 2),\n (4, 'L' , 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 3),\n (5, 'L' , 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 4),\n (6, 'L' , 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 5),\n # soma.R\n (7, 'R' , 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5 , 1),\n (8, 'R' , 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5 , 7),\n (9, 'R' , 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5 , 8),\n (10, 'R' , 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5 , 9),\n (11, 'R' , 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5 , 10),\n ]\n cable = Morphology.from_points(points)\n _check_tree_soma(cable, coordinates=True, use_cylinders=False)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_from_points_3_point_soma():\n # The coordinates should be identical to the previous test\n points = [ # soma\n (1, 'soma', 100, 0, 0, 30, -1),\n (2, 'soma', 100, 15, 0, 30, 1),\n (3, 'soma', 100, -15, 0, 30, 1),\n # soma.L\n (4, 'L' , 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 1),\n (5, 'L' , 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 4),\n (6, 'L' , 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 5),\n (7, 'L' , 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 6),\n (8, 'L' , 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 7),\n # soma.R\n (9, 'R' , 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5 
, 1),\n (10, 'R' , 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5 , 9),\n (11, 'R' , 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5 , 10),\n (12, 'R' , 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5 , 11),\n (13, 'R' , 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5 , 12),\n ]\n cable = Morphology.from_points(points)\n _check_tree_soma(cable, coordinates=True, use_cylinders=False)\n # The first compartment should be a spherical soma!\n assert isinstance(cable, Soma)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_from_points_3_point_soma_incorrect():\n # Inconsistent diameters\n points = [ # soma\n (1, 'soma', 100, 0, 0, 30, -1),\n (2, 'soma', 100, 15, 0, 28, 1),\n (3, 'soma', 100, -15, 0, 30, 1),\n # soma.L\n (4, 'L' , 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 1),\n (5, 'L' , 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 4),\n (6, 'L' , 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 5),\n (7, 'L' , 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 6),\n (8, 'L' , 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 7)\n ]\n with pytest.raises(ValueError):\n Morphology.from_points(points)\n\n # Inconsistent coordinates\n points = [ # soma\n (1, 'soma', 100, 0, 0, 30, -1),\n (2, 'soma', 100, 15, 0, 30, 1),\n (3, 'soma', 100, -16, 0, 30, 1),\n # soma.L\n (4, 'L', 100 + 20 / np.sqrt(2), 20 / np.sqrt(2), 0, 8, 1),\n (5, 'L', 100 + 40 / np.sqrt(2), 40 / np.sqrt(2), 0, 6, 4),\n (6, 'L', 100 + 60 / np.sqrt(2), 60 / np.sqrt(2), 0, 4, 5),\n (7, 'L', 100 + 80 / np.sqrt(2), 80 / np.sqrt(2), 0, 2, 6),\n (8, 'L', 100 + 100 / np.sqrt(2), 100 / np.sqrt(2), 0, 0, 7)\n ]\n with pytest.raises(ValueError):\n Morphology.from_points(points)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_from_swc():\n swc_content = '''\n# Test file\n1 1 100 0 0 15 -1\n2 2 114.14213562373095 14.142135623730949 0 4 1\n3 2 128.2842712474619 28.284271247461898 0 3 2\n4 2 142.42640687119285 42.426406871192846 0 2 3\n5 2 156.5685424949238 56.568542494923797 0 1 4\n6 2 170.71067811865476 70.710678118654741 0 0 5\n7 2 107.07106781186548 -7.0710678118654746 0 2.5 1\n8 2 114.14213562373095 -14.142135623730949 0 2.5 7\n9 2 121.21320343559643 -21.213203435596423 0 2.5 8\n10 2 128.2842712474619 -28.284271247461898 0 2.5 9\n11 2 135.35533905932738 -35.35533905932737 0 2.5 10\n'''\n tmp_filename = tempfile.mktemp('cable_morphology.swc')\n with open(tmp_filename, 'w') as f:\n f.write(swc_content)\n soma = Morphology.from_file(tmp_filename)\n os.remove(tmp_filename)\n _check_tree_soma(soma, coordinates=True, use_cylinders=False)\n\n\n@pytest.mark.codegen_independent\ndef test_tree_soma_from_swc_3_point_soma():\n swc_content = '''\n# Test file\n1 1 100 0 0 15 -1\n2 1 100 15 0 15 1\n3 1 100 -15 0 15 1\n4 2 114.14213562373095 14.142135623730949 0 4 1\n5 2 128.2842712474619 28.284271247461898 0 3 4\n6 2 142.42640687119285 42.426406871192846 0 2 5\n7 2 156.5685424949238 56.568542494923797 0 1 6\n8 2 170.71067811865476 70.710678118654741 0 0 7\n9 2 107.07106781186548 -7.0710678118654746 0 2.5 1\n10 2 114.14213562373095 -14.142135623730949 0 2.5 9\n11 2 121.21320343559643 -21.213203435596423 0 2.5 10\n12 2 128.2842712474619 -28.284271247461898 0 2.5 11\n13 2 135.35533905932738 -35.35533905932737 0 2.5 12\n'''\n tmp_filename = tempfile.mktemp('cable_morphology.swc')\n with open(tmp_filename, 'w') as f:\n f.write(swc_content)\n soma = Morphology.from_file(tmp_filename)\n os.remove(tmp_filename)\n _check_tree_soma(soma, coordinates=True, use_cylinders=False)\n\n\n@pytest.mark.codegen_independent\ndef test_construction_incorrect_arguments():\n ### Morphology\n dummy_self = Soma(10*um) # To 
allow testing of Morphology.__init__\n with pytest.raises(TypeError):\n Morphology.__init__(dummy_self, n=1.5)\n with pytest.raises(ValueError):\n Morphology.__init__(dummy_self, n=0)\n with pytest.raises(TypeError):\n Morphology.__init__(dummy_self, 'filename.swc')\n\n ### Soma\n with pytest.raises(DimensionMismatchError):\n Soma(10)\n with pytest.raises(TypeError):\n Soma([10, 20]*um)\n with pytest.raises(TypeError):\n Soma(x=[10, 20]*um)\n with pytest.raises(TypeError):\n Soma(y=[10, 20]*um)\n with pytest.raises(TypeError):\n Soma(z=[10, 20]*um)\n with pytest.raises(DimensionMismatchError):\n Soma(x=10)\n with pytest.raises(DimensionMismatchError):\n Soma(y=10)\n with pytest.raises(DimensionMismatchError):\n Soma(z=10)\n\n ### Cylinder\n # Diameter can only be single value\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=[10, 20]*um, length=100*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=[10, 20, 30]*um, length=100*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=np.ones(3, 2)*um, length=100*um)\n # Length can only be single value\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, length=[10, 20]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, length=[10, 20, 30]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, length=np.ones(3, 2)*um)\n # Coordinates have to be two values\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, x=[10]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, x=[10, 20, 30]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, y=[10]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, y=[10, 20, 30]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, z=[10]*um)\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, z=[10, 20, 30]*um)\n # Need either coordinates or lengths\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um)\n # But not both\n with pytest.raises(TypeError):\n Cylinder(n=3, diameter=10*um, length=30*um, x=[0, 30]*um)\n\n ### Section\n # Diameter have to be n+1 values\n with pytest.raises(TypeError):\n Section(n=3, diameter=10*um, length=np.ones(3)*10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=[10, 20, 30]*um, length=np.ones(3)*10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4, 2)*um, length=np.ones(3)*10*um)\n # Length have to be n values\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, length=10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, length=[10, 20]*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, length=np.ones(3, 2)*um)\n # Coordinates have to be n+1 values\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, x=10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, x=[10, 20, 30]*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, y=10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, y=[10, 20, 30]*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, z=10*um)\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, z=[10, 20, 30]*um)\n # Need either coordinates or lengths\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um)\n # But not both\n with pytest.raises(TypeError):\n Section(n=3, diameter=np.ones(4)*10*um, length=[10, 20, 30]*um,\n x=[0, 10, 
20, 30]*um)\n\n\n@pytest.mark.codegen_independent\ndef test_from_points_minimal():\n points = [(1, 'soma', 10, 20, 30, 30, -1)]\n morph = Morphology.from_points(points)\n assert morph.total_compartments == 1\n assert_allclose(morph.diameter, 30*um)\n assert_allclose(morph.x, 10*um)\n assert_allclose(morph.y, 20*um)\n assert_allclose(morph.z, 30*um)\n\n\n@pytest.mark.codegen_independent\ndef test_from_points_incorrect():\n # The coordinates should be identical to the previous test\n points = [\n (1, None, 0, 0, 0, 10, -1),\n (2, None, 10, 0, 0, 10, 1),\n (2, None, 20, 0, 0, 10, 2),\n ]\n points2 = [\n (1, None, 0, 0, 0, 10, -1),\n (2, None, 10, 0, 0, 10, 1),\n (3, None, 20, 0, 0, 10, 3),\n ]\n points3 = [\n (1, None, 0, 0, 0, 10, -1),\n (2, None, 10, 0, 0, 10, 1),\n (3, None, 20, 0, 0, 10, 4),\n ]\n points4 = [\n (1, 0, 0, 0, 10, -1),\n (2, 10, 0, 0, 10, 1),\n (3, 20, 0, 0, 10, 2),\n ]\n with pytest.raises(ValueError):\n Morphology.from_points(points)\n with pytest.raises(ValueError):\n Morphology.from_points(points2)\n with pytest.raises(ValueError):\n Morphology.from_points(points3)\n with pytest.raises(ValueError):\n Morphology.from_points(points4)\n\n\n@pytest.mark.codegen_independent\ndef test_subtree_deletion():\n soma = Soma(diameter=30*um)\n first_dendrite = Cylinder(n=5, diameter=5*um, length=50*um)\n second_dendrite = Cylinder(n=5, diameter=5*um, length=50*um)\n second_dendrite.L = Cylinder(n=5, diameter=5*um, length=50*um)\n second_dendrite.R = Cylinder(n=5, diameter=5*um, length=50*um)\n soma.dend1 = first_dendrite\n soma.dend2 = second_dendrite\n soma.dend3 = Cylinder(n=5, diameter=5*um, length=50*um)\n soma.dend3.L = Cylinder(n=5, diameter=5*um, length=50*um)\n soma.dend3.L.L = Cylinder(n=5, diameter=5 * um, length=50 * um)\n\n assert soma.total_compartments == 36\n\n del soma.dend1\n assert soma.total_compartments == 31\n with pytest.raises(AttributeError):\n soma.dend1\n with pytest.raises(AttributeError):\n delattr(soma, 'dend1')\n with pytest.raises(AttributeError):\n soma.__delitem__('dend1')\n assert first_dendrite not in soma.children\n\n del soma['dend2']\n assert soma.total_compartments == 16\n with pytest.raises(AttributeError):\n soma.dend2\n assert second_dendrite not in soma.children\n\n del soma.dend3.LL\n assert soma.total_compartments == 11\n with pytest.raises(AttributeError):\n soma.dend3.LL\n with pytest.raises(AttributeError):\n soma.dend3.L.L\n\n\n@pytest.mark.codegen_independent\ndef test_subgroup_indices():\n morpho = Soma(diameter=30*um)\n morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)\n morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)\n morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)\n\n assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15])\n assert_equal(morpho.L.indices[3*um:5*um], [4, 5])\n assert_equal(morpho.L.indices[3*um:5*um],\n morpho.L[3*um:5*um].indices[:])\n assert_equal(morpho.L.indices[:5*um], [1, 2, 3, 4, 5])\n assert_equal(morpho.L.indices[3*um:], [4, 5, 6, 7, 8, 9, 10])\n assert_equal(morpho.L.indices[3.5*um], 4)\n assert_equal(morpho.L.indices[3*um], 4)\n assert_equal(morpho.L.indices[3.9*um], 4)\n assert_equal(morpho.L.indices[3], 4)\n assert_equal(morpho.L.indices[-1], 10)\n assert_equal(morpho.L.indices[3:5], [4, 5])\n assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])\n assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5])\n\n@pytest.mark.codegen_independent\ndef test_subgroup_attributes():\n morpho = Soma(diameter=30*um)\n morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)\n morpho.LL = 
Cylinder(x=[0, 5]*um, diameter=2*um, n=5)\n morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)\n\n # # Getting a single compartment by index\n assert_allclose(morpho.L[2].area, morpho.L.area[2])\n assert_allclose(morpho.L[2].volume, morpho.L.volume[2])\n assert_allclose(morpho.L[2].length, morpho.L.length[2])\n assert_allclose(morpho.L[2].r_length_1, morpho.L.r_length_1[2])\n assert_allclose(morpho.L[2].r_length_2, morpho.L.r_length_2[2])\n assert_allclose(morpho.L[2].distance, morpho.L.distance[2])\n assert_allclose(morpho.L[2].diameter, morpho.L.diameter[2])\n assert morpho.L[2].x is None\n assert morpho.L[2].y is None\n assert morpho.L[2].z is None\n assert morpho.L[2].start_x is None\n assert morpho.L[2].start_y is None\n assert morpho.L[2].start_z is None\n assert morpho.L[2].end_x is None\n assert morpho.L[2].end_y is None\n assert morpho.L[2].end_z is None\n\n # # Getting a single compartment by position\n assert_allclose(morpho.LL[1.5*um].area, morpho.LL.area[1])\n assert_allclose(morpho.LL[1.5*um].volume, morpho.LL.volume[1])\n assert_allclose(morpho.LL[1.5*um].length, morpho.LL.length[1])\n assert_allclose(morpho.LL[1.5*um].r_length_1, morpho.LL.r_length_1[1])\n assert_allclose(morpho.LL[1.5*um].r_length_2, morpho.LL.r_length_2[1])\n assert_allclose(morpho.LL[1.5*um].distance, morpho.LL.distance[1])\n assert_allclose(morpho.LL[1.5*um].diameter, morpho.LL.diameter[1])\n assert_allclose(morpho.LL[1.5*um].x, morpho.LL.x[1])\n assert_allclose(morpho.LL[1.5*um].y, morpho.LL.y[1])\n assert_allclose(morpho.LL[1.5*um].z, morpho.LL.z[1])\n assert_allclose(morpho.LL[1.5*um].start_x, morpho.LL.start_x[1])\n assert_allclose(morpho.LL[1.5*um].start_y, morpho.LL.start_y[1])\n assert_allclose(morpho.LL[1.5*um].start_z, morpho.LL.start_z[1])\n assert_allclose(morpho.LL[1.5*um].end_x, morpho.LL.end_x[1])\n assert_allclose(morpho.LL[1.5*um].end_y, morpho.LL.end_y[1])\n assert_allclose(morpho.LL[1.5*um].end_z, morpho.LL.end_z[1])\n\n # Getting several compartments by indices\n assert_allclose(morpho.right[3:6].area, morpho.right.area[3:6])\n assert_allclose(morpho.right[3:6].volume, morpho.right.volume[3:6])\n assert_allclose(morpho.right[3:6].length, morpho.right.length[3:6])\n assert_allclose(morpho.right[3:6].r_length_1, morpho.right.r_length_1[3:6])\n assert_allclose(morpho.right[3:6].r_length_2, morpho.right.r_length_2[3:6])\n assert_allclose(morpho.right[3:6].distance, morpho.right.distance[3:6])\n assert_allclose(morpho.right[3:6].diameter, morpho.right.diameter[3:6])\n assert morpho.right[3:6].x is None\n assert morpho.right[3:6].y is None\n assert morpho.right[3:6].z is None\n assert morpho.right[3:6].start_x is None\n assert morpho.right[3:6].start_y is None\n assert morpho.right[3:6].start_z is None\n assert morpho.right[3:6].end_x is None\n assert morpho.right[3:6].end_y is None\n assert morpho.right[3:6].end_z is None\n\n # Getting several compartments by position\n assert_allclose(morpho.L[3*um:5*um].distance, [3.5, 4.5]*um)\n assert_allclose(morpho.L[3.5*um:4.5*um].distance, [3.5, 4.5]*um)\n\n\n@pytest.mark.codegen_independent\ndef test_subgroup_incorrect():\n # Incorrect indexing\n morpho = Soma(diameter=30*um)\n morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)\n morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)\n morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)\n\n # Non-existing branch\n with pytest.raises(AttributeError):\n morpho.axon\n\n # Incorrect indexing\n # wrong units or mixing units\n with pytest.raises(TypeError):\n morpho.L[3*second:5*second]\n 
with pytest.raises(TypeError):\n morpho.L[3.4:5.3]\n with pytest.raises(TypeError):\n morpho.L[3:5*um]\n with pytest.raises(TypeError):\n morpho.L[3*um:5]\n # providing a step\n with pytest.raises(TypeError):\n morpho.L[3*um:5*um:2*um]\n with pytest.raises(TypeError):\n morpho.L[3:5:2]\n # incorrect type\n with pytest.raises(TypeError):\n morpho.L[object()]\n # out of range\n with pytest.raises(IndexError):\n morpho.L[-10*um]\n with pytest.raises(IndexError):\n morpho.L[15*um]\n with pytest.raises(IndexError):\n morpho.L[10]\n\n\n@pytest.mark.codegen_independent\ndef test_topology():\n soma = Soma(diameter=30*um)\n soma.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n soma.R = Cylinder(n=10, diameter=5*um, length=50*um)\n soma.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n soma.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n str_topology = str(soma.topology())\n lines = [l for l in str_topology.split('\\n') if len(l.strip())]\n assert len(lines) == 5 # one line for each section\n for line, name in zip(lines, ['root', '.L', '.R', '.R.left', 'R.right']):\n assert name in line\n\n\n@pytest.mark.codegen_independent\ndef test_copy_section_soma():\n soma = Soma(diameter=30*um)\n soma_copy = soma.copy_section()\n assert soma_copy.diameter[0] == 30*um\n assert soma_copy.x is None\n assert soma_copy.y is None\n assert soma_copy.z is None\n assert soma_copy.type == 'soma'\n\n soma = Soma(diameter=30*um, x=5*um, z=-10*um)\n soma_copy = soma.copy_section()\n assert soma_copy.diameter[0] == 30*um\n assert_allclose(soma_copy.x[0], 5*um)\n assert_allclose(soma_copy.y[0], 0*um)\n assert_allclose(soma_copy.z[0], -10*um)\n assert soma_copy.type == 'soma'\n\n\n@pytest.mark.codegen_independent\ndef test_copy_section_section():\n # No coordinates\n sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,\n length=np.ones(5)*10*um, type='dend')\n sec_copy = sec.copy_section()\n assert_allclose(sec_copy.start_diameter, sec.start_diameter)\n assert_allclose(sec_copy.end_diameter, sec.end_diameter)\n assert_allclose(sec_copy.length, sec.length)\n assert sec_copy.n == sec.n\n assert sec_copy.x is None\n assert sec_copy.y is None\n assert sec_copy.z is None\n assert sec_copy.type == 'dend'\n\n # With coordinates\n sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,\n x=[0, 1, 2, 3, 4, 5]*um,\n y=[0, -1, -2, -3, -4, -5]*um)\n sec_copy = sec.copy_section()\n assert_allclose(sec_copy.start_diameter, sec.start_diameter)\n assert_allclose(sec_copy.end_diameter, sec.end_diameter)\n assert_allclose(sec_copy.length, sec.length)\n assert sec_copy.n == sec.n\n assert_allclose(sec_copy.x, sec.x)\n assert_allclose(sec_copy.y, sec.y)\n assert_allclose(sec_copy.z, sec.z)\n\n assert sec_copy.type is None\n\n@pytest.mark.codegen_independent\ndef test_copy_section_cylinder():\n # no coordinates\n sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,\n length=np.ones(5)*20*um, type='dend')\n sec_copy = sec.copy_section()\n assert_allclose(sec_copy.end_diameter, sec.end_diameter)\n assert_allclose(sec_copy.length, sec.length)\n assert sec_copy.n == sec.n\n assert sec_copy.x is None\n assert sec_copy.y is None\n assert sec_copy.z is None\n assert sec_copy.type == 'dend'\n\n # with coordinates\n sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,\n x=[0, 1, 2, 3, 4, 5]*um, y=[0, -1, -2, -3, -4, -5]*um)\n sec_copy = sec.copy_section()\n assert_allclose(sec_copy.end_diameter, sec.end_diameter)\n assert_allclose(sec_copy.length, sec.length)\n 
assert sec_copy.n == sec.n\n assert_allclose(sec_copy.x, sec.x)\n assert_allclose(sec_copy.y, sec.y)\n assert_allclose(sec_copy.z, sec.z)\n\n assert sec_copy.type is None\n\n\ndef _check_length_coord_consistency(morph_with_coords):\n if not isinstance(morph_with_coords, Soma):\n vectors = np.diff(morph_with_coords.coordinates, axis=0)\n calculated_length = np.sqrt(np.sum(vectors**2, axis=1))\n assert_allclose(calculated_length, morph_with_coords.length)\n for child in morph_with_coords.children:\n _check_length_coord_consistency(child)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_deterministic():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates()\n assert morph_with_coords.total_compartments == morph.total_compartments\n assert morph_with_coords.total_sections == morph.total_sections\n\n for new, old in [(morph_with_coords, morph),\n (morph_with_coords.L, morph.L),\n (morph_with_coords.R, morph.R),\n (morph_with_coords.R.left, morph.R.left),\n (morph_with_coords.R.right, morph.R.right)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n # The morphology should be in the x/y plane\n assert_equal(new.z, 0*um)\n\n _check_length_coord_consistency(morph_with_coords)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_random_sections():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates(section_randomness=25)\n assert morph_with_coords.total_compartments == morph.total_compartments\n assert morph_with_coords.total_sections == morph.total_sections\n\n for new, old in [(morph_with_coords, morph),\n (morph_with_coords.L, morph.L),\n (morph_with_coords.R, morph.R),\n (morph_with_coords.R.left, morph.R.left),\n (morph_with_coords.R.right, morph.R.right)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n\n _check_length_coord_consistency(morph_with_coords)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_random_compartments():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates(compartment_randomness=15)\n assert morph_with_coords.total_compartments == morph.total_compartments\n assert morph_with_coords.total_sections == morph.total_sections\n\n for new, old in [(morph_with_coords, morph),\n (morph_with_coords.L, morph.L),\n (morph_with_coords.R, morph.R),\n (morph_with_coords.R.left, morph.R.left),\n (morph_with_coords.R.right, morph.R.right)]:\n assert new.n == old.n\n 
assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n\n _check_length_coord_consistency(morph_with_coords)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_random_all():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates(section_randomness=25,\n compartment_randomness=15)\n assert morph_with_coords.total_compartments == morph.total_compartments\n assert morph_with_coords.total_sections == morph.total_sections\n\n for new, old in [(morph_with_coords, morph),\n (morph_with_coords.L, morph.L),\n (morph_with_coords.R, morph.R),\n (morph_with_coords.R.left, morph.R.left),\n (morph_with_coords.R.right, morph.R.right)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n\n _check_length_coord_consistency(morph_with_coords)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_no_overwrite():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates(compartment_randomness=15)\n # This should not change anything because the morphology already has coordinates!\n morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,\n compartment_randomness=15)\n\n for new, old in [(morph_with_coords2, morph_with_coords),\n (morph_with_coords2.L, morph_with_coords.L),\n (morph_with_coords2.R, morph_with_coords.R),\n (morph_with_coords2.R.left, morph_with_coords.R.left),\n (morph_with_coords2.R.right, morph_with_coords.R.right)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n assert_allclose(new.x, old.x)\n assert_allclose(new.y, old.y)\n assert_allclose(new.z, old.z)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_overwrite():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n morph_with_coords = morph.generate_coordinates(compartment_randomness=15)\n # This should change things since we explicitly ask for it\n morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,\n compartment_randomness=15,\n overwrite_existing=True)\n\n for new, old in [# ignore the root compartment\n (morph_with_coords2.L, morph_with_coords.L),\n (morph_with_coords2.R, morph_with_coords.R),\n (morph_with_coords2.R.left, morph_with_coords.R.left),\n (morph_with_coords2.R.right, morph_with_coords.R.right)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n assert all(np.abs(new.x - old.x) > 0)\n assert all(np.abs(new.y - old.y) > 
0)\n assert all(np.abs(new.z - old.z) > 0)\n\n _check_length_coord_consistency(morph_with_coords2)\n\n\n@pytest.mark.codegen_independent\ndef test_generate_coordinates_mixed_overwrite():\n morph = Soma(diameter=30*um)\n morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,\n length=np.ones(5)*20*um) # tapering truncated cones\n morph.R = Cylinder(n=10, diameter=5*um, length=50*um)\n morph_with_coords = morph.generate_coordinates(section_randomness=25,\n compartment_randomness=15)\n # The following just returns a copy, as all coordinates are already\n # specified\n morph_copy = morph_with_coords.generate_coordinates()\n\n # Add new sections that do not yet have coordinates\n morph_with_coords.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)\n morph_with_coords.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,\n length=np.ones(5)*10*um)\n\n # This should change things since we explicitly ask for it\n morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,\n compartment_randomness=15)\n\n for new, old in [(morph_with_coords2, morph_with_coords),\n (morph_with_coords2.L, morph_with_coords.L),\n (morph_with_coords2.R, morph_with_coords.R)]:\n assert new.n == old.n\n assert_allclose(new.length, old.length)\n assert_allclose(new.diameter, old.diameter)\n assert_allclose(new.x, old.x)\n assert_allclose(new.y, old.y)\n assert_allclose(new.z, old.z)\n\n assert morph_with_coords.R.left.x is None\n assert len(morph_with_coords2.R.left.x) == morph_with_coords2.R.left.n\n\n _check_length_coord_consistency(morph_with_coords2)\n\n\n@pytest.mark.codegen_independent\ndef test_str_repr():\n # A very basic test, make sure that the str/repr functions return\n # something and do not raise an error\n for morph in [Soma(diameter=30*um),\n Soma(diameter=30*um, x=5*um, y=10*um),\n Cylinder(n=5, diameter=10*um, length=50*um),\n Cylinder(n=5, diameter=10*um, x=[0, 50]*um),\n Section(n=5, diameter=[2.5, 5, 10, 5, 10, 5]*um, length=[10, 20, 5, 5, 10]*um),\n Section(n=5, diameter=[2.5, 5, 10, 5, 10, 5]*um, x=[0, 10, 30, 35, 40, 50]*um)]:\n\n assert len(repr(morph)) > 0\n assert len(str(morph)) > 0\n morph = Soma(30*um)\n assert len(repr(morph.children)) > 0\n assert len(str(morph.children)) > 0\n morph.axon = Cylinder(1*um, n=10, length=100*um)\n morph.dend = Cylinder(1*um, n=10, length=50*um)\n assert len(repr(morph.children)) > 0\n assert len(str(morph.children)) > 0\n\n\nif __name__ == '__main__':\n test_attributes_soma()\n test_attributes_soma_coordinates()\n test_attributes_cylinder()\n test_attributes_cylinder_coordinates()\n test_attributes_section()\n test_attributes_section_coordinates_single()\n test_attributes_section_coordinates_all()\n test_tree_cables_schematic()\n test_tree_cables_coordinates()\n test_tree_cables_from_points()\n test_tree_cables_from_swc()\n test_tree_soma_schematic()\n test_tree_soma_coordinates()\n test_tree_soma_from_points()\n test_tree_soma_from_points_3_point_soma()\n test_tree_soma_from_points_3_point_soma_incorrect()\n test_tree_soma_from_swc()\n test_tree_soma_from_swc_3_point_soma()\n test_construction_incorrect_arguments()\n test_from_points_minimal()\n test_from_points_incorrect()\n test_subtree_deletion()\n test_subgroup_indices()\n test_subgroup_attributes()\n test_subgroup_incorrect()\n test_topology()\n test_copy_section_soma()\n test_copy_section_section()\n test_copy_section_cylinder()\n test_generate_coordinates_deterministic()\n test_generate_coordinates_random_sections()\n test_generate_coordinates_random_compartments()\n 
test_generate_coordinates_random_all()\n test_generate_coordinates_no_overwrite()\n test_generate_coordinates_overwrite()\n test_generate_coordinates_mixed_overwrite()\n test_str_repr()\n",
"import brian2\nimport numpy\nimport os\nimport pickle\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport itertools\nimport re\n\nfrom brian2.utils.stringtools import indent\n\nfrom collections import defaultdict\n\n__all__ = ['FeatureTest', 'SpeedTest',\n 'InaccuracyError', 'Configuration',\n 'run_feature_tests', 'run_single_feature_test',\n 'run_speed_tests',\n 'DefaultConfiguration', 'LocalConfiguration',\n 'NumpyConfiguration',\n 'CythonConfiguration', 'CPPStandaloneConfiguration',\n 'CPPStandaloneConfigurationOpenMP']\n\n\nclass InaccuracyError(AssertionError):\n def __init__(self, error, *args):\n self.error = error\n AssertionError.__init__(self, *args)\n\nclass BaseTest(object):\n '''\n '''\n \n category = None # a string with the category of features\n name = None # a string with the particular feature name within the category\n tags = None # a list of tags (strings) of features used\n # whether or not to allow the device to override the time: this can be used to remove the\n # compilation overheads on certain devices (but some tests might want to include this)\n allow_time_override = True\n\n @classmethod\n def fullname(cls):\n return cls.category+': '+cls.name\n\n def run(self):\n '''\n Runs the feature test but do not return results (some devices may \n require an extra step before results are available).\n '''\n raise NotImplementedError\n\n def timed_run(self, duration):\n '''\n Do a timed run. This means that for RuntimeDevice it will run for defaultclock.dt before running for the\n rest of the duration. This means total run duration will be duration+defaultclock.dt.\n For standalone devices, this feature may or may not be implemented.\n '''\n if isinstance(brian2.get_device(), brian2.devices.RuntimeDevice):\n brian2.run(brian2.defaultclock.dt, level=1)\n brian2.run(duration, level=1)\n else:\n brian2.run(duration, level=1)\n\n\nclass FeatureTest(BaseTest):\n '''\n '''\n \n def results(self):\n '''\n Return the results after a run call.\n '''\n raise NotImplementedError\n \n def compare(self, maxrelerr, results_base, results_test):\n '''\n Compare results from standard Brian run to another run.\n \n This method or `check` should be implemented.\n '''\n raise NotImplementedError\n \n def check(self, maxrelerr, results):\n '''\n Check results are valid (e.g. 
analytically).\n \n This method or `compare` should be implemented.\n '''\n raise NotImplementedError\n \n def compare_arrays(self, maxrelerr, v_base, v_test):\n '''\n Often you just want to compare the values of some arrays, this does that.\n '''\n if isinstance(v_base, dict):\n for k in v_base:\n self.compare_arrays(maxrelerr, v_base[k], v_test[k])\n else:\n I = (v_base!=0)\n err = numpy.amax(numpy.abs(v_base[I]-v_test[I])/v_base[I])\n if err>maxrelerr:\n raise InaccuracyError(err)\n if (v_test[-I]!=0).any():\n raise InaccuracyError(numpy.inf)\n \n \nclass SpeedTest(BaseTest):\n n_range = [1]\n n_label = 'n'\n n_axis_log = True\n time_axis_log = True\n \n def __init__(self, n):\n self.n = n\n \n def results(self):\n return self.n\n \n def compare(self, maxrelerr, results_base, results_test):\n pass\n \n def check(self, maxrelerr, results):\n pass\n \n def __call__(self):\n return self\n\n\nclass Configuration(object):\n '''\n '''\n \n name = None # The name of this configuration\n\n def __init__(self, maximum_run_time=1e7*brian2.second):\n maximum_run_time = float(maximum_run_time)*brian2.second\n self.maximum_run_time = maximum_run_time\n\n def before_run(self):\n pass\n \n def after_run(self):\n pass\n \n def get_last_run_time(self):\n '''\n Implement this to overwrite the measured runtime (e.g. to remove overhead).\n '''\n if hasattr(brian2.device, '_last_run_time'):\n return brian2.device._last_run_time\n raise NotImplementedError\n\n def get_last_run_completed_fraction(self):\n '''\n Implement this to overwrite the amount of the last run that was completed (for devices that allow breaking\n early if the maximum run time is exceeded).\n '''\n if hasattr(brian2.device, '_last_run_completed_fraction'):\n return brian2.device._last_run_completed_fraction\n return 1.0\n\n \nclass DefaultConfiguration(Configuration):\n name = 'Default'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('runtime')\n\n\nclass LocalConfiguration(Configuration):\n name = 'Local'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('runtime')\n brian2.prefs.load_preferences()\n\n\nclass NumpyConfiguration(Configuration):\n name = 'Numpy'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('runtime')\n brian2.prefs.codegen.target = 'numpy'\n\n\nclass CythonConfiguration(Configuration):\n name = 'Cython'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('runtime')\n brian2.prefs.codegen.target = 'cython'\n \n \nclass CPPStandaloneConfiguration(Configuration):\n name = 'C++ standalone'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('cpp_standalone', build_on_run=False)\n \n def after_run(self):\n if os.path.exists('cpp_standalone'):\n shutil.rmtree('cpp_standalone')\n brian2.device.build(directory='cpp_standalone', compile=True, run=True,\n with_output=False)\n\nclass CPPStandaloneConfigurationOpenMP(Configuration):\n name = 'C++ standalone (OpenMP)'\n def before_run(self):\n brian2.prefs.reset_to_defaults()\n brian2.set_device('cpp_standalone', build_on_run=False)\n brian2.prefs.devices.cpp_standalone.openmp_threads = 4\n \n def after_run(self):\n if os.path.exists('cpp_standalone'):\n shutil.rmtree('cpp_standalone')\n brian2.device.build(directory='cpp_standalone', compile=True, run=True,\n with_output=False)\n \n \ndef results(configuration, feature, n=None, maximum_run_time=1e7*brian2.second):\n tempfilename = tempfile.mktemp('exception')\n if n is None:\n init_args = 
''\n else:\n init_args = str(n)\n code_string = '''\n__file__ = '{fname}'\nimport brian2\nfrom {config_module} import {config_name}\nfrom {feature_module} import {feature_name}\nconfiguration = {config_name}()\nfeature = {feature_name}({init_args})\nimport warnings, traceback, pickle, sys, os, time\nwarnings.simplefilter('ignore')\ntry:\n start_time = time.time()\n configuration.before_run()\n brian2.device._set_maximum_run_time({maximum_run_time})\n feature.run()\n configuration.after_run()\n results = feature.results()\n run_time = time.time()-start_time\n if feature.allow_time_override:\n try:\n run_time = configuration.get_last_run_time()\n except NotImplementedError:\n pass\n lrcf = configuration.get_last_run_completed_fraction()\n run_time = run_time/lrcf\n prof_info = brian2.magic_network.profiling_info\n new_prof_info = []\n for n, t in prof_info:\n new_prof_info.append((n, t/lrcf))\n f = open(r'{tempfname}', 'wb')\n pickle.dump((None, results, run_time, new_prof_info), f, -1)\n f.close()\nexcept Exception, ex:\n #traceback.print_exc(file=sys.stdout)\n tb = traceback.format_exc()\n f = open(r'{tempfname}', 'wb')\n pickle.dump((tb, ex, 0.0, []), f, -1)\n f.close()\n '''.format(config_module=configuration.__module__,\n config_name=configuration.__name__,\n feature_module=feature.__module__,\n feature_name=feature.__name__,\n tempfname=tempfilename,\n fname=__file__,\n init_args=init_args,\n maximum_run_time=float(maximum_run_time),\n )\n args = [sys.executable, '-c',\n code_string]\n # Run the example in a new process and make sure that stdout gets\n # redirected into the capture plugin\n p = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n #sys.stdout.write(stdout)\n #sys.stderr.write(stderr)\n with open(tempfilename, 'rb') as f:\n tb, res, runtime, profiling_info = pickle.load(f)\n return tb, res, runtime, profiling_info\n \n\ndef check_or_compare(feature, res, baseline, maxrelerr):\n feature = feature()\n try:\n feature.check(maxrelerr, res)\n except NotImplementedError:\n feature.compare(maxrelerr, baseline, res)\n \n\ndef run_single_feature_test(configuration, feature):\n return results(configuration, feature) \n\n \ndef run_feature_tests(configurations=None, feature_tests=None,\n strict=1e-5, tolerant=0.05, verbose=True, maximum_run_time=1e7*brian2.second):\n if configurations is None:\n # some configurations to attempt to import\n try:\n import brian2genn.correctness_testing\n except:\n pass\n configurations = Configuration.__subclasses__()\n if feature_tests is None:\n feature_tests = FeatureTest.__subclasses__()\n if DefaultConfiguration in configurations:\n configurations.remove(DefaultConfiguration)\n configurations = [DefaultConfiguration]+configurations\n feature_tests.sort(key=lambda ft: ft.fullname())\n if verbose:\n print('Running feature tests')\n print('Configurations:', ', '.join(c.name for c in configurations))\n\n full_results = {}\n tag_results = defaultdict(lambda:defaultdict(list))\n for ft in feature_tests:\n baseline = None\n if verbose:\n print(ft.fullname()+': [', end=' ')\n for configuration in configurations:\n txt = 'OK'\n sym = '.'\n exc = None\n tb, res, runtime, prof_info = results(configuration, ft, maximum_run_time=maximum_run_time)\n if isinstance(res, Exception):\n if isinstance(res, NotImplementedError):\n sym = 'N'\n txt = 'Not implemented'\n else:\n sym = 'E'\n txt = 'Error'\n if configuration is DefaultConfiguration:\n raise res\n else:\n if configuration is DefaultConfiguration:\n 
baseline = res \n try:\n check_or_compare(ft, res, baseline, strict)\n except InaccuracyError as exc:\n try:\n check_or_compare(ft, res, baseline, tolerant)\n sym = 'I'\n txt = 'Poor (error=%.2f%%)' % (100.0*exc.error)\n except InaccuracyError as exc:\n sym = 'F'\n txt = 'Fail (error=%.2f%%)' % (100.0*exc.error)\n sys.stdout.write(sym)\n full_results[configuration.name, ft.fullname()] = (sym, txt, exc, tb, runtime, prof_info)\n for tag in ft.tags:\n tag_results[tag][configuration.name].append((sym, txt, exc, tb, runtime, prof_info))\n if verbose:\n print(']')\n \n return FeatureTestResults(full_results, tag_results,\n configurations, feature_tests)\n\n\nclass FeatureTestResults(object):\n def __init__(self, full_results, tag_results,\n configurations, feature_tests):\n self.full_results = full_results\n self.tag_results = tag_results\n self.configurations = configurations\n self.feature_tests = feature_tests\n \n @property\n def test_table(self):\n table = []\n table.append(['Test']+[c.name for c in self.configurations])\n curcat = ''\n \n for ft in self.feature_tests:\n cat = ft.category\n if cat!=curcat:\n table.append([cat]+['']*len(self.configurations))\n curcat = cat\n row = [ft.name]\n for configuration in self.configurations:\n sym, txt, exc, tb, runtime, prof_info = self.full_results[configuration.name,\n ft.fullname()]\n row.append(txt)\n table.append(row)\n return make_table(table)\n \n @property\n def tag_table(self):\n table = []\n table.append(['Tag']+[c.name for c in self.configurations])\n tags = sorted(self.tag_results.keys())\n \n for tag in tags:\n row = [tag]\n for configuration in self.configurations:\n tag_res = self.tag_results[tag][configuration.name]\n syms = [sym for sym, txt, exc, tb, runtime, prof_info in tag_res]\n n = len(syms)\n okcount = sum(sym=='.' 
for sym in syms)\n poorcount = sum(sym=='I' for sym in syms)\n failcount = sum(sym=='F' for sym in syms)\n errcount = sum(sym=='E' for sym in syms)\n nicount = sum(sym=='N' for sym in syms)\n if okcount==n:\n txt = 'OK'\n elif nicount==n:\n txt = 'Not implemented'\n elif errcount==n:\n txt = 'Unsupported'\n elif okcount+poorcount==n:\n txt = 'Poor (%d%%)' % int(poorcount*100.0/n)\n elif okcount+poorcount+failcount==n:\n txt = 'Fail: {fail}% (poor={poor}%)'.format(\n fail=int(failcount*100.0/n),\n poor=int(poorcount*100.0/n),\n )\n else:\n txt = 'Fail: OK={ok}%, Poor={poor}%, Fail={fail}%, NotImpl={ni}% Error={err}%'.format(\n ok=int(okcount*100.0/n), poor=int(poorcount*100.0/n), \n fail=int(failcount*100.0/n), err=int(errcount*100.0/n),\n ni=int(nicount*100.0/n), \n )\n row.append(txt)\n table.append(row)\n return make_table(table)\n \n @property\n def tables(self):\n r = ''\n s = 'Feature test results'\n r += s+'\\n'+'-'*len(s)+'\\n\\n'+self.test_table+'\\n'\n s = 'Tag results'\n r += s+'\\n'+'-'*len(s)+'\\n\\n'+self.tag_table+'\\n'\n return r\n \n @property\n def exceptions(self):\n exc_list = []\n for configuration in self.configurations:\n curconfig = []\n for ft in self.feature_tests:\n sym, txt, exc, tb, runtime, prof_info = self.full_results[configuration.name,\n ft.fullname()]\n if tb is not None:\n curconfig.append((ft.fullname(), tb))\n if len(curconfig):\n exc_list.append((configuration.name, curconfig))\n if len(exc_list)==0:\n return ''\n r = ''\n s = 'Exceptions'\n r += s+'\\n'+'-'*len(s)+'\\n\\n'\n for config_name, curconfig in exc_list:\n s = config_name\n r += s+'\\n'+'^'*len(s)+'\\n\\n'\n for name, tb in curconfig:\n r += name+'::\\n\\n'+indent(tb)+'\\n\\n' \n return r\n \n @property\n def tables_and_exceptions(self):\n return self.tables+'\\n'+self.exceptions\n \n def __str__(self):\n return self.tables\n __repr__ = __str__\n\n\ndef run_speed_tests(configurations=None, speed_tests=None, run_twice=True, verbose=True,\n n_slice=slice(None), maximum_run_time=1e7*brian2.second):\n if configurations is None:\n # some configurations to attempt to import\n try:\n import brian2genn.correctness_testing\n except:\n pass\n configurations = Configuration.__subclasses__()\n if speed_tests is None:\n speed_tests = SpeedTest.__subclasses__()\n speed_tests.sort(key=lambda ft: ft.fullname())\n if verbose:\n print('Running speed tests')\n print('Configurations:', ', '.join(c.name for c in configurations))\n\n full_results = {}\n tag_results = defaultdict(lambda:defaultdict(list))\n for ft in speed_tests:\n if verbose:\n print(ft.fullname()+': ', end=' ')\n for n in ft.n_range[n_slice]:\n if verbose:\n print('n=%d [' % n, end=' ')\n for configuration in configurations:\n sym = '.'\n for _ in range(1+int(run_twice)):\n tb, res, runtime, prof_info = results(configuration, ft, n, maximum_run_time=maximum_run_time)\n if isinstance(res, Exception):\n if isinstance(res, NotImplementedError):\n sym = 'N'\n else:\n sym = 'E'\n if configuration is DefaultConfiguration:\n raise res\n runtime = numpy.NAN\n sys.stdout.write(sym)\n full_results[configuration.name, ft.fullname(), n, 'All'] = runtime\n suffixtime = defaultdict(float)\n overheadstime = float(runtime)\n for codeobjname, proftime in prof_info:\n # parts = codeobjname.split('_')\n # parts = [part for part in parts if not re.match(r'\\d+', part)]\n #suffix = '_'.join(parts)\n suffix = codeobjname\n suffixtime[suffix] += proftime\n overheadstime -= float(proftime)\n for suffix, proftime in list(suffixtime.items()):\n 
full_results[configuration.name, ft.fullname(), n, suffix] = proftime\n full_results[configuration.name, ft.fullname(), n, 'Overheads'] = overheadstime\n if verbose:\n print(']', end=' ')\n if verbose:\n print()\n \n return SpeedTestResults(full_results, configurations, speed_tests)\n\n\nclass SpeedTestResults(object):\n def __init__(self, full_results, configurations, speed_tests):\n self.full_results = full_results\n self.configurations = configurations\n self.speed_tests = speed_tests\n \n def get_ns(self, fullname):\n L = [(cn, fn, n, s)\n for cn, fn, n, s in self.full_results\n if fn == fullname]\n confignames, fullnames, n, codeobjsuffixes = zip(*L)\n return numpy.array(sorted(list(set(n))))\n\n def get_codeobjsuffixes(self, fullname):\n L = [(cn, fn, n, s)\n for cn, fn, n, s in self.full_results\n if fn == fullname]\n confignames, fullnames, n, codeobjsuffixes = zip(*L)\n return set(codeobjsuffixes)\n\n def plot_all_tests(self, relative=False, profiling_minimum=1.0):\n if relative and profiling_minimum<1:\n raise ValueError(\"Cannot use relative plots with profiling\")\n import pylab\n for st in self.speed_tests:\n fullname = st.fullname()\n pylab.figure()\n ns = self.get_ns(fullname)\n codeobjsuffixes = self.get_codeobjsuffixes(fullname)\n codeobjsuffixes.remove('All')\n codeobjsuffixes.remove('Overheads')\n codeobjsuffixes = ['All', 'Overheads']+sorted(codeobjsuffixes)\n if relative or profiling_minimum==1:\n codeobjsuffixes = ['All']\n baseline = None\n havelabel = set()\n markerstyles_cycle = iter(itertools.cycle(['o', 's', 'd', 'v', 'p', 'h', '^', '<', '>']))\n dashes = {}\n markerstyles = {}\n for isuffix, suffix in enumerate(codeobjsuffixes):\n cols = itertools.cycle(pylab.rcParams['axes.color_cycle'])\n for (iconfig, config), col in zip(enumerate(self.configurations), cols):\n configname = config.name\n runtimes = []\n skip = True\n for n in ns:\n runtime = self.full_results.get((configname, fullname, n, 'All'), numpy.nan)\n thistime = self.full_results.get((configname, fullname, n, suffix), numpy.nan)\n if float(thistime/runtime)>=profiling_minimum:\n skip = False\n runtimes.append(thistime)\n if skip:\n continue\n runtimes = numpy.array(runtimes)\n if relative:\n if baseline is None:\n baseline = runtimes\n runtimes = baseline/runtimes\n if suffix=='All':\n lw = 2\n label = configname\n else:\n lw = 1\n label = suffix\n plottable = sum(-numpy.isnan(runtimes[1:]+runtimes[:-1]))\n if plottable:\n if label in havelabel:\n label = None\n else:\n havelabel.add(label)\n dash = None\n msty = None\n if suffix!='All':\n if suffix in dashes:\n dash = dashes[suffix]\n msty = markerstyles[suffix]\n else:\n j = len(dashes)\n dash = (8, 2)\n for b in bin(j)[2:]:\n if b=='0':\n dash = dash+(2, 2)\n else:\n dash = dash+(4, 2)\n dashes[suffix] = dash\n markerstyles[suffix] = msty = next(markerstyles_cycle)\n line = pylab.plot(ns, runtimes, lw=lw, color=col, marker=msty,\n mec='none', ms=8, label=label)[0]\n if dash is not None:\n line.set_dashes(dash)\n pylab.title(fullname)\n pylab.legend(loc='best', fontsize='x-small', handlelength=8.0)\n pylab.xlabel(st.n_label)\n if st.n_axis_log:\n pylab.gca().set_xscale('log')\n if st.time_axis_log:\n pylab.gca().set_yscale('log')\n\n# Code below auto generates restructured text tables, copied from:\n# http://stackoverflow.com/questions/11347505/what-are-some-approaches-to-outputting-a-python-data-structure-to-restructuredte\n\ndef make_table(grid):\n max_cols = [max(out) for out in map(list, zip(*[[len(item) for item in row] for row in grid]))]\n 
rst = table_div(max_cols, 1)\n\n for i, row in enumerate(grid):\n header_flag = False\n if i == 0 or i == len(grid)-1: header_flag = True\n rst += normalize_row(row, max_cols)\n rst += table_div(max_cols, header_flag )\n return rst\n\n\ndef table_div(max_cols, header_flag=1):\n out = \"\"\n if header_flag == 1:\n style = \"=\"\n else:\n style = \"-\"\n\n for max_col in max_cols:\n out += max_col * style + \" \"\n\n out += \"\\n\"\n return out\n\n\ndef normalize_row(row, max_cols):\n r = \"\"\n for i, max_col in enumerate(max_cols):\n r += row[i] + (max_col - len(row[i]) + 1) * \" \"\n\n return r + \"\\n\"\n"
] |
[
[
"numpy.array",
"numpy.asarray"
],
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_equal"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.array",
"numpy.isnan",
"numpy.abs"
]
] |
kjohi/spark
|
[
"62bbeff24c2c23f52738738ea8b0a7bd4737aa4b"
] |
[
"python/pyspark/pandas/frame.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper class for Spark DataFrame to behave similar to pandas DataFrame.\n\"\"\"\nfrom collections import defaultdict, namedtuple\nfrom collections.abc import Mapping\nimport re\nimport warnings\nimport inspect\nimport json\nimport types\nfrom functools import partial, reduce\nimport sys\nfrom itertools import zip_longest, chain\nfrom types import TracebackType\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n IO,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n no_type_check,\n TYPE_CHECKING,\n)\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import ( # type: ignore[attr-defined]\n is_bool_dtype,\n is_list_like,\n is_dict_like,\n is_scalar,\n)\nfrom pandas.tseries.frequencies import DateOffset, to_offset\n\nif TYPE_CHECKING:\n from pandas.io.formats.style import Styler\n\nfrom pandas.core.dtypes.common import infer_dtype_from_object\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.dtypes.inference import is_sequence\nfrom pyspark import StorageLevel\nfrom pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F\nfrom pyspark.sql.functions import pandas_udf\nfrom pyspark.sql.types import (\n ArrayType,\n BooleanType,\n DataType,\n DoubleType,\n NumericType,\n Row,\n StringType,\n StructField,\n StructType,\n DecimalType,\n TimestampType,\n TimestampNTZType,\n)\nfrom pyspark.sql.window import Window\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, Label, Name, Scalar, T\nfrom pyspark.pandas.accessors import PandasOnSparkFrameMethods\nfrom pyspark.pandas.config import option_context, get_option\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods\nfrom pyspark.pandas.utils import (\n align_diff_frames,\n column_labels_level,\n combine_frames,\n default_session,\n is_name_like_tuple,\n is_name_like_value,\n is_testing,\n name_like_string,\n same_anchor,\n scol_for,\n validate_arguments_and_invoke_function,\n validate_axis,\n validate_bool_kwarg,\n validate_how,\n validate_mode,\n verify_temp_column_name,\n log_advice,\n)\nfrom pyspark.pandas.generic import Frame\nfrom pyspark.pandas.internal import (\n InternalField,\n InternalFrame,\n HIDDEN_COLUMNS,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_INDEX_NAME_FORMAT,\n SPARK_DEFAULT_INDEX_NAME,\n SPARK_DEFAULT_SERIES_NAME,\n SPARK_INDEX_NAME_PATTERN,\n)\nfrom pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame\nfrom pyspark.pandas.ml import corr\nfrom pyspark.pandas.typedef.typehints import (\n as_spark_type,\n 
infer_return_type,\n pandas_on_spark_type,\n spark_type_to_pandas_dtype,\n DataFrameType,\n SeriesType,\n ScalarType,\n create_tuple_for_frame_type,\n)\nfrom pyspark.pandas.plot import PandasOnSparkPlotAccessor\n\nif TYPE_CHECKING:\n from pyspark.sql._typing import OptionalPrimitiveType\n\n from pyspark.pandas.groupby import DataFrameGroupBy\n from pyspark.pandas.indexes import Index\n from pyspark.pandas.series import Series\n\n\n# These regular expression patterns are complied and defined here to avoid to compile the same\n# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.\n# Two patterns basically seek the footer string from Pandas'\nREPR_PATTERN = re.compile(r\"\\n\\n\\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\\]$\")\nREPR_HTML_PATTERN = re.compile(\n r\"\\n\\<p\\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\\<\\/p\\>\\n\\<\\/div\\>$\"\n)\n\n\n_flex_doc_FRAME = \"\"\"\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``. With reverse version, `{reverse}`.\n\nAmong flexible wrappers (`add`, `sub`, `mul`, `div`) to\narithmetic operators: `+`, `-`, `*`, `/`, `//`.\n\nParameters\n----------\nother : scalar\n Any single data\n\nReturns\n-------\nDataFrame\n Result of the arithmetic operation.\n\nExamples\n--------\n>>> df = ps.DataFrame({{'angles': [0, 3, 4],\n... 'degrees': [360, 180, 360]}},\n... index=['circle', 'triangle', 'rectangle'],\n... columns=['angles', 'degrees'])\n>>> df\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nAdd a scalar with operator version which return the same\nresults. Also reverse version.\n\n>>> df + 1\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(df)\n angles degrees\ncircle 0 720\ntriangle 6 360\nrectangle 8 720\n\n>>> df + df + df\n angles degrees\ncircle 0 1080\ntriangle 9 540\nrectangle 12 1080\n\n>>> df.radd(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\nDivide and true divide by constant with reverse version.\n\n>>> df / 10\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.div(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rdiv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\n>>> df.truediv(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rtruediv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\nSubtract by constant with reverse version.\n\n>>> df - 1\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\n>>> df.sub(1)\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\n>>> df.rsub(1)\n angles degrees\ncircle 1 -359\ntriangle -2 -179\nrectangle -3 -359\n\nMultiply by constant with reverse version.\n\n>>> df * 1\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\n>>> df.mul(1)\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\n>>> df.rmul(1)\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nFloor Divide by constant with reverse version.\n\n>>> df // 10\n angles degrees\ncircle 0.0 36.0\ntriangle 0.0 18.0\nrectangle 0.0 36.0\n\n>>> df.floordiv(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.0 18.0\nrectangle 0.0 36.0\n\n>>> df.rfloordiv(10) # doctest: +SKIP\n angles degrees\ncircle inf 0.0\ntriangle 3.0 0.0\nrectangle 
2.0 0.0\n\nMod by constant with reverse version.\n\n>>> df % 2\n angles degrees\ncircle 0 0\ntriangle 1 0\nrectangle 0 0\n\n>>> df.mod(2)\n angles degrees\ncircle 0 0\ntriangle 1 0\nrectangle 0 0\n\n>>> df.rmod(2)\n angles degrees\ncircle NaN 2\ntriangle 2.0 2\nrectangle 2.0 2\n\nPower by constant with reverse version.\n\n>>> df ** 2\n angles degrees\ncircle 0.0 129600.0\ntriangle 9.0 32400.0\nrectangle 16.0 129600.0\n\n>>> df.pow(2)\n angles degrees\ncircle 0.0 129600.0\ntriangle 9.0 32400.0\nrectangle 16.0 129600.0\n\n>>> df.rpow(2)\n angles degrees\ncircle 1.0 2.348543e+108\ntriangle 8.0 1.532496e+54\nrectangle 16.0 2.348543e+108\n\"\"\"\n\n\nclass DataFrame(Frame, Generic[T]):\n \"\"\"\n pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark\n DataFrame internally.\n\n :ivar _internal: an internal immutable Frame to manage metadata.\n :type _internal: InternalFrame\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \\\n or pandas-on-Spark Series\n Dict can contain Series, arrays, constants, or list-like objects\n Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a pandas-on-Spark Series,\n other arguments should not be used.\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = ps.DataFrame(data=d, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Constructing DataFrame from pandas DataFrame\n\n >>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = ps.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),\n ... 
columns=['a', 'b', 'c', 'd', 'e'])\n >>> df2 # doctest: +SKIP\n a b c d e\n 0 3 1 4 9 8\n 1 4 8 4 8 4\n 2 7 6 5 6 7\n 3 8 7 9 1 0\n 4 2 5 4 3 9\n \"\"\"\n\n def __init__( # type: ignore[no-untyped-def]\n self, data=None, index=None, columns=None, dtype=None, copy=False\n ):\n if isinstance(data, InternalFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n internal = data\n elif isinstance(data, SparkDataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n internal = InternalFrame(spark_frame=data, index_spark_columns=None)\n elif isinstance(data, ps.Series):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n data = data.to_frame()\n internal = data._internal\n else:\n if isinstance(data, pd.DataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n pdf = data\n else:\n pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)\n internal = InternalFrame.from_pandas(pdf)\n\n object.__setattr__(self, \"_internal_frame\", internal)\n\n @property\n def _pssers(self) -> Dict[Label, \"Series\"]:\n \"\"\"Return a dict of column label -> Series which anchors `self`.\"\"\"\n from pyspark.pandas.series import Series\n\n if not hasattr(self, \"_psseries\"):\n object.__setattr__(\n self,\n \"_psseries\",\n {label: Series(data=self, index=label) for label in self._internal.column_labels},\n )\n else:\n psseries = cast(Dict[Label, Series], self._psseries) # type: ignore[has-type]\n assert len(self._internal.column_labels) == len(psseries), (\n len(self._internal.column_labels),\n len(psseries),\n )\n if any(self is not psser._psdf for psser in psseries.values()):\n # Refresh the dict to contain only Series anchoring `self`.\n self._psseries = {\n label: (\n psseries[label]\n if self is psseries[label]._psdf\n else Series(data=self, index=label)\n )\n for label in self._internal.column_labels\n }\n return self._psseries\n\n @property\n def _internal(self) -> InternalFrame:\n return cast(InternalFrame, self._internal_frame) # type: ignore[has-type]\n\n def _update_internal_frame(\n self, internal: InternalFrame, requires_same_anchor: bool = True\n ) -> None:\n \"\"\"\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored\n and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy,\n updating the underlying Spark DataFrame which need to combine a different Spark DataFrame.\n\n :param internal: the new InternalFrame\n :param requires_same_anchor: whether checking the same anchor\n \"\"\"\n from pyspark.pandas.series import Series\n\n if hasattr(self, \"_psseries\"):\n psseries = {}\n\n for old_label, new_label in zip_longest(\n self._internal.column_labels, internal.column_labels\n ):\n if old_label is not None:\n psser = self._pssers[old_label]\n\n renamed = old_label != new_label\n not_same_anchor = requires_same_anchor and not same_anchor(internal, psser)\n\n if renamed or not_same_anchor:\n psdf: DataFrame = DataFrame(self._internal.select_column(old_label))\n psser._update_anchor(psdf)\n psser = None\n else:\n psser = None\n if new_label is not None:\n if psser is None:\n psser = Series(data=self, index=new_label)\n psseries[new_label] = psser\n\n 
self._psseries = psseries\n\n self._internal_frame = internal\n\n if hasattr(self, \"_repr_pandas_cache\"):\n del self._repr_pandas_cache\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n NaN 7 8\n >>> df.ndim\n 2\n \"\"\"\n return 2\n\n @property\n def axes(self) -> List:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n def _reduce_for_stat_function(\n self,\n sfun: Callable[[\"Series\"], Column],\n name: str,\n axis: Optional[Axis] = None,\n numeric_only: bool = True,\n **kwargs: Any,\n ) -> \"Series\":\n \"\"\"\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ----------\n sfun : either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n axis: used only for sanity check because series only support index axis.\n name : original pandas API name.\n axis : axis to apply. 0 or 1, or 'index' or 'columns.\n numeric_only : bool, default True\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility. 
Only 'DataFrame.count' uses this parameter\n currently.\n \"\"\"\n from pyspark.pandas.series import Series, first_series\n\n axis = validate_axis(axis)\n if axis == 0:\n min_count = kwargs.get(\"min_count\", 0)\n\n exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]\n new_column_labels = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n\n is_numeric_or_boolean = isinstance(\n psser.spark.data_type, (NumericType, BooleanType)\n )\n keep_column = not numeric_only or is_numeric_or_boolean\n\n if keep_column:\n scol = sfun(psser)\n\n if min_count > 0:\n scol = F.when(Frame._count_expr(psser) >= min_count, scol)\n\n exprs.append(scol.alias(name_like_string(label)))\n new_column_labels.append(label)\n\n if len(exprs) == 1:\n return Series([])\n\n sdf = self._internal.spark_frame.select(*exprs)\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=new_column_labels,\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal).transpose())\n\n else:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self.head(limit + 1)._to_internal_pandas()\n pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs)\n if len(pdf) <= limit:\n return Series(pser)\n\n @pandas_udf(returnType=as_spark_type(pser.dtype.type)) # type: ignore[call-overload]\n def calculate_columns_axis(*cols: pd.Series) -> pd.Series:\n return getattr(pd.concat(cols, axis=1), name)(\n axis=axis, numeric_only=numeric_only, **kwargs\n )\n\n column_name = verify_temp_column_name(\n self._internal.spark_frame.select(self._internal.index_spark_columns),\n \"__calculate_columns_axis__\",\n )\n sdf = self._internal.spark_frame.select(\n self._internal.index_spark_columns\n + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)]\n )\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n return first_series(DataFrame(internal)).rename(pser.name)\n\n def _psser_for(self, label: Label) -> \"Series\":\n \"\"\"\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can be used with `InternalFrame.column_labels`:\n\n >>> self._psser_for(self._internal.column_labels[0])\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n\n `self._psser_for(label)` must not be used directly with user inputs.\n In that case, `self[label]` should be used instead, which checks the label exists or not:\n\n >>> self['id']\n 0 0\n 1 1\n 2 2\n Name: id, dtype: int64\n \"\"\"\n return self._pssers[label]\n\n def _apply_series_op(\n self, op: Callable[[\"Series\"], Union[\"Series\", Column]], should_resolve: bool = False\n ) -> \"DataFrame\":\n applied = []\n for label in self._internal.column_labels:\n applied.append(op(self._psser_for(label)))\n internal = self._internal.with_new_columns(applied)\n if should_resolve:\n internal = 
internal.resolved_copy\n return DataFrame(internal)\n\n # Arithmetic Operators\n def _map_series_op(self, op: str, other: Any) -> \"DataFrame\":\n from pyspark.pandas.base import IndexOpsMixin\n\n if not isinstance(other, DataFrame) and (\n isinstance(other, IndexOpsMixin) or is_sequence(other)\n ):\n raise TypeError(\n \"%s with a sequence is currently not supported; \"\n \"however, got %s.\" % (op, type(other).__name__)\n )\n\n if isinstance(other, DataFrame):\n if self._internal.column_labels_level != other._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n if not same_anchor(self, other):\n # Different DataFrames\n def apply_op(\n psdf: DataFrame,\n this_column_labels: List[Label],\n that_column_labels: List[Label],\n ) -> Iterator[Tuple[\"Series\", Label]]:\n for this_label, that_label in zip(this_column_labels, that_column_labels):\n yield (\n getattr(psdf._psser_for(this_label), op)(\n psdf._psser_for(that_label)\n ).rename(this_label),\n this_label,\n )\n\n return align_diff_frames(apply_op, self, other, fillna=True, how=\"full\")\n else:\n applied = []\n column_labels = []\n for label in self._internal.column_labels:\n if label in other._internal.column_labels:\n applied.append(getattr(self._psser_for(label), op)(other._psser_for(label)))\n else:\n applied.append(\n SF.lit(None)\n .cast(self._internal.spark_type_for(label))\n .alias(name_like_string(label))\n )\n column_labels.append(label)\n for label in other._internal.column_labels:\n if label not in column_labels:\n applied.append(\n SF.lit(None)\n .cast(other._internal.spark_type_for(label))\n .alias(name_like_string(label))\n )\n column_labels.append(label)\n internal = self._internal.with_new_columns(applied, column_labels=column_labels)\n return DataFrame(internal)\n else:\n return self._apply_series_op(lambda psser: getattr(psser, op)(other))\n\n def __add__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"add\", other)\n\n def __radd__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"radd\", other)\n\n def __truediv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"truediv\", other)\n\n def __rtruediv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rtruediv\", other)\n\n def __mul__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"mul\", other)\n\n def __rmul__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rmul\", other)\n\n def __sub__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"sub\", other)\n\n def __rsub__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rsub\", other)\n\n def __pow__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"pow\", other)\n\n def __rpow__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rpow\", other)\n\n def __mod__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"mod\", other)\n\n def __rmod__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rmod\", other)\n\n def __floordiv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"floordiv\", other)\n\n def __rfloordiv__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"rfloordiv\", other)\n\n def __abs__(self) -> \"DataFrame\":\n return self._apply_series_op(lambda psser: abs(psser))\n\n def __neg__(self) -> \"DataFrame\":\n return self._apply_series_op(lambda psser: -psser)\n\n def add(self, other: Any) -> \"DataFrame\":\n return self 
+ other\n\n # create accessor for plot\n plot = CachedAccessor(\"plot\", PandasOnSparkPlotAccessor)\n\n # create accessor for Spark related methods.\n spark = CachedAccessor(\"spark\", SparkFrameMethods)\n\n # create accessor for pandas-on-Spark specific methods.\n pandas_on_spark = CachedAccessor(\"pandas_on_spark\", PandasOnSparkFrameMethods)\n\n # keep the name \"koalas\" for backward compatibility.\n koalas = CachedAccessor(\"koalas\", PandasOnSparkFrameMethods)\n\n @no_type_check\n def hist(self, bins=10, **kwds):\n return self.plot.hist(bins, **kwds)\n\n hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__\n\n @no_type_check\n def kde(self, bw_method=None, ind=None, **kwds):\n return self.plot.kde(bw_method, ind, **kwds)\n\n kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__\n\n add.__doc__ = _flex_doc_FRAME.format(\n desc=\"Addition\", op_name=\"+\", equiv=\"dataframe + other\", reverse=\"radd\"\n )\n\n def radd(self, other: Any) -> \"DataFrame\":\n return other + self\n\n radd.__doc__ = _flex_doc_FRAME.format(\n desc=\"Addition\", op_name=\"+\", equiv=\"other + dataframe\", reverse=\"add\"\n )\n\n def div(self, other: Any) -> \"DataFrame\":\n return self / other\n\n div.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"dataframe / other\", reverse=\"rdiv\"\n )\n\n divide = div\n\n def rdiv(self, other: Any) -> \"DataFrame\":\n return other / self\n\n rdiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"other / dataframe\", reverse=\"div\"\n )\n\n def truediv(self, other: Any) -> \"DataFrame\":\n return self / other\n\n truediv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"dataframe / other\", reverse=\"rtruediv\"\n )\n\n def rtruediv(self, other: Any) -> \"DataFrame\":\n return other / self\n\n rtruediv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Floating division\", op_name=\"/\", equiv=\"other / dataframe\", reverse=\"truediv\"\n )\n\n def mul(self, other: Any) -> \"DataFrame\":\n return self * other\n\n mul.__doc__ = _flex_doc_FRAME.format(\n desc=\"Multiplication\", op_name=\"*\", equiv=\"dataframe * other\", reverse=\"rmul\"\n )\n\n multiply = mul\n\n def rmul(self, other: Any) -> \"DataFrame\":\n return other * self\n\n rmul.__doc__ = _flex_doc_FRAME.format(\n desc=\"Multiplication\", op_name=\"*\", equiv=\"other * dataframe\", reverse=\"mul\"\n )\n\n def sub(self, other: Any) -> \"DataFrame\":\n return self - other\n\n sub.__doc__ = _flex_doc_FRAME.format(\n desc=\"Subtraction\", op_name=\"-\", equiv=\"dataframe - other\", reverse=\"rsub\"\n )\n\n subtract = sub\n\n def rsub(self, other: Any) -> \"DataFrame\":\n return other - self\n\n rsub.__doc__ = _flex_doc_FRAME.format(\n desc=\"Subtraction\", op_name=\"-\", equiv=\"other - dataframe\", reverse=\"sub\"\n )\n\n def mod(self, other: Any) -> \"DataFrame\":\n return self % other\n\n mod.__doc__ = _flex_doc_FRAME.format(\n desc=\"Modulo\", op_name=\"%\", equiv=\"dataframe % other\", reverse=\"rmod\"\n )\n\n def rmod(self, other: Any) -> \"DataFrame\":\n return other % self\n\n rmod.__doc__ = _flex_doc_FRAME.format(\n desc=\"Modulo\", op_name=\"%\", equiv=\"other % dataframe\", reverse=\"mod\"\n )\n\n def pow(self, other: Any) -> \"DataFrame\":\n return self ** other\n\n pow.__doc__ = _flex_doc_FRAME.format(\n desc=\"Exponential power of series\", op_name=\"**\", equiv=\"dataframe ** other\", reverse=\"rpow\"\n )\n\n def rpow(self, other: Any) -> \"DataFrame\":\n return other ** self\n\n rpow.__doc__ = 
_flex_doc_FRAME.format(\n desc=\"Exponential power\", op_name=\"**\", equiv=\"other ** dataframe\", reverse=\"pow\"\n )\n\n def floordiv(self, other: Any) -> \"DataFrame\":\n return self // other\n\n floordiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Integer division\", op_name=\"//\", equiv=\"dataframe // other\", reverse=\"rfloordiv\"\n )\n\n def rfloordiv(self, other: Any) -> \"DataFrame\":\n return other // self\n\n rfloordiv.__doc__ = _flex_doc_FRAME.format(\n desc=\"Integer division\", op_name=\"//\", equiv=\"other // dataframe\", reverse=\"floordiv\"\n )\n\n # Comparison Operators\n def __eq__(self, other: Any) -> \"DataFrame\": # type: ignore[override]\n return self._map_series_op(\"eq\", other)\n\n def __ne__(self, other: Any) -> \"DataFrame\": # type: ignore[override]\n return self._map_series_op(\"ne\", other)\n\n def __lt__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"lt\", other)\n\n def __le__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"le\", other)\n\n def __ge__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"ge\", other)\n\n def __gt__(self, other: Any) -> \"DataFrame\":\n return self._map_series_op(\"gt\", other)\n\n def eq(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.eq(1)\n a b\n a True True\n b False False\n c False True\n d False False\n \"\"\"\n return self == other\n\n equals = eq\n\n def gt(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.gt(2)\n a b\n a False False\n b False False\n c True False\n d True False\n \"\"\"\n return self > other\n\n def ge(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ge(1)\n a b\n a True True\n b True False\n c True True\n d True False\n \"\"\"\n return self >= other\n\n def lt(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.lt(1)\n a b\n a False False\n b False False\n c False False\n d False False\n \"\"\"\n return self < other\n\n def le(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.le(2)\n a b\n a True True\n b True False\n c False True\n d False False\n \"\"\"\n return self <= other\n\n def ne(self, other: Any) -> \"DataFrame\":\n \"\"\"\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.ne(1)\n a b\n a False False\n b True True\n c True False\n d True True\n \"\"\"\n return self != other\n\n def applymap(self, func: Callable[[Any], Any]) -> \"DataFrame\":\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> def str_len(x) -> int:\n ... return len(str(x))\n >>> df.applymap(str_len)\n 0 1\n 0 3 4\n 1 5 5\n\n >>> def power(x) -> float:\n ... return x ** 2\n >>> df.applymap(power)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.applymap(lambda x: x ** 2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n\n # TODO: We can implement shortcut theoretically since it creates new DataFrame\n # anyway and we don't have to worry about operations on different DataFrames.\n return self._apply_series_op(lambda psser: psser.apply(func))\n\n # TODO: not all arguments are implemented comparing to pandas' for now.\n def aggregate(self, func: Union[List[str], Dict[Name, List[str]]]) -> \"DataFrame\":\n \"\"\"Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions (list of strings).\n If a list is given, the aggregation is performed against\n all columns.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Invoke function on DataFrame.\n DataFrame.transform : Only perform transforming type operations.\n DataFrame.groupby : Perform operations over groups.\n Series.aggregate : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n >>> df\n A B C\n 0 1.0 2.0 3.0\n 1 4.0 5.0 6.0\n 2 7.0 8.0 9.0\n 3 NaN NaN NaN\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index()\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index()\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n\n For multi-index columns:\n\n >>> df.columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")])\n >>> df.agg(['sum', 'min'])[[(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")]].sort_index()\n X Y\n A B C\n min 1.0 2.0 3.0\n sum 12.0 15.0 18.0\n\n >>> aggregated = df.agg({(\"X\", \"A\") : ['sum', 'min'], (\"X\", \"B\") : ['min', 'max']})\n >>> aggregated[[(\"X\", \"A\"), (\"X\", \"B\")]].sort_index() # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n max NaN 8.0\n min 1.0 2.0\n sum 12.0 NaN\n \"\"\"\n from pyspark.pandas.groupby import GroupBy\n\n if isinstance(func, list):\n if all((isinstance(f, str) for f in func)):\n func = dict([(column, func) for column in self.columns])\n else:\n raise ValueError(\n \"If the given function is a list, it \"\n \"should only contains function names as strings.\"\n )\n\n if not isinstance(func, dict) or not all(\n is_name_like_value(key)\n and (\n isinstance(value, str)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n )\n for key, value in func.items()\n ):\n raise ValueError(\n \"aggs must be a dict mapping from column name to aggregate \"\n \"functions (string or list of strings).\"\n )\n\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func))\n\n # The codes below basically converts:\n #\n # A B\n # sum min min max\n # 0 12.0 1.0 2.0 8.0\n #\n # to:\n # A B\n # max NaN 8.0\n # min 1.0 2.0\n # sum 12.0 NaN\n #\n # Aggregated output is usually pretty much small.\n\n return psdf.stack().droplevel(0)[list(func.keys())]\n\n agg = aggregate\n\n def corr(self, method: str = \"pearson\") -> \"DataFrame\":\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between pandas-on-Spark and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. 
pandas-on-Spark will return an error.\n * pandas-on-Spark doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n \"\"\"\n return cast(DataFrame, ps.from_pandas(corr(self, method)))\n\n def iteritems(self) -> Iterator[Tuple[Name, \"Series\"]]:\n \"\"\"\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n \"\"\"\n return (\n (label if len(label) > 1 else label[0], self._psser_for(label))\n for label in self._internal.column_labels\n )\n\n def iterrows(self) -> Iterator[Tuple[Name, pd.Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of the row as a Series.\n\n it : generator\n A generator that iterates over the rows of the frame.\n\n Notes\n -----\n\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. 
Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n\n columns = self.columns\n internal_index_columns = self._internal.index_spark_column_names\n internal_data_columns = self._internal.data_spark_column_names\n\n def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:\n k = (\n row[internal_index_columns[0]]\n if len(internal_index_columns) == 1\n else tuple(row[c] for c in internal_index_columns)\n )\n v = [row[c] for c in internal_data_columns]\n return k, v\n\n for k, v in map(\n extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()\n ):\n s = pd.Series(v, index=columns, name=k)\n yield k, s\n\n def itertuples(\n self, index: bool = True, name: Optional[str] = \"PandasOnSpark\"\n ) -> Iterator[Tuple]:\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"PandasOnSpark\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n\n >>> for row in df.itertuples():\n ... print(row)\n ...\n PandasOnSpark(Index='dog', num_legs=4, num_wings=0)\n PandasOnSpark(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n PandasOnSpark(num_legs=4, num_wings=0)\n PandasOnSpark(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... 
print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n fields = list(self.columns)\n if index:\n fields.insert(0, \"Index\")\n\n index_spark_column_names = self._internal.index_spark_column_names\n data_spark_column_names = self._internal.data_spark_column_names\n\n def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:\n k = (\n row[index_spark_column_names[0]]\n if len(index_spark_column_names) == 1\n else tuple(row[c] for c in index_spark_column_names)\n )\n v = [row[c] for c in data_spark_column_names]\n return k, v\n\n can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255\n\n if name is not None and can_return_named_tuples:\n itertuple = namedtuple(name, fields, rename=True) # type: ignore[misc]\n for k, v in map(\n extract_kv_from_spark_row,\n self._internal.resolved_copy.spark_frame.toLocalIterator(),\n ):\n yield itertuple._make(([k] if index else []) + list(v))\n else:\n for k, v in map(\n extract_kv_from_spark_row,\n self._internal.resolved_copy.spark_frame.toLocalIterator(),\n ):\n yield tuple(([k] if index else []) + list(v))\n\n def items(self) -> Iterator[Tuple[Name, \"Series\"]]:\n \"\"\"This is an alias of ``iteritems``.\"\"\"\n return self.iteritems()\n\n def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n See Also\n --------\n read_clipboard : Read text from clipboard.\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... 
# 6, 7\n \"\"\"\n\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args\n )\n\n def to_html(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[Sequence[Name]] = None,\n col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n bold_rows: bool = True,\n classes: Optional[Union[str, list, tuple]] = None,\n escape: bool = True,\n notebook: bool = False,\n border: Optional[int] = None,\n table_id: Optional[str] = None,\n render_links: bool = False,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. 
',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n psdf = self.head(max_rows)\n else:\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args\n )\n\n def to_string(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[Sequence[Name]] = None,\n col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. 
If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n psdf = self.head(max_rows)\n else:\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args\n )\n\n def to_dict(self, orient: str = \"dict\", into: Type = dict) -> Union[List, Mapping]:\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... 
columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \\\n('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(<class 'list'>, {'col..., 'col...}), \\\ndefaultdict(<class 'list'>, {'col..., 'col...})]\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args\n )\n\n def to_latex(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[List[Name]] = None,\n col_space: Optional[int] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n bold_rows: bool = False,\n column_format: Optional[str] = None,\n longtable: Optional[bool] = None,\n escape: Optional[bool] = None,\n encoding: Optional[str] = None,\n decimal: str = \".\",\n multicolumn: Optional[bool] = None,\n multicolumn_format: Optional[str] = None,\n multirow: Optional[bool] = None,\n ) -> Optional[str]:\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. 
If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n <BLANKLINE>\n \"\"\"\n\n args = locals()\n psdf = self\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args\n )\n\n # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic\n # when creating arrays)\n def transpose(self) -> \"DataFrame\":\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. 
The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).transpose()\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the coerced dtype. For instance, if int and float have\n to be placed in same column, it becomes float. If type coercion is not\n possible, it fails.\n\n Also, note that the values in index should be unique because they become\n unique column names.\n\n In addition, if Spark 2.3 is used, the types should always be exactly same.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP\n >>> df1_transposed # doctest: +SKIP\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes # doctest: +SKIP\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'score': [9.5, 8],\n ... 'kids': [0, 0],\n ... 'age': [12, 22]}\n >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])\n >>> df2\n score kids age\n 0 9.5 0 12\n 1 8.0 0 22\n\n >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP\n >>> df2_transposed # doctest: +SKIP\n 0 1\n age 12.0 22.0\n kids 0.0 0.0\n score 9.5 8.0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the coerced dtype:\n\n >>> df2.dtypes\n score float64\n kids int64\n age int64\n dtype: object\n\n >>> df2_transposed.dtypes # doctest: +SKIP\n 0 float64\n 1 float64\n dtype: object\n \"\"\"\n max_compute_count = get_option(\"compute.max_rows\")\n if max_compute_count is not None:\n pdf = self.head(max_compute_count + 1)._to_internal_pandas()\n if len(pdf) > max_compute_count:\n raise ValueError(\n \"Current DataFrame has more then the given limit {0} rows. \"\n \"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' \"\n \"to retrieve to retrieve more than {0} rows. 
Note that, before changing the \"\n \"'compute.max_rows', this operation is considerably expensive.\".format(\n max_compute_count\n )\n )\n return DataFrame(pdf.transpose())\n\n # Explode the data to be pairs.\n #\n # For instance, if the current input DataFrame is as below:\n #\n # +------+------+------+------+------+\n # |index1|index2|(a,x1)|(a,x2)|(b,x3)|\n # +------+------+------+------+------+\n # | y1| z1| 1| 0| 0|\n # | y2| z2| 0| 50| 0|\n # | y3| z3| 3| 2| 1|\n # +------+------+------+------+------+\n #\n # Output of `exploded_df` becomes as below:\n #\n # +-----------------+-----------------+-----------------+-----+\n # | index|__index_level_0__|__index_level_1__|value|\n # +-----------------+-----------------+-----------------+-----+\n # |{\"a\":[\"y1\",\"z1\"]}| a| x1| 1|\n # |{\"a\":[\"y1\",\"z1\"]}| a| x2| 0|\n # |{\"a\":[\"y1\",\"z1\"]}| b| x3| 0|\n # |{\"a\":[\"y2\",\"z2\"]}| a| x1| 0|\n # |{\"a\":[\"y2\",\"z2\"]}| a| x2| 50|\n # |{\"a\":[\"y2\",\"z2\"]}| b| x3| 0|\n # |{\"a\":[\"y3\",\"z3\"]}| a| x1| 3|\n # |{\"a\":[\"y3\",\"z3\"]}| a| x2| 2|\n # |{\"a\":[\"y3\",\"z3\"]}| b| x3| 1|\n # +-----------------+-----------------+-----------------+-----+\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[\n SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))\n for i, col in enumerate(label)\n ],\n *[self._internal.spark_column_for(label).alias(\"value\")],\n )\n for label in self._internal.column_labels\n ]\n )\n )\n\n exploded_df = self._internal.spark_frame.withColumn(\"pairs\", pairs).select(\n [\n F.to_json(\n F.struct(\n F.array(*[scol for scol in self._internal.index_spark_columns]).alias(\"a\")\n )\n ).alias(\"index\"),\n F.col(\"pairs.*\"),\n ]\n )\n\n # After that, executes pivot with key and its index column.\n # Note that index column should contain unique values since column names\n # should be unique.\n internal_index_columns = [\n SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)\n ]\n pivoted_df = exploded_df.groupBy(internal_index_columns).pivot(\"index\")\n\n transposed_df = pivoted_df.agg(F.first(F.col(\"value\")))\n\n new_data_columns = list(\n filter(lambda x: x not in internal_index_columns, transposed_df.columns)\n )\n\n column_labels = [\n None if len(label) == 1 and label[0] is None else label\n for label in (tuple(json.loads(col)[\"a\"]) for col in new_data_columns)\n ]\n\n internal = InternalFrame(\n spark_frame=transposed_df,\n index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns],\n index_names=self._internal.column_label_names,\n column_labels=column_labels,\n data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns],\n column_label_names=self._internal.index_names,\n )\n\n return DataFrame(internal)\n\n T = property(transpose)\n\n def apply(\n self, func: Callable, axis: Axis = 0, args: Sequence[Any] = (), **kwds: Any\n ) -> Union[\"Series\", \"DataFrame\", \"Index\"]:\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``).\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: when `axis` is 0 or 'index', the `func` is unable to access\n to the whole input series. pandas-on-Spark internally splits the input series into\n multiple batches and calls `func` with each batch multiple times. 
Therefore, operations\n such as global aggregations are impossible. See the example below.\n\n >>> # This case does not return the length of whole series but of the batch internally\n ... # used.\n ... def length(s) -> int:\n ... return len(s)\n ...\n >>> df = ps.DataFrame({'A': range(1000)})\n >>> df.apply(length, axis=0) # doctest: +SKIP\n 0 83\n 1 83\n 2 83\n ...\n 10 83\n 11 83\n dtype: int32\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify the return type as `Series` or scalar value in ``func``,\n for instance, as below:\n\n >>> def square(s) -> ps.Series[np.int32]:\n ... return s ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n In case when axis is 1, it requires to specify `DataFrame` or scalar value\n with type hints as below:\n\n >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:\n ... return x + 1\n\n If the return type is specified as `DataFrame`, the output column names become\n `c0, c1, c2 ... cn`. These names are positionally mapped to the returned\n DataFrame in ``func``.\n\n To specify the column names, you can assign them in a pandas friendly style as below:\n\n >>> def plus_one(x) -> ps.DataFrame[(\"index\", int), [(\"a\", float), (\"b\", float)]]:\n ... return x + 1\n\n >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})\n >>> def plus_one(x) -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:\n ... return x + 1\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap : For elementwise operations.\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.transform : Only perform transforming type operations.\n Series.apply : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> def sqrt(x) -> ps.Series[float]:\n ... return np.sqrt(x)\n ...\n >>> df.apply(sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sqrt, axis=0)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n When `axis` is 1 or 'columns', it applies the function for each row.\n\n >>> def summation(x) -> np.int64:\n ... 
return np.sum(x)\n ...\n >>> df.apply(summation, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Likewise, you can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n >>> df.apply(max, axis=1)\n 0 9\n 1 9\n 2 9\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n In order to specify the types when `axis` is '1', it should use DataFrame[...]\n annotation. In this case, the column names are automatically generated.\n\n >>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]:\n ... return x\n ...\n >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE\n A B\n index\n 0 4 9\n 1 4 9\n 2 4 9\n\n You can also specify extra arguments.\n\n >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]:\n ... return a + b + c\n ...\n >>> df.apply(plus_two, axis=1, args=(1,), c=3)\n c0 c1\n 0 8 13\n 1 8 13\n 2 8 13\n \"\"\"\n from pyspark.pandas.groupby import GroupBy\n from pyspark.pandas.series import first_series\n\n if not isinstance(func, types.FunctionType):\n assert callable(func), \"the first argument should be a callable function.\"\n f = func\n # Note that the return type hint specified here affects actual return\n # type in Spark (e.g., infer_return_type). And, MyPy does not allow\n # redefinition of a function.\n func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731\n\n axis = validate_axis(axis)\n should_return_series = False\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n should_retain_index = should_infer_schema\n\n def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:\n pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]\n if isinstance(pdf_or_pser, pd.Series):\n return pdf_or_pser.to_frame()\n else:\n return pdf_or_pser\n\n self_applied: DataFrame = DataFrame(self._internal.resolved_copy)\n\n column_labels: Optional[List[Label]] = None\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n log_advice(\n \"If the type hints is not specified for `apply`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self_applied.head(limit + 1)._to_internal_pandas()\n applied = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]\n psser_or_psdf = ps.from_pandas(applied)\n if len(pdf) <= limit:\n return psser_or_psdf\n\n psdf = psser_or_psdf\n if isinstance(psser_or_psdf, ps.Series):\n should_return_series = True\n psdf = psser_or_psdf._psdf\n\n index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields]\n data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]\n\n return_schema = StructType([field.struct_field for field in index_fields + data_fields])\n\n output_func = GroupBy._make_pandas_df_builder_func(\n self_applied, apply_func, return_schema, retain_index=should_retain_index\n )\n sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(\n lambda iterator: map(output_func, iterator), schema=return_schema\n )\n\n # If schema is inferred, we can restore indexes too.\n internal = psdf._internal.with_new_sdf(\n spark_frame=sdf, index_fields=index_fields, 
data_fields=data_fields\n )\n else:\n return_type = infer_return_type(func)\n require_index_axis = isinstance(return_type, SeriesType)\n require_column_axis = isinstance(return_type, DataFrameType)\n index_fields = None\n\n if require_index_axis:\n if axis != 0:\n raise TypeError(\n \"The given function should specify a scalar or a series as its type \"\n \"hints when axis is 0 or 'index'; however, the return type \"\n \"was %s\" % return_sig\n )\n dtype = cast(SeriesType, return_type).dtype\n spark_type = cast(SeriesType, return_type).spark_type\n data_fields = [\n InternalField(\n dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)\n )\n for name in self_applied.columns\n ]\n return_schema = StructType([field.struct_field for field in data_fields])\n elif require_column_axis:\n if axis != 1:\n raise TypeError(\n \"The given function should specify a scalar or a frame as its type \"\n \"hints when axis is 1 or 'column'; however, the return type \"\n \"was %s\" % return_sig\n )\n index_fields = cast(DataFrameType, return_type).index_fields\n should_retain_index = len(index_fields) > 0\n data_fields = cast(DataFrameType, return_type).data_fields\n return_schema = cast(DataFrameType, return_type).spark_type\n else:\n # any axis is fine.\n should_return_series = True\n spark_type = cast(ScalarType, return_type).spark_type\n dtype = cast(ScalarType, return_type).dtype\n data_fields = [\n InternalField(\n dtype=dtype,\n struct_field=StructField(\n name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type\n ),\n )\n ]\n return_schema = StructType([field.struct_field for field in data_fields])\n column_labels = [None]\n\n output_func = GroupBy._make_pandas_df_builder_func(\n self_applied, apply_func, return_schema, retain_index=should_retain_index\n )\n sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(\n lambda iterator: map(output_func, iterator), schema=return_schema\n )\n\n index_spark_columns = None\n index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None\n\n if should_retain_index:\n index_spark_columns = [\n scol_for(sdf, index_field.struct_field.name) for index_field in index_fields\n ]\n\n if not any(\n [\n SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)\n for index_field in index_fields\n ]\n ):\n index_names = [(index_field.struct_field.name,) for index_field in index_fields]\n internal = InternalFrame(\n spark_frame=sdf,\n index_names=index_names,\n index_spark_columns=index_spark_columns,\n index_fields=index_fields,\n data_fields=data_fields,\n column_labels=column_labels,\n )\n\n result: DataFrame = DataFrame(internal)\n if should_return_series:\n return first_series(result)\n else:\n return result\n\n def transform(\n self, func: Callable[..., \"Series\"], axis: Axis = 0, *args: Any, **kwargs: Any\n ) -> \"DataFrame\":\n \"\"\"\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n .. 
note:: the series within ``func`` is actually multiple pandas series as the\n segments of the whole pandas-on-Spark series; therefore, the length of each series\n is not guaranteed. As an example, an aggregation against each series\n does work as a global aggregation but an aggregation of each segment. See\n below:\n\n >>> def func(x) -> ps.Series[np.int32]:\n ... return x + sum(x)\n\n Parameters\n ----------\n func : function\n Function to use for transforming the data. It must work when pandas Series\n is passed.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n *args\n Positional arguments to pass to func.\n **kwargs\n Keyword arguments to pass to func.\n\n Returns\n -------\n DataFrame\n A DataFrame that must have the same length as self.\n\n Raises\n ------\n Exception : If the returned DataFrame has a different length than self.\n\n See Also\n --------\n DataFrame.aggregate : Only perform aggregating type operations.\n DataFrame.apply : Invoke function on DataFrame.\n Series.transform : The equivalent function for Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n >>> def square(x) -> ps.Series[np.int32]:\n ... return x ** 2\n >>> df.transform(square)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> df.transform(lambda x: x ** 2)\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n For multi-index columns:\n\n >>> df.columns = [('X', 'A'), ('X', 'B')]\n >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 4\n 2 4 9\n\n >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n\n You can also specify extra arguments.\n\n >>> def calculation(x, y, z) -> ps.Series[int]:\n ... return x ** y + z\n >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE\n X\n A B\n 0 20 21\n 1 21 1044\n 2 1044 59069\n \"\"\"\n if not isinstance(func, types.FunctionType):\n assert callable(func), \"the first argument should be a callable function.\"\n f = func\n # Note that the return type hint specified here affects actual return\n # type in Spark (e.g., infer_return_type). 
And, MyPy does not allow\n # redefinition of a function.\n func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n\n if should_infer_schema:\n # Here we execute with the first 1000 to get the return type.\n # If the records were less than 1000, it uses pandas API directly for a shortcut.\n log_advice(\n \"If the type hints is not specified for `transform`, \"\n \"it is expensive to infer the data type internally.\"\n )\n limit = get_option(\"compute.shortcut_limit\")\n pdf = self.head(limit + 1)._to_internal_pandas()\n transformed = pdf.transform(func, axis, *args, **kwargs) # type: ignore[arg-type]\n psdf: DataFrame = DataFrame(transformed)\n if len(pdf) <= limit:\n return psdf\n\n applied = []\n data_fields = []\n for input_label, output_label in zip(\n self._internal.column_labels, psdf._internal.column_labels\n ):\n psser = self._psser_for(input_label)\n\n field = psdf._internal.field_for(output_label).normalize_spark_type()\n data_fields.append(field)\n\n return_schema = field.spark_type\n applied.append(\n psser.pandas_on_spark._transform_batch(\n func=lambda c: func(c, *args, **kwargs),\n return_type=SeriesType(field.dtype, return_schema),\n )\n )\n\n internal = self._internal.with_new_columns(applied, data_fields=data_fields)\n return DataFrame(internal)\n else:\n return self._apply_series_op(\n lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)\n )\n\n def pop(self, item: Name) -> \"DataFrame\":\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n\n Also support for MultiIndex\n\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... 
columns=('name', 'class', 'max_speed'))\n >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df\n a b\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('a')\n name class\n 0 falcon bird\n 1 parrot bird\n 2 lion mammal\n 3 monkey mammal\n\n >>> df\n b\n max_speed\n 0 389.0\n 1 24.0\n 2 80.5\n 3 NaN\n \"\"\"\n result = self[item]\n self._update_internal_frame(self.drop(columns=item)._internal)\n return result\n\n # TODO: add axis parameter can work when '1' or 'columns'\n def xs(self, key: Name, axis: Axis = 0, level: Optional[int] = None) -> DataFrameOrSeries:\n \"\"\"\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : 0 or 'index', default 0\n Axis to retrieve cross-section on.\n currently only support 0 or 'index'\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n\n Returns\n -------\n DataFrame or Series\n Cross-section from the original DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 
'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = ps.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n locomotion\n walks 4 0\n\n >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE\n num_legs 4\n num_wings 0\n Name: (mammal, dog, walks), dtype: int64\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if not is_name_like_value(key):\n raise TypeError(\"'key' should be a scalar value or tuple that contains scalar values\")\n\n if level is not None and is_name_like_tuple(key):\n raise KeyError(key)\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n if not is_name_like_tuple(key):\n key = (key,)\n if len(key) > self._internal.index_level:\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n len(key), self._internal.index_level\n )\n )\n if level is None:\n level = 0\n\n rows = [\n self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level)\n ]\n internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows))\n\n if len(key) == self._internal.index_level:\n psdf: DataFrame = DataFrame(internal)\n pdf = psdf.head(2)._to_internal_pandas()\n if len(pdf) == 0:\n raise KeyError(key)\n elif len(pdf) > 1:\n return psdf\n else:\n return first_series(DataFrame(pdf.transpose()))\n else:\n index_spark_columns = (\n internal.index_spark_columns[:level]\n + internal.index_spark_columns[level + len(key) :]\n )\n index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]\n index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]\n\n internal = internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n ).resolved_copy\n return DataFrame(internal)\n\n def between_time(\n self,\n start_time: Union[datetime.time, str],\n end_time: Union[datetime.time, str],\n include_start: bool = True,\n include_end: bool = True,\n axis: Axis = 0,\n ) -> \"DataFrame\":\n \"\"\"\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n Returns\n -------\n DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is 
not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> psdf.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> psdf.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n axis = validate_axis(axis)\n\n if axis != 0:\n raise NotImplementedError(\"between_time currently only works for axis=0\")\n\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n psdf = self.copy()\n psdf.index.name = verify_temp_column_name(psdf, \"__index_name__\")\n return_types = [psdf.index.dtype] + list(psdf.dtypes)\n\n def pandas_between_time( # type: ignore[no-untyped-def]\n pdf,\n ) -> ps.DataFrame[return_types]: # type: ignore[valid-type]\n return pdf.between_time(start_time, end_time, include_start, include_end).reset_index()\n\n # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a\n # default index, which will never be used. So use \"distributed\" index as a dummy to\n # avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time)\n\n return DataFrame(\n self._internal.copy(\n spark_frame=psdf._internal.spark_frame,\n index_spark_columns=psdf._internal.data_spark_columns[:1],\n index_fields=psdf._internal.data_fields[:1],\n data_spark_columns=psdf._internal.data_spark_columns[1:],\n data_fields=psdf._internal.data_fields[1:],\n )\n )\n\n # TODO: implement axis=1\n def at_time(\n self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)\n >>> psdf\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> psdf.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if asof:\n raise NotImplementedError(\"'asof' argument is not supported\")\n\n axis = validate_axis(axis)\n\n if axis != 0:\n raise NotImplementedError(\"at_time currently only works for axis=0\")\n\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n psdf = self.copy()\n psdf.index.name = verify_temp_column_name(psdf, \"__index_name__\")\n 
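# Give the index a temporary, non-conflicting column name; reset_index() inside\n # pandas_at_time then exposes it as the first column of each batch, and it is\n # restored as the index when the internal frame is rebuilt below.\n 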
return_types = [psdf.index.dtype] + list(psdf.dtypes)\n\n def pandas_at_time( # type: ignore[no-untyped-def]\n pdf,\n ) -> ps.DataFrame[return_types]: # type: ignore[valid-type]\n return pdf.at_time(time, asof, axis).reset_index()\n\n # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach\n # a default index, which will never be used. So use \"distributed\" index as a dummy\n # to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time)\n\n return DataFrame(\n self._internal.copy(\n spark_frame=psdf._internal.spark_frame,\n index_spark_columns=psdf._internal.data_spark_columns[:1],\n index_fields=psdf._internal.data_fields[:1],\n data_spark_columns=psdf._internal.data_spark_columns[1:],\n data_fields=psdf._internal.data_fields[1:],\n )\n )\n\n def where(\n self,\n cond: DataFrameOrSeries,\n other: Union[DataFrameOrSeries, Any] = np.nan,\n axis: Axis = None,\n ) -> \"DataFrame\":\n \"\"\"\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where False,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is False are replaced with corresponding value from other.\n axis : int, default None\n Can only be set to 0 at the moment for compatibility with pandas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.where(df1 > 0).sort_index()\n A B\n 0 NaN 100.0\n 1 1.0 200.0\n 2 2.0 300.0\n 3 3.0 400.0\n 4 4.0 500.0\n\n >>> df1.where(df1 > 1, 10).sort_index()\n A B\n 0 10 100\n 1 10 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df1 + 100).sort_index()\n A B\n 0 100 100\n 1 101 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n >>> df1.where(df1 > 1, df2).sort_index()\n A B\n 0 0 100\n 1 -1 200\n 2 2 300\n 3 3 400\n 4 4 500\n\n When the column name of cond is different from self, it treats all values are False\n\n >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0\n >>> cond\n C D\n 0 True False\n 1 False True\n 2 False False\n 3 True False\n 4 False True\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n When the type of cond is Series, it just check boolean regardless of column name\n\n >>> cond = ps.Series([1, 2]) > 1\n >>> cond\n 0 False\n 1 True\n dtype: bool\n\n >>> df1.where(cond).sort_index()\n A B\n 0 NaN NaN\n 1 1.0 200.0\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n tmp_cond_col_name = \"__tmp_cond_col_{}__\".format\n tmp_other_col_name = \"__tmp_other_col_{}__\".format\n\n psdf = self.copy()\n\n tmp_cond_col_names = [\n tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels\n ]\n if isinstance(cond, DataFrame):\n cond = cond[\n [\n (\n 
cond._internal.spark_column_for(label)\n if label in cond._internal.column_labels\n else SF.lit(False)\n ).alias(name)\n for label, name in zip(self._internal.column_labels, tmp_cond_col_names)\n ]\n ]\n psdf[tmp_cond_col_names] = cond\n elif isinstance(cond, Series):\n cond = cond.to_frame()\n cond = cond[\n [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]\n ]\n psdf[tmp_cond_col_names] = cond\n else:\n raise TypeError(\"type of cond must be a DataFrame or Series\")\n\n tmp_other_col_names = [\n tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels\n ]\n if isinstance(other, DataFrame):\n other = other[\n [\n (\n other._internal.spark_column_for(label)\n if label in other._internal.column_labels\n else SF.lit(np.nan)\n ).alias(name)\n for label, name in zip(self._internal.column_labels, tmp_other_col_names)\n ]\n ]\n psdf[tmp_other_col_names] = other\n elif isinstance(other, Series):\n other = other.to_frame()\n other = other[\n [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]\n ]\n psdf[tmp_other_col_names] = other\n else:\n for label in self._internal.column_labels:\n psdf[tmp_other_col_name(name_like_string(label))] = other\n\n # above logic make spark dataframe looks like below:\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n # | 0| 0|100| true| 0| false| ...\n # | 1| 1|200| false| -1| false| ...\n # | 3| 3|400| true| -3| false| ...\n # | 2| 2|300| false| -2| true| ...\n # | 4| 4|500| false| -4| false| ...\n # +-----------------+---+---+------------------+-------------------+------------------+--...\n\n data_spark_columns = []\n for label in self._internal.column_labels:\n data_spark_columns.append(\n F.when(\n psdf[tmp_cond_col_name(name_like_string(label))].spark.column,\n psdf._internal.spark_column_for(label),\n )\n .otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column)\n .alias(psdf._internal.spark_column_name_for(label))\n )\n\n return DataFrame(\n psdf._internal.with_new_columns(\n data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes?\n )\n )\n\n def mask(\n self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan\n ) -> \"DataFrame\":\n \"\"\"\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. 
Where True,\n replace with corresponding value from other.\n other : scalar, DataFrame\n Entries where cond is True are replaced with corresponding value from other.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})\n >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})\n >>> df1\n A B\n 0 0 100\n 1 1 200\n 2 2 300\n 3 3 400\n 4 4 500\n >>> df2\n A B\n 0 0 -100\n 1 -1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> df1.mask(df1 > 0).sort_index()\n A B\n 0 0.0 NaN\n 1 NaN NaN\n 2 NaN NaN\n 3 NaN NaN\n 4 NaN NaN\n\n >>> df1.mask(df1 > 1, 10).sort_index()\n A B\n 0 0 10\n 1 1 10\n 2 10 10\n 3 10 10\n 4 10 10\n\n >>> df1.mask(df1 > 1, df1 + 100).sort_index()\n A B\n 0 0 200\n 1 1 300\n 2 102 400\n 3 103 500\n 4 104 600\n\n >>> df1.mask(df1 > 1, df2).sort_index()\n A B\n 0 0 -100\n 1 1 -200\n 2 -2 -300\n 3 -3 -400\n 4 -4 -500\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series\n\n if not isinstance(cond, (DataFrame, Series)):\n raise TypeError(\"type of cond must be a DataFrame or Series\")\n\n cond_inversed = cond._apply_series_op(lambda psser: ~psser)\n return self.where(cond_inversed, other)\n\n @property\n def index(self) -> \"Index\":\n \"\"\"The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n \"\"\"\n from pyspark.pandas.indexes.base import Index\n\n return Index._new_instance(self)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n True\n \"\"\"\n return (\n len(self._internal.column_labels) == 0\n or self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n )\n\n @property\n def style(self) -> \"Styler\":\n \"\"\"\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n Examples\n --------\n >>> ps.range(1001).style # doctest: +SKIP\n <pandas.io.formats.style.Styler object at ...>\n \"\"\"\n max_results = get_option(\"compute.max_rows\")\n pdf = self.head(max_results + 1)._to_internal_pandas()\n if len(pdf) > max_results:\n warnings.warn(\"'style' property will only use top %s rows.\" % max_results, UserWarning)\n return pdf.head(max_results).style\n\n def set_index(\n self,\n keys: Union[Name, List[Name]],\n drop: bool = True,\n append: bool = False,\n inplace: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. 
Here, \"array\"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ps.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]},\n ... columns=['month', 'year', 'sale'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n key_list: List[Label]\n if is_name_like_tuple(keys):\n key_list = [cast(Label, keys)]\n elif is_name_like_value(keys):\n key_list = [(keys,)]\n else:\n key_list = [key if is_name_like_tuple(key) else (key,) for key in keys]\n columns = set(self._internal.column_labels)\n for key in key_list:\n if key not in columns:\n raise KeyError(name_like_string(key))\n\n if drop:\n column_labels = [\n label for label in self._internal.column_labels if label not in key_list\n ]\n else:\n column_labels = self._internal.column_labels\n if append:\n index_spark_columns = self._internal.index_spark_columns + [\n self._internal.spark_column_for(label) for label in key_list\n ]\n index_names = self._internal.index_names + key_list\n index_fields = self._internal.index_fields + [\n self._internal.field_for(label) for label in key_list\n ]\n else:\n index_spark_columns = [self._internal.spark_column_for(label) for label in key_list]\n index_names = key_list\n index_fields = [self._internal.field_for(label) for label in key_list]\n\n internal = self._internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels],\n data_fields=[self._internal.field_for(label) for label in column_labels],\n )\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def reset_index(\n self,\n level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: int = 0,\n col_fill: str = \"\",\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. 
This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ps.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = ps.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1,\n ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1,\n ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n multi_index = self._internal.index_level > 1\n\n def rename(index: int) -> Label:\n if multi_index:\n return (\"level_{}\".format(index),)\n else:\n if (\"index\",) not in self._internal.column_labels:\n return (\"index\",)\n else:\n return (\"level_{}\".format(index),)\n\n if level is None:\n new_column_labels = [\n name if name is not None else rename(i)\n for i, name in enumerate(self._internal.index_names)\n ]\n new_data_spark_columns = [\n scol.alias(name_like_string(label))\n for scol, label in zip(self._internal.index_spark_columns, new_column_labels)\n ]\n new_data_fields = self._internal.index_fields\n\n index_spark_columns = []\n index_names = []\n index_fields = []\n else:\n if is_list_like(level):\n level = list(cast(Sequence[Union[int, Name]], level))\n if isinstance(level, int) or is_name_like_tuple(level):\n level_list = [cast(Union[int, Label], level)]\n elif is_name_like_value(level):\n level_list = [(level,)]\n else:\n level_list = [\n lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,)\n for lvl in level\n ]\n\n if all(isinstance(lvl, int) for lvl in level_list):\n int_level_list = cast(List[int], level_list)\n for lev in int_level_list:\n if lev >= self._internal.index_level:\n raise IndexError(\n \"Too many levels: Index has only {} level, not {}\".format(\n self._internal.index_level, lev + 1\n )\n )\n idx = int_level_list\n elif all(is_name_like_tuple(lev) for lev in level_list):\n idx = []\n for label in cast(List[Label], level_list):\n try:\n i = self._internal.index_names.index(label)\n idx.append(i)\n except ValueError:\n if multi_index:\n raise KeyError(\"Level unknown not found\")\n else:\n raise KeyError(\n \"Level unknown must be same as name ({})\".format(\n name_like_string(self._internal.index_names[0])\n )\n )\n else:\n raise ValueError(\"Level should be all int or all string.\")\n idx.sort()\n\n new_column_labels = []\n new_data_spark_columns = []\n new_data_fields = []\n\n index_spark_columns = self._internal.index_spark_columns.copy()\n index_names = self._internal.index_names.copy()\n index_fields = self._internal.index_fields.copy()\n\n for i in idx[::-1]:\n name = index_names.pop(i)\n new_column_labels.insert(0, name if name is not None else rename(i))\n\n scol = index_spark_columns.pop(i)\n new_data_spark_columns.insert(0, scol.alias(name_like_string(name)))\n\n new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name)))\n\n if drop:\n new_data_spark_columns = []\n new_column_labels = []\n new_data_fields = []\n\n for label in new_column_labels:\n if label in self._internal.column_labels:\n raise ValueError(\"cannot insert {}, already exists\".format(name_like_string(label)))\n\n if self._internal.column_labels_level > 1:\n column_depth = len(self._internal.column_labels[0])\n if col_level >= column_depth:\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(\n column_depth, col_level + 1\n )\n )\n if any(col_level + len(label) > column_depth for label in new_column_labels):\n raise ValueError(\"Item must have length equal to number of levels.\")\n new_column_labels = [\n tuple(\n ([col_fill] * col_level)\n + list(label)\n + ([col_fill] * (column_depth - (len(label) + col_level)))\n )\n for label in new_column_labels\n ]\n\n 
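# Assemble the new frame: the reset index levels are prepended to the existing\n # data columns, while any index levels that were not reset remain as the index.\n 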
internal = self._internal.copy(\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n column_labels=new_column_labels + self._internal.column_labels,\n data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns,\n data_fields=new_data_fields + self._internal.data_fields,\n )\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def isnull(self) -> \"DataFrame\":\n \"\"\"\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n DataFrame.notnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n \"\"\"\n return self._apply_series_op(lambda psser: psser.isnull())\n\n isna = isnull\n\n def notnull(self) -> \"DataFrame\":\n \"\"\"\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n DataFrame.isnull\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n \"\"\"\n return self._apply_series_op(lambda psser: psser.notnull())\n\n notna = notnull\n\n def insert(\n self,\n loc: int,\n column: Name,\n value: Union[Scalar, \"Series\", Iterable],\n allow_duplicates: bool = False,\n ) -> None:\n \"\"\"\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. 
Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n Examples\n --------\n >>> psdf = ps.DataFrame([1, 2, 3])\n >>> psdf.sort_index()\n 0\n 0 1\n 1 2\n 2 3\n >>> psdf.insert(0, 'x', 4)\n >>> psdf.sort_index()\n x 0\n 0 4 1\n 1 4 2\n 2 4 3\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n\n >>> psdf.insert(1, 'y', [5, 6, 7])\n >>> psdf.sort_index()\n x y 0\n 0 4 5 1\n 1 4 6 2\n 2 4 7 3\n\n >>> psdf.insert(2, 'z', ps.Series([8, 9, 10]))\n >>> psdf.sort_index()\n x y z 0\n 0 4 5 8 1\n 1 4 6 9 2\n 2 4 7 10 3\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(loc, int):\n raise TypeError(\"loc must be int\")\n\n assert 0 <= loc <= len(self.columns)\n assert allow_duplicates is False\n\n if not is_name_like_value(column):\n raise TypeError(\n '\"column\" should be a scalar value or tuple that contains scalar values'\n )\n\n # TODO(SPARK-37723): Support tuple for non-MultiIndex column name.\n if is_name_like_tuple(column):\n if self._internal.column_labels_level > 1:\n if len(column) != len(self.columns.levels): # type: ignore[attr-defined]\n # To be consistent with pandas\n raise ValueError('\"column\" must have length equal to number of column levels.')\n else:\n raise NotImplementedError(\n \"Assigning column name as tuple is only supported for MultiIndex columns \"\n \"for now.\"\n )\n\n if column in self.columns:\n raise ValueError(\"cannot insert %s, already exists\" % str(column))\n\n psdf = self.copy()\n psdf[column] = value\n columns = psdf.columns[:-1].insert(loc, psdf.columns[-1])\n psdf = psdf[columns]\n self._update_internal_frame(psdf._internal)\n\n # TODO: add frep and axis parameter\n def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> \"DataFrame\":\n \"\"\"\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input DataFrame, shifted.\n\n Examples\n --------\n >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n\n \"\"\"\n return self._apply_series_op(\n lambda psser: psser._shift(periods, fill_value), should_resolve=True\n )\n\n # TODO: axis should support 1 or 'columns' either at this moment\n def diff(self, periods: int = 1, axis: Axis = 0) -> \"DataFrame\":\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. 
note:: the current implementation of diff uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n diffed : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True)\n\n # TODO: axis should support 1 or 'columns' either at this moment\n def nunique(\n self,\n axis: Axis = 0,\n dropna: bool = True,\n approx: bool = False,\n rsd: float = 0.05,\n ) -> \"Series\":\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to pandas-on-Spark and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.\n\n Returns\n -------\n The number of unique values per column as a pandas-on-Spark Series.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(dropna=False)\n A 3\n B 2\n dtype: int64\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> df.nunique(approx=True)\n A 3\n B 1\n dtype: int64\n \"\"\"\n from pyspark.pandas.series import first_series\n\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n sdf = self._internal.spark_frame.select(\n [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]\n + [\n self._psser_for(label)._nunique(dropna, approx, rsd)\n for label in self._internal.column_labels\n ]\n )\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = self._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n 
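# the result gets a dummy, unnamed single-row index (filled with null above)\n                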
index_names=[None],\n index_fields=[None],\n data_spark_columns=[\n scol_for(sdf, col) for col in self._internal.data_spark_column_names\n ],\n data_fields=None,\n )\n return first_series(DataFrame(internal).transpose())\n\n def round(self, decimals: Union[int, Dict[Name, int], \"Series\"] = 0) -> \"DataFrame\":\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n\n .. note:: If `decimals` is a Series, it is expected to be small,\n as all the data is loaded into the driver's memory.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n Series.round\n\n Examples\n --------\n >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],\n ... 'B':[0.992815, 0.645646, 0.149370],\n ... 'C':[0.173891, 0.577595, 0.491027]},\n ... columns=['A', 'B', 'C'],\n ... index=['first', 'second', 'third'])\n >>> df\n A B C\n first 0.028208 0.992815 0.173891\n second 0.038683 0.645646 0.577595\n third 0.877076 0.149370 0.491027\n\n >>> df.round(2)\n A B C\n first 0.03 0.99 0.17\n second 0.04 0.65 0.58\n third 0.88 0.15 0.49\n\n >>> df.round({'A': 1, 'C': 2})\n A B C\n first 0.0 0.992815 0.17\n second 0.0 0.645646 0.58\n third 0.9 0.149370 0.49\n\n >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])\n >>> df.round(decimals)\n A B C\n first 0.0 1.0 0.17\n second 0.0 1.0 0.58\n third 0.9 0.0 0.49\n \"\"\"\n if isinstance(decimals, ps.Series):\n decimals_dict = {\n k if isinstance(k, tuple) else (k,): v\n for k, v in decimals._to_internal_pandas().items()\n }\n elif isinstance(decimals, dict):\n decimals_dict = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()}\n elif isinstance(decimals, int):\n decimals_dict = {k: decimals for k in self._internal.column_labels}\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n def op(psser: ps.Series) -> Union[ps.Series, Column]:\n label = psser._column_label\n if label in decimals_dict:\n return F.round(psser.spark.column, decimals_dict[label])\n else:\n return psser\n\n return self._apply_series_op(op)\n\n def _mark_duplicates(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n ) -> Tuple[SparkDataFrame, str]:\n if subset is None:\n subset_list = self._internal.column_labels\n else:\n if is_name_like_tuple(subset):\n subset_list = [cast(Label, subset)]\n elif is_name_like_value(subset):\n subset_list = [(subset,)]\n else:\n subset_list = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset]\n diff = set(subset_list).difference(set(self._internal.column_labels))\n if len(diff) > 0:\n raise KeyError(\", \".join([name_like_string(d) for d in diff]))\n group_cols = [self._internal.spark_column_name_for(label) for label in subset_list]\n\n sdf = self._internal.resolved_copy.spark_frame\n\n column = verify_temp_column_name(sdf, \"__duplicated__\")\n\n if keep == \"first\" or keep == \"last\":\n if keep == \"first\":\n ord_func = F.asc\n else:\n ord_func = F.desc\n window = (\n Window.partitionBy(*group_cols)\n 
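# within each group of identical subset values, order by the frame's natural row order\n                # (ascending for keep='first', descending for keep='last') so that row_number() == 1\n                # is the occurrence to keep and every other row is flagged as a duplicate\n                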
.orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME))\n .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n )\n sdf = sdf.withColumn(column, F.row_number().over(window) > 1)\n elif not keep:\n window = Window.partitionBy(*group_cols).rowsBetween(\n Window.unboundedPreceding, Window.unboundedFollowing\n )\n sdf = sdf.withColumn(column, F.count(\"*\").over(window) > 1)\n else:\n raise ValueError(\"'keep' only supports 'first', 'last' and False\")\n return sdf, column\n\n def duplicated(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n ) -> \"Series\":\n \"\"\"\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates,\n by default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n duplicated : Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},\n ... columns = ['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 1 1 1\n 2 1 1 1\n 3 3 4 5\n\n >>> df.duplicated().sort_index()\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Mark duplicates as ``True`` except for the last occurrence.\n\n >>> df.duplicated(keep='last').sort_index()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n Mark all duplicates as ``True``.\n\n >>> df.duplicated(keep=False).sort_index()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n from pyspark.pandas.series import first_series\n\n sdf, column = self._mark_duplicates(subset, keep)\n\n sdf = sdf.select(\n self._internal.index_spark_columns\n + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]\n )\n return first_series(\n DataFrame(\n InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n column_labels=[None],\n data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],\n )\n )\n )\n\n # TODO: support other as DataFrame or array-like\n def dot(self, other: \"Series\") -> \"Series\":\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context(\n ... 'compute.max_rows', 1000, \"compute.ops_on_diff_frames\", True\n ... ): # doctest: +NORMALIZE_WHITESPACE\n ... psdf = ps.DataFrame({'a': range(1001)})\n ... psser = ps.Series([2], index=['a'])\n ... 
psdf.dot(psser)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n other : Series\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series\n Return the matrix product between self and other as a Series.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> psser = ps.Series([1, 1, 2, 1])\n >>> psdf.dot(psser)\n 0 -4\n 1 5\n dtype: int64\n\n Note how shuffling of the objects does not change the result.\n\n >>> psser2 = psser.reindex([1, 0, 2, 3])\n >>> psdf.dot(psser2)\n 0 -4\n 1 5\n dtype: int64\n >>> psdf @ psser2\n 0 -4\n 1 5\n dtype: int64\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, ps.Series):\n raise TypeError(\"Unsupported type {}\".format(type(other).__name__))\n else:\n return cast(ps.Series, other.dot(self.transpose())).rename(None)\n\n def __matmul__(self, other: \"Series\") -> \"Series\":\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def to_table(\n self,\n name: str,\n format: Optional[str] = None,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n ) -> None:\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_table`, \"\n \"the existing index is lost when converting to table.\"\n )\n mode = validate_mode(mode)\n return self.spark.to_table(name, format, mode, partition_cols, index_col, **options)\n\n to_table.__doc__ = SparkFrameMethods.to_table.__doc__\n\n def to_delta(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. 
By default, the index is always lost.\n options : dict\n All other options passed directly into Delta Lake.\n\n See Also\n --------\n read_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n Create a new Delta Lake table, partitioned by one column:\n\n >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP\n\n Partitioned by two columns:\n\n >>> df.to_delta('%s/to_delta/bar' % path,\n ... partition_cols=['date', 'country']) # doctest: +SKIP\n\n Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:\n\n >>> df.to_delta('%s/to_delta/bar' % path,\n ... mode='overwrite', replaceWhere='date >= \"2012-01-01\"') # doctest: +SKIP\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_delta`, \"\n \"the existing index is lost when converting to Delta.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore[assignment]\n\n mode = validate_mode(mode)\n self.spark.to_spark_io(\n path=path,\n mode=mode,\n format=\"delta\",\n partition_cols=partition_cols,\n index_col=index_col,\n **options,\n )\n\n def to_parquet(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n compression: Optional[str] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n ) -> None:\n \"\"\"\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}\n Compression codec to use when saving to file. If None is set, it uses the\n value specified in `spark.sql.parquet.compression.codec`.\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_parquet\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')\n\n >>> df.to_parquet(\n ... '%s/to_parquet/foo.parquet' % path,\n ... mode = 'overwrite',\n ... partition_cols=['date', 'country'])\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_parquet`, \"\n \"the existing index is lost when converting to Parquet.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n mode = validate_mode(mode)\n builder = self.to_spark(index_col=index_col).write.mode(mode)\n if partition_cols is not None:\n builder.partitionBy(partition_cols)\n if compression is not None:\n builder.option(\"compression\", compression)\n builder.options(**options).format(\"parquet\").save(path)\n\n def to_orc(\n self,\n path: str,\n mode: str = \"w\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : str, required\n Path to write to.\n mode : str\n Python write mode, default 'w'.\n\n .. note:: mode can accept the strings for Spark writing mode.\n Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.\n\n - 'append' (equivalent to 'a'): Append the new data to existing data.\n - 'overwrite' (equivalent to 'w'): Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n read_orc\n DataFrame.to_delta\n DataFrame.to_parquet\n DataFrame.to_table\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df\n date country code\n 0 2012-01-31 12:00:00 KR 1\n 1 2012-02-29 12:00:00 US 2\n 2 2012-03-31 12:00:00 JP 3\n\n >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')\n\n >>> df.to_orc(\n ... '%s/to_orc/foo.orc' % path,\n ... mode = 'overwrite',\n ... 
partition_cols=['date', 'country'])\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_orc`, \"\n \"the existing index is lost when converting to ORC.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore[assignment]\n\n mode = validate_mode(mode)\n self.spark.to_spark_io(\n path=path,\n mode=mode,\n format=\"orc\",\n partition_cols=partition_cols,\n index_col=index_col,\n **options,\n )\n\n def to_spark_io(\n self,\n path: Optional[str] = None,\n format: Optional[str] = None,\n mode: str = \"overwrite\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: \"OptionalPrimitiveType\",\n ) -> None:\n \"\"\"An alias for :func:`DataFrame.spark.to_spark_io`.\n See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.\n\n .. deprecated:: 3.2.0\n Use :func:`DataFrame.spark.to_spark_io` instead.\n \"\"\"\n warnings.warn(\"Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.\", FutureWarning)\n return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)\n\n to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__\n\n def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `to_spark`, \"\n \"the existing index is lost when converting to Spark DataFrame.\"\n )\n return self._to_spark(index_col)\n\n to_spark.__doc__ = SparkFrameMethods.__doc__\n\n def _to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:\n \"\"\"\n Same as `to_spark()`, without issueing the advice log when `index_col` is not specified\n for internal usage.\n \"\"\"\n return self.spark.frame(index_col)\n\n def to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n \"\"\"\n log_advice(\n \"`to_pandas` loads all data into the driver's memory. \"\n \"It should only be used if the resulting pandas DataFrame is expected to be small.\"\n )\n return self._to_pandas()\n\n def _to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Same as `to_pandas()`, without issueing the advice log for internal usage.\n \"\"\"\n return self._internal.to_pandas_frame.copy()\n\n def assign(self, **kwargs: Any) -> \"DataFrame\":\n \"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable, Series or Index}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas-on-Spark doesn't check it).\n If the values are not callable, (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},\n ... 
index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15,\n ... temp_idx=df.index)\n >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]\n temp_c temp_f temp_k temp_idx\n Portland 17.0 62.6 290.15 Portland\n Berkeley 25.0 77.0 298.15 Berkeley\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n pandas-on-Spark. In pandas-on-Spark, all items are computed first,\n and then assigned.\n \"\"\"\n return self._assign(kwargs)\n\n def _assign(self, kwargs: Any) -> \"DataFrame\":\n assert isinstance(kwargs, dict)\n from pyspark.pandas.indexes import MultiIndex\n from pyspark.pandas.series import IndexOpsMixin\n\n for k, v in kwargs.items():\n is_invalid_assignee = (\n not (isinstance(v, (IndexOpsMixin, Column)) or callable(v) or is_scalar(v))\n ) or isinstance(v, MultiIndex)\n if is_invalid_assignee:\n raise TypeError(\n \"Column assignment doesn't support type \" \"{0}\".format(type(v).__name__)\n )\n if callable(v):\n kwargs[k] = v(self)\n\n pairs = {\n (k if is_name_like_tuple(k) else (k,)): (\n (v.spark.column, v._internal.data_fields[0])\n if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex)\n else (v, None)\n if isinstance(v, Column)\n else (SF.lit(v), None)\n )\n for k, v in kwargs.items()\n }\n\n scols = []\n data_fields = []\n for label in self._internal.column_labels:\n for i in range(len(label)):\n if label[: len(label) - i] in pairs:\n scol, field = pairs[label[: len(label) - i]]\n\n name = self._internal.spark_column_name_for(label)\n scol = scol.alias(name)\n if field is not None:\n field = field.copy(name=name)\n break\n else:\n scol = self._internal.spark_column_for(label)\n field = self._internal.field_for(label)\n scols.append(scol)\n data_fields.append(field)\n\n column_labels = self._internal.column_labels.copy()\n for label, (scol, field) in pairs.items():\n if label not in set(i[: len(label)] for i in self._internal.column_labels):\n name = name_like_string(label)\n scols.append(scol.alias(name))\n if field is not None:\n field = field.copy(name=name)\n data_fields.append(field)\n\n column_labels.append(label)\n\n level = self._internal.column_labels_level\n column_labels = [\n tuple(list(label) + ([\"\"] * (level - len(label)))) for label in column_labels\n ]\n\n internal = self._internal.with_new_columns(\n scols, column_labels=column_labels, data_fields=data_fields\n )\n return DataFrame(internal)\n\n @staticmethod\n def from_records(\n data: Union[np.ndarray, List[tuple], dict, pd.DataFrame],\n index: Union[str, list, np.ndarray] = None,\n exclude: list = None,\n columns: list = None,\n coerce_float: bool = False,\n nrows: int = None,\n ) -> \"DataFrame\":\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of 
input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names associated with them, this\n argument provides names for the columns. Otherwise this argument indicates the order of\n the columns in the result (any names not found in the data will become all-NA columns)\n coerce_float : boolean, default False\n Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to\n floating point, useful for SQL result sets\n nrows : int, default None\n Number of rows to read if data is an iterator\n\n Returns\n -------\n df : DataFrame\n\n Examples\n --------\n Use dict as input\n\n >>> ps.DataFrame.from_records({'A': [1, 2, 3]})\n A\n 0 1\n 1 2\n 2 3\n\n Use list of tuples as input\n\n >>> ps.DataFrame.from_records([(1, 2), (3, 4)])\n 0 1\n 0 1 2\n 1 3 4\n\n Use NumPy array as input\n\n >>> ps.DataFrame.from_records(np.eye(3))\n 0 1 2\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n \"\"\"\n return DataFrame(\n pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)\n )\n\n def to_records(\n self,\n index: bool = True,\n column_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,\n index_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... 
index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Specification of dtype for columns is new in pandas 0.24.0.\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"}) # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n Specification of dtype for index is new in pandas 0.24.0.\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\") # doctest: +SKIP\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n args = locals()\n psdf = self\n\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args\n )\n\n def copy(self, deep: bool = True) -> \"DataFrame\":\n \"\"\"\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n copy : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df_copy = df.copy()\n >>> df_copy\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n \"\"\"\n return DataFrame(self._internal)\n\n def dropna(\n self,\n axis: Axis = 0,\n how: str = \"any\",\n thresh: Optional[int] = None,\n subset: Optional[Union[Name, List[Name]]] = None,\n inplace: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [None, 'Batmobile', 'Bullwhip'],\n ... \"born\": [None, \"1940-04-25\", None]},\n ... 
columns=['name', 'toy', 'born'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'born'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n axis = validate_axis(axis)\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if thresh is None:\n if how is None:\n raise TypeError(\"must specify how or thresh\")\n elif how not in (\"any\", \"all\"):\n raise ValueError(\"invalid how option: {h}\".format(h=how))\n\n labels: Optional[List[Label]]\n if subset is not None:\n if isinstance(subset, str):\n labels = [(subset,)]\n elif isinstance(subset, tuple):\n labels = [subset]\n else:\n labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]\n else:\n labels = None\n\n if axis == 0:\n if labels is not None:\n invalids = [label for label in labels if label not in self._internal.column_labels]\n if len(invalids) > 0:\n raise KeyError(invalids)\n else:\n labels = self._internal.column_labels\n\n cnt = reduce(\n lambda x, y: x + y,\n [\n F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0)\n for label in labels\n ],\n SF.lit(0),\n )\n if thresh is not None:\n pred = cnt >= SF.lit(int(thresh))\n elif how == \"any\":\n pred = cnt == SF.lit(len(labels))\n elif how == \"all\":\n pred = cnt > SF.lit(0)\n\n internal = self._internal.with_filter(pred)\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n else:\n assert axis == 1\n\n internal = self._internal.resolved_copy\n\n if labels is not None:\n if any(len(lbl) != internal.index_level for lbl in labels):\n raise ValueError(\n \"The length of each subset must be the same as the index size.\"\n )\n\n cond = reduce(\n lambda x, y: x | y,\n [\n reduce(\n lambda x, y: x & y,\n [\n scol == SF.lit(part)\n for part, scol in zip(lbl, internal.index_spark_columns)\n ],\n )\n for lbl in labels\n ],\n )\n\n internal = internal.with_filter(cond)\n\n psdf: DataFrame = DataFrame(internal)\n\n null_counts = []\n for label in internal.column_labels:\n psser = psdf._psser_for(label)\n cond = psser.isnull().spark.column\n null_counts.append(\n F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label))\n )\n\n counts = internal.spark_frame.select(null_counts + [F.count(\"*\")]).head()\n\n if thresh is not None:\n column_labels = [\n label\n for label, cnt in zip(internal.column_labels, counts)\n if (cnt or 0) >= int(thresh)\n ]\n elif how == \"any\":\n column_labels = [\n label\n for label, cnt in zip(internal.column_labels, counts)\n if (cnt or 0) == counts[-1]\n ]\n elif how == \"all\":\n column_labels = [\n label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0\n ]\n\n 
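# keep only the columns whose non-null counts (computed above) satisfy the 'thresh' / 'how' criterion\n            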
psdf = self[column_labels]\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n # TODO: add 'limit' when value parameter exists\n def fillna(\n self,\n value: Optional[Union[Any, Dict[Name, Any]]] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError(\"fillna currently only works for axis=0 or axis='index'\")\n\n if value is not None:\n if not isinstance(value, (float, int, str, bool, dict, pd.Series)):\n raise TypeError(\"Unsupported type %s\" % type(value).__name__)\n if limit is not None:\n raise ValueError(\"limit parameter for value is not support now\")\n if isinstance(value, pd.Series):\n value = value.to_dict()\n if isinstance(value, dict):\n for v in value.values():\n if not isinstance(v, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(v).__name__)\n value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()}\n\n def op(psser: ps.Series) -> ps.Series:\n label = psser._column_label\n for k, v in value.items():\n if k == label[: len(k)]:\n return psser._fillna(\n value=value[k], method=method, axis=axis, limit=limit\n )\n else:\n return psser\n\n else:\n\n def op(psser: ps.Series) -> ps.Series:\n return psser._fillna(value=value, method=method, axis=axis, limit=limit)\n\n elif method is not None:\n\n def op(psser: ps.Series) -> ps.Series:\n return psser._fillna(value=value, method=method, axis=axis, limit=limit)\n\n else:\n raise ValueError(\"Must specify a fillna 'value' or 'method' parameter.\")\n\n psdf = self._apply_series_op(op, should_resolve=(method is not None))\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._update_internal_frame(psdf._internal, requires_same_anchor=False)\n return None\n else:\n return psdf\n\n def replace(\n self,\n to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,\n value: Optional[Any] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n regex: bool = False,\n method: str = \"pad\",\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ----------\n to_replace : int, float, string, list, tuple or dict\n Value to be replaced.\n value : int, float, string, list or tuple\n Value to use to replace holes. The replacement value must be an int, float,\n or string.\n If value is a list or tuple, value should be of the same length with to_replace.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n Object after replacement.\n\n Examples\n --------\n >>> df = ps.DataFrame({\"name\": ['Ironman', 'Captain America', 'Thor', 'Hulk'],\n ... \"weapon\": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},\n ... 
columns=['name', 'weapon'])\n >>> df\n name weapon\n 0 Ironman Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Scalar `to_replace` and `value`\n\n >>> df.replace('Ironman', 'War-Machine')\n name weapon\n 0 War-Machine Mark-45\n 1 Captain America Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n List like `to_replace` and `value`\n\n >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)\n >>> df\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Mjolnir\n 3 Hulk Smash\n\n Dicts can be used to specify different replacement values for different existing values\n To use a dict in this way the value parameter should be None\n\n >>> df.replace({'Mjolnir': 'Stormbuster'})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Dict can specify that different values should be replaced in different columns\n The value parameter should not be None in this case\n\n >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n\n Nested dictionaries\n The value parameter should be None to use a nested dict in this way\n\n >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})\n name weapon\n 0 Rescue Mark-45\n 1 Hawkeye Shield\n 2 Thor Stormbuster\n 3 Hulk Smash\n \"\"\"\n if method != \"pad\":\n raise NotImplementedError(\"replace currently works only for method='pad\")\n if limit is not None:\n raise NotImplementedError(\"replace currently works only when limit=None\")\n if regex is not False:\n raise NotImplementedError(\"replace currently doesn't supports regex\")\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)):\n raise TypeError(\"Unsupported type {}\".format(type(value).__name__))\n if to_replace is not None and not isinstance(\n to_replace, (int, float, str, list, tuple, dict)\n ):\n raise TypeError(\"Unsupported type {}\".format(type(to_replace).__name__))\n\n if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)):\n if len(value) != len(to_replace):\n raise ValueError(\"Length of to_replace and value must be same\")\n\n if isinstance(to_replace, dict) and (\n value is not None or all(isinstance(i, dict) for i in to_replace.values())\n ):\n to_replace_dict = to_replace\n\n def op(psser: ps.Series) -> ps.Series:\n if psser.name in to_replace_dict:\n return psser.replace(\n to_replace=to_replace_dict[psser.name], value=value, regex=regex\n )\n else:\n return psser\n\n else:\n\n def op(psser: ps.Series) -> ps.Series:\n return psser.replace(to_replace=to_replace, value=value, regex=regex)\n\n psdf = self._apply_series_op(op)\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> \"DataFrame\":\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. 
All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with \"TypeError: '<=' not supported\n between instances of 'str' and 'int'\" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n \"\"\"\n if is_list_like(lower) or is_list_like(upper):\n raise TypeError(\n \"List-like value are not supported for 'lower' and 'upper' at the \" + \"moment\"\n )\n\n if lower is None and upper is None:\n return self\n\n return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper))\n\n def head(self, n: int = 5) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n if n < 0:\n n = len(self) + n\n if n <= 0:\n return DataFrame(self._internal.with_filter(SF.lit(False)))\n else:\n sdf = self._internal.resolved_copy.spark_frame\n if get_option(\"compute.ordered_head\"):\n sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)\n return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))\n\n def last(self, offset: Union[str, DateOffset]) -> \"DataFrame\":\n \"\"\"\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. 
For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n # Check index type should be format DateTime\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex\")\n\n offset_: Optional[DateOffset] = to_offset(offset)\n assert offset_ is not None\n\n from_date = cast(datetime.datetime, self.index.max()) - offset_ # type: ignore[operator]\n\n return cast(DataFrame, self.loc[from_date:])\n\n def first(self, offset: Union[str, DateOffset]) -> \"DataFrame\":\n \"\"\"\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the first 3 days.\n\n Returns\n -------\n DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)\n >>> psdf\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> psdf.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 observed days in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n # Check index type should be format DatetimeIndex\n if not isinstance(self.index, ps.DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex\")\n\n offset_: Optional[DateOffset] = to_offset(offset)\n assert offset_ is not None\n\n to_date = cast(datetime.datetime, self.index.min()) + offset_ # type: ignore[operator]\n\n return cast(DataFrame, self.loc[:to_date]) # type: ignore[misc]\n\n def pivot_table(\n self,\n values: Optional[Union[Name, List[Name]]] = None,\n index: Optional[List[Name]] = None,\n columns: Optional[Name] = None,\n aggfunc: Union[str, Dict[Name, str]] = \"mean\",\n fill_value: Optional[Any] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n the pivot table will be stored in MultiIndex objects (hierarchical\n indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------\n values : column to aggregate.\n They should be either a list less than three or a string.\n index : column (string) or list of columns\n If an array is passed, it must be the same length as the data.\n The list should contain string.\n columns : column\n Columns used in the pivot operation. 
Only one column is supported and\n it should be a string.\n aggfunc : function (string), dict, default mean\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with.\n\n Returns\n -------\n table : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]},\n ... columns=['A', 'B', 'C', 'D', 'E'])\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = df.pivot_table(values='D', index=['A', 'B'],\n ... columns='C', aggfunc='sum')\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4.0 5\n two 7.0 6\n foo one 4.0 1\n two NaN 6\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = df.pivot_table(values='D', index=['A', 'B'],\n ... columns='C', aggfunc='sum', fill_value=0)\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = df.pivot_table(values=['D'], index =['C'],\n ... columns=\"A\", aggfunc={'D': 'mean'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D\n A bar foo\n C\n large 5.5 2.000000\n small 5.5 2.333333\n\n The next example aggregates on multiple values.\n\n >>> table = df.pivot_table(index=['C'], columns=\"A\", values=['D', 'E'],\n ... 
aggfunc={'D': 'mean', 'E': 'sum'})\n >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE\n D E\n A bar foo bar foo\n C\n large 5.5 2.000000 15 9\n small 5.5 2.333333 17 13\n \"\"\"\n if not is_name_like_value(columns):\n raise TypeError(\"columns should be one column name.\")\n\n if not is_name_like_value(values) and not (\n isinstance(values, list) and all(is_name_like_value(v) for v in values)\n ):\n raise TypeError(\"values should be one column or list of columns.\")\n\n if not isinstance(aggfunc, str) and (\n not isinstance(aggfunc, dict)\n or not all(\n is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items()\n )\n ):\n raise TypeError(\n \"aggfunc must be a dict mapping from column name \"\n \"to aggregate functions (string).\"\n )\n\n if isinstance(aggfunc, dict) and index is None:\n raise NotImplementedError(\n \"pivot_table doesn't support aggfunc\" \" as dict and without index.\"\n )\n if isinstance(values, list) and index is None:\n raise NotImplementedError(\"values can't be a list without index.\")\n\n if columns not in self.columns:\n raise ValueError(\"Wrong columns {}.\".format(name_like_string(columns)))\n if not is_name_like_tuple(columns):\n columns = (columns,)\n\n if isinstance(values, list):\n values = [col if is_name_like_tuple(col) else (col,) for col in values]\n if not all(\n isinstance(self._internal.spark_type_for(col), NumericType) for col in values\n ):\n raise TypeError(\"values should be a numeric type.\")\n else:\n values = values if is_name_like_tuple(values) else (values,)\n if not isinstance(self._internal.spark_type_for(values), NumericType):\n raise TypeError(\"values should be a numeric type.\")\n\n if isinstance(aggfunc, str):\n if isinstance(values, list):\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(\n self._internal.spark_column_name_for(value), aggfunc\n )\n )\n for value in values\n ]\n else:\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(\n self._internal.spark_column_name_for(values), aggfunc\n )\n )\n ]\n elif isinstance(aggfunc, dict):\n aggfunc = {\n key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items()\n }\n agg_cols = [\n F.expr(\n \"{1}(`{0}`) as `{0}`\".format(self._internal.spark_column_name_for(key), value)\n )\n for key, value in aggfunc.items()\n ]\n agg_columns = [key for key, _ in aggfunc.items()]\n\n if set(agg_columns) != set(values):\n raise ValueError(\"Columns in aggfunc must be the same as values.\")\n\n sdf = self._internal.resolved_copy.spark_frame\n if index is None:\n sdf = (\n sdf.groupBy()\n .pivot(pivot_col=self._internal.spark_column_name_for(columns))\n .agg(*agg_cols)\n )\n\n elif isinstance(index, list):\n index = [label if is_name_like_tuple(label) else (label,) for label in index]\n sdf = (\n sdf.groupBy([self._internal.spark_column_name_for(label) for label in index])\n .pivot(pivot_col=self._internal.spark_column_name_for(columns))\n .agg(*agg_cols)\n )\n else:\n raise TypeError(\"index should be a None or a list of columns.\")\n\n if fill_value is not None and isinstance(fill_value, (int, float)):\n sdf = sdf.fillna(fill_value)\n\n psdf: DataFrame\n if index is not None:\n index_columns = [self._internal.spark_column_name_for(label) for label in index]\n index_fields = [self._internal.field_for(label) for label in index]\n\n if isinstance(values, list):\n data_columns = [column for column in sdf.columns if column not in index_columns]\n\n if len(values) > 1:\n # If we have two values, Spark will return column's name\n # in 
this format: column_values, where column contains\n # their values in the DataFrame and values is\n # the column list passed to the pivot_table().\n # E.g. if column is b and values is ['b','e'],\n # then ['2_b', '2_e', '3_b', '3_e'].\n\n # We sort the columns of Spark DataFrame by values.\n data_columns.sort(key=lambda x: x.split(\"_\", 1)[1])\n sdf = sdf.select(index_columns + data_columns)\n\n column_name_to_index = dict(\n zip(self._internal.data_spark_column_names, self._internal.column_labels)\n )\n column_labels = [\n tuple(list(column_name_to_index[name.split(\"_\")[1]]) + [name.split(\"_\")[0]])\n for name in data_columns\n ]\n column_label_names = (\n [cast(Optional[Name], None)] * column_labels_level(values)\n ) + [columns]\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf = DataFrame(internal)\n else:\n column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]\n column_label_names = ([cast(Optional[Name], None)] * len(values[0])) + [columns]\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf = DataFrame(internal)\n else:\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index,\n index_fields=index_fields,\n column_label_names=[columns],\n )\n psdf = DataFrame(internal)\n else:\n if isinstance(values, list):\n index_values = values[-1]\n else:\n index_values = values\n index_map: Dict[str, Optional[Label]] = {}\n for i, index_value in enumerate(index_values):\n colname = SPARK_INDEX_NAME_FORMAT(i)\n sdf = sdf.withColumn(colname, SF.lit(index_value))\n index_map[colname] = None\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()],\n index_names=list(index_map.values()),\n column_label_names=[columns],\n )\n psdf = DataFrame(internal)\n\n psdf_columns = psdf.columns\n if isinstance(psdf_columns, pd.MultiIndex):\n psdf.columns = psdf_columns.set_levels(\n psdf_columns.levels[-1].astype( # type: ignore[index]\n spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)\n ),\n level=-1,\n )\n else:\n psdf.columns = psdf_columns.astype(\n spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)\n )\n\n return psdf\n\n def pivot(\n self,\n index: Optional[Name] = None,\n columns: Optional[Name] = None,\n values: Optional[Name] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation.\n\n Parameters\n ----------\n index : string, optional\n Column to use to make new frame's index. 
If None, uses\n existing index.\n columns : string\n Column to use to make new frame's columns.\n values : string, object or a list of the previous\n Column(s) to use for populating new frame's values.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n\n Examples\n --------\n >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},\n ... columns=['foo', 'bar', 'baz', 'zoo'])\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n 0 1.0 NaN NaN\n 1 NaN 2.0 NaN\n 2 NaN NaN 3.0\n 3 4.0 NaN NaN\n 4 NaN 5.0 NaN\n 5 NaN NaN 6.0\n\n Notice that, unlike pandas raises an ValueError when duplicated values are found,\n pandas-on-Spark's pivot still works with its first value it meets during operation because\n pivot is an expensive operation and it is preferred to permissively execute over failing\n fast when processing large data.\n\n >>> df = ps.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()\n ... # doctest: +NORMALIZE_WHITESPACE\n bar A B C\n foo\n one 1.0 NaN NaN\n two NaN 3.0 4.0\n\n It also support multi-index and multi-index column.\n >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')])\n\n >>> df = df.set_index(('a', 'bar'), append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n foo baz\n (a, bar)\n 0 A one 1\n 1 A one 2\n 2 B two 3\n 3 C two 4\n\n >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index()\n ... 
# doctest: +NORMALIZE_WHITESPACE\n ('a', 'foo') one two\n (a, bar)\n 0 A 1.0 NaN\n 1 A 2.0 NaN\n 2 B NaN 3.0\n 3 C NaN 4.0\n\n \"\"\"\n if columns is None:\n raise ValueError(\"columns should be set.\")\n\n if values is None:\n raise ValueError(\"values should be set.\")\n\n should_use_existing_index = index is not None\n if should_use_existing_index:\n df = self\n index_labels = [index]\n else:\n # The index after `reset_index()` will never be used, so use \"distributed\" index\n # as a dummy to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n df = self.reset_index()\n index_labels = df._internal.column_labels[: self._internal.index_level]\n\n df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc=\"first\")\n\n if should_use_existing_index:\n return df\n else:\n internal = df._internal.copy(index_names=self._internal.index_names)\n return DataFrame(internal)\n\n @property\n def columns(self) -> pd.Index:\n \"\"\"The column labels of the DataFrame.\"\"\"\n names = [\n name if name is None or len(name) > 1 else name[0]\n for name in self._internal.column_label_names\n ]\n if self._internal.column_labels_level > 1:\n columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names)\n else:\n columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0])\n return columns\n\n @columns.setter\n def columns(self, columns: Union[pd.Index, List[Name]]) -> None:\n if isinstance(columns, pd.MultiIndex):\n column_labels = columns.tolist()\n else:\n column_labels = [\n col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns\n ]\n\n if len(self._internal.column_labels) != len(column_labels):\n raise ValueError(\n \"Length mismatch: Expected axis has {} elements, \"\n \"new values have {} elements\".format(\n len(self._internal.column_labels), len(column_labels)\n )\n )\n\n column_label_names: Optional[List]\n if isinstance(columns, pd.Index):\n column_label_names = [\n name if is_name_like_tuple(name) else (name,) for name in columns.names\n ]\n else:\n column_label_names = None\n\n pssers = [\n self._psser_for(label).rename(name)\n for label, name in zip(self._internal.column_labels, column_labels)\n ]\n self._update_internal_frame(\n self._internal.with_new_columns(pssers, column_label_names=column_label_names)\n )\n\n @property\n def dtypes(self) -> pd.Series:\n \"\"\"Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... 
columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n \"\"\"\n return pd.Series(\n [self._psser_for(label).dtype for label in self._internal.column_labels],\n index=pd.Index(\n [label if len(label) > 1 else label[0] for label in self._internal.column_labels]\n ),\n )\n\n def select_dtypes(\n self,\n include: Optional[Union[str, List[str]]] = None,\n exclude: Optional[Union[str, List[str]]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. At least\n one of these parameters must be supplied. It also takes Spark SQL\n DDL type strings, for instance, 'string' and 'date'.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes()\n Traceback (most recent call last):\n ...\n ValueError: at least one of include or exclude must be nonempty\n\n * If ``include`` and ``exclude`` have overlapping elements\n\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df.select_dtypes(include='a', exclude='a')\n Traceback (most recent call last):\n ...\n ValueError: include and exclude overlap on {'a'}\n\n Notes\n -----\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3,\n ... 
'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])\n >>> df\n a b c d\n 0 1 True 1.0 a\n 1 2 False 2.0 b\n 2 1 True 1.0 a\n 3 2 False 2.0 b\n 4 1 True 1.0 a\n 5 2 False 2.0 b\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'], exclude=['int'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int'])\n b c d\n 0 True 1.0 a\n 1 False 2.0 b\n 2 True 1.0 a\n 3 False 2.0 b\n 4 True 1.0 a\n 5 False 2.0 b\n\n Spark SQL DDL type strings can be used as well.\n\n >>> df.select_dtypes(exclude=['string'])\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n \"\"\"\n from pyspark.sql.types import _parse_datatype_string\n\n include_list: List[str]\n if not is_list_like(include):\n include_list = [cast(str, include)] if include is not None else []\n else:\n include_list = list(include)\n exclude_list: List[str]\n if not is_list_like(exclude):\n exclude_list = [cast(str, exclude)] if exclude is not None else []\n else:\n exclude_list = list(exclude)\n\n if not any((include_list, exclude_list)):\n raise ValueError(\"at least one of include or exclude must be \" \"nonempty\")\n\n # can't both include AND exclude!\n if set(include_list).intersection(set(exclude_list)):\n raise ValueError(\n \"include and exclude overlap on {inc_ex}\".format(\n inc_ex=set(include_list).intersection(set(exclude_list))\n )\n )\n\n # Handle Spark types\n include_spark_type = []\n for inc in include_list:\n try:\n include_spark_type.append(_parse_datatype_string(inc))\n except BaseException:\n pass\n\n exclude_spark_type = []\n for exc in exclude_list:\n try:\n exclude_spark_type.append(_parse_datatype_string(exc))\n except BaseException:\n pass\n\n # Handle pandas types\n include_numpy_type = []\n for inc in include_list:\n try:\n include_numpy_type.append(infer_dtype_from_object(inc))\n except BaseException:\n pass\n\n exclude_numpy_type = []\n for exc in exclude_list:\n try:\n exclude_numpy_type.append(infer_dtype_from_object(exc))\n except BaseException:\n pass\n\n column_labels = []\n for label in self._internal.column_labels:\n if len(include_list) > 0:\n should_include = (\n infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type\n or self._internal.spark_type_for(label) in include_spark_type\n )\n else:\n should_include = not (\n infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type\n or self._internal.spark_type_for(label) in exclude_spark_type\n )\n\n if should_include:\n column_labels.append(label)\n\n return DataFrame(\n self._internal.with_new_columns([self._psser_for(label) for label in column_labels])\n )\n\n def droplevel(\n self, level: Union[int, Name, List[Union[int, Name]]], axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a level If list-like, elements must\n be names or positional indexes of levels.\n\n axis: {0 or ‘index’, 1 or ‘columns’}, default 0\n\n Returns\n -------\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = ps.DataFrame(\n ... [[3, 4], [7, 8], [11, 12]],\n ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=[\"a\", \"b\"]),\n ... )\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n axis = validate_axis(axis)\n if axis == 0:\n if not isinstance(level, (tuple, list)): # huh?\n level = [level]\n\n names = self.index.names\n nlevels = self._internal.index_level\n\n int_level = set()\n for n in level:\n if isinstance(n, int):\n if n < 0:\n n = n + nlevels\n if n < 0:\n raise IndexError(\n \"Too many levels: Index has only {} levels, \"\n \"{} is not a valid level number\".format(nlevels, (n - nlevels))\n )\n if n >= nlevels:\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(\n nlevels, (n + 1)\n )\n )\n else:\n if n not in names:\n raise KeyError(\"Level {} not found\".format(n))\n n = names.index(n)\n int_level.add(n)\n\n if len(level) >= nlevels:\n raise ValueError(\n \"Cannot remove {} levels from an index with {} levels: \"\n \"at least one level must be left.\".format(len(level), nlevels)\n )\n\n index_spark_columns, index_names, index_fields = zip(\n *[\n item\n for i, item in enumerate(\n zip(\n self._internal.index_spark_columns,\n self._internal.index_names,\n self._internal.index_fields,\n )\n )\n if i not in int_level\n ]\n )\n\n internal = self._internal.copy(\n index_spark_columns=list(index_spark_columns),\n index_names=list(index_names),\n index_fields=list(index_fields),\n )\n return DataFrame(internal)\n else:\n psdf = self.copy()\n psdf.columns = psdf.columns.droplevel(level) # type: ignore[arg-type]\n return psdf\n\n def drop(\n self,\n labels: Optional[Union[Name, List[Name]]] = None,\n axis: Optional[Axis] = 0,\n index: Union[Name, List[Name]] = None,\n columns: Union[Name, List[Name]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding axis,\n or by specifying directly index and/or column names.\n Drop rows of a MultiIndex DataFrame is not supported yet.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionchanged:: 3.3\n Set dropping by index by default.\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is quivalent to ``index=columns``).\n\n .. versionchanged:: 3.3\n Added dropping rows by 'index'.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n >>> df.drop(index=[0, 1], columns='A')\n B C D\n 2 9 10 11\n\n Also support dropping columns for MultiIndex\n\n >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... 
columns=['x', 'y', 'z', 'w'])\n >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]\n >>> df.columns = pd.MultiIndex.from_tuples(columns)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n a b\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE\n b\n z w\n 0 5 7\n 1 6 8\n\n Notes\n -----\n Currently, dropping rows of a MultiIndex DataFrame is not supported yet.\n \"\"\"\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis = validate_axis(axis)\n if axis == 1:\n return self.drop(index=index, columns=labels)\n else:\n return self.drop(index=labels, columns=columns)\n else:\n if index is None and columns is None:\n raise ValueError(\"Need to specify at least one of 'labels' or 'columns' or 'index'\")\n\n internal = self._internal\n if index is not None:\n if is_name_like_tuple(index) or is_name_like_value(index):\n index = [index]\n\n if len(index) > 0:\n if internal.index_level == 1:\n internal = internal.resolved_copy\n\n if len(index) <= ps.get_option(\"compute.isin_limit\"):\n self_index_type = self.index.spark.data_type\n cond = ~internal.index_spark_columns[0].isin(\n [SF.lit(label).cast(self_index_type) for label in index]\n )\n internal = internal.with_filter(cond)\n else:\n index_sdf_col = \"__index\"\n index_sdf = default_session().createDataFrame(\n pd.DataFrame({index_sdf_col: index})\n )\n joined_sdf = internal.spark_frame.join(\n other=F.broadcast(index_sdf),\n on=(\n internal.index_spark_columns[0]\n == scol_for(index_sdf, index_sdf_col)\n ),\n how=\"anti\",\n )\n internal = internal.with_new_sdf(joined_sdf)\n else:\n raise NotImplementedError(\n \"Drop rows of MultiIndex DataFrame is not supported yet\"\n )\n if columns is not None:\n if is_name_like_tuple(columns):\n columns = [columns]\n elif is_name_like_value(columns):\n columns = [(columns,)]\n else:\n columns = [col if is_name_like_tuple(col) else (col,) for col in columns]\n\n if len(columns) > 0:\n drop_column_labels = set(\n label\n for label in internal.column_labels\n for col in columns\n if label[: len(col)] == col\n )\n if len(drop_column_labels) == 0:\n raise KeyError(columns)\n\n keep_columns_and_labels = [\n (column, label)\n for column, label in zip(\n self._internal.data_spark_column_names, self._internal.column_labels\n )\n if label not in drop_column_labels\n ]\n\n cols, labels = (\n zip(*keep_columns_and_labels)\n if len(keep_columns_and_labels) > 0\n else ([], [])\n )\n internal = internal.with_new_columns(\n [self._psser_for(label) for label in labels]\n )\n return DataFrame(internal)\n\n def _prepare_sort_by_scols(self, by: Union[Name, List[Name]]) -> List[Column]:\n if is_name_like_value(by):\n by = [by]\n else:\n assert is_list_like(by), type(by)\n new_by = []\n for colname in by:\n ser = self[colname]\n if not isinstance(ser, ps.Series):\n raise ValueError(\n \"The column %s is not unique. 
For a multi-index, the label must be a tuple \"\n \"with elements corresponding to each level.\" % name_like_string(colname)\n )\n new_by.append(ser.spark.column)\n return new_by\n\n def _sort(\n self,\n by: List[Column],\n ascending: Union[bool, List[bool]],\n na_position: str,\n keep: str = \"first\",\n ) -> \"DataFrame\":\n if isinstance(ascending, bool):\n ascending = [ascending] * len(by)\n if len(ascending) != len(by):\n raise ValueError(\n \"Length of ascending ({}) != length of by ({})\".format(len(ascending), len(by))\n )\n if na_position not in (\"first\", \"last\"):\n raise ValueError(\"invalid na_position: '{}'\".format(na_position))\n\n # Mapper: Get a spark column function for (ascending, na_position) combination\n mapper = {\n (True, \"first\"): Column.asc_nulls_first,\n (True, \"last\"): Column.asc_nulls_last,\n (False, \"first\"): Column.desc_nulls_first,\n (False, \"last\"): Column.desc_nulls_last,\n }\n by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]\n\n natural_order_scol = F.col(NATURAL_ORDER_COLUMN_NAME)\n\n if keep == \"last\":\n natural_order_scol = Column.desc(natural_order_scol)\n elif keep == \"all\":\n raise NotImplementedError(\"`keep`=all is not implemented yet.\")\n elif keep != \"first\":\n raise ValueError('keep must be either \"first\", \"last\" or \"all\".')\n sdf = self._internal.resolved_copy.spark_frame.sort(*by, natural_order_scol)\n return DataFrame(self._internal.with_new_sdf(sdf))\n\n def sort_values(\n self,\n by: Union[Name, List[Name]],\n ascending: Union[bool, List[bool]] = True,\n inplace: bool = False,\n na_position: str = \"last\",\n ignore_index: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'],\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> df\n col1 col2 col3\n a A 2 0\n b B 9 9\n c None 8 4\n d D 7 2\n e C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n a A 2 0\n b B 9 9\n e C 4 3\n d D 7 2\n c None 8 4\n\n Ignore index for the resulting axis\n\n >>> df.sort_values(by=['col1'], ignore_index=True)\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 C 4 3\n 3 D 7 2\n 4 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n d D 7 2\n e C 4 3\n b B 9 9\n a A 2 0\n c None 8 4\n\n Sort by multiple columns\n\n >>> df = ps.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... 
columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n new_by = self._prepare_sort_by_scols(by)\n\n psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position)\n\n if inplace:\n if ignore_index:\n psdf.reset_index(drop=True, inplace=inplace)\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf.reset_index(drop=True) if ignore_index else psdf\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Union[int, List[int]]] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = None,\n na_position: str = \"last\",\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to direct sorting. Currently, only axis = 0 is supported.\n level : int or level name or list of ints or list of level names\n if not None, sort on values in specified index level(s)\n ascending : boolean, default True\n Sort ascending vs. descending\n inplace : bool, default False\n if True, perform operation in-place\n kind : str, default None\n pandas-on-Spark does not allow specifying the sorting algorithm at the moment,\n default None\n na_position : {‘first’, ‘last’}, default ‘last’\n first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for\n MultiIndex.\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])\n\n >>> df.sort_index()\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df.sort_index(ascending=False)\n A\n b 2.0\n a 1.0\n NaN NaN\n\n >>> df.sort_index(na_position='first')\n A\n NaN NaN\n a 1.0\n b 2.0\n\n >>> df.sort_index(inplace=True)\n >>> df\n A\n a 1.0\n b 2.0\n NaN NaN\n\n >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},\n ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],\n ... columns=['A', 'B'])\n\n >>> df.sort_index()\n A B\n a 0 3 0\n 1 2 1\n b 0 1 2\n 1 0 3\n\n >>> df.sort_index(level=1) # doctest: +SKIP\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n\n >>> df.sort_index(level=[1, 0])\n A B\n a 0 3 0\n b 0 1 2\n a 1 2 1\n b 1 0 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError(\"No other axis than 0 are supported at the moment\")\n if kind is not None:\n raise NotImplementedError(\n \"Specifying the sorting algorithm is not supported at the moment.\"\n )\n\n if level is None or (is_list_like(level) and len(level) == 0): # type: ignore[arg-type]\n by = self._internal.index_spark_columns\n elif is_list_like(level):\n by = [\n self._internal.index_spark_columns[lvl] for lvl in level # type: ignore[union-attr]\n ]\n else:\n by = [self._internal.index_spark_columns[level]] # type: ignore[index]\n\n psdf = self._sort(by=by, ascending=ascending, na_position=na_position)\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n def swaplevel(\n self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, axis: Axis = 0\n ) -> \"DataFrame\":\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 
0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n DataFrame with levels swapped in MultiIndex.\n\n Examples\n --------\n >>> midx = pd.MultiIndex.from_arrays(\n ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])\n >>> midx # doctest: +SKIP\n MultiIndex([( 'red', 1, 's'),\n ('blue', 2, 'm')],\n names=['color', 'number', 'size'])\n\n Swap levels in a MultiIndex on index.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n x y\n color number size\n red 1 s 5 5\n blue 2 m 6 6\n\n >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE\n x y\n number color size\n 1 red s 5 5\n 2 blue m 6 6\n\n >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE\n x y\n color size number\n red s 1 5 5\n blue m 2 6 6\n\n Swap levels in a MultiIndex on columns.\n\n >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})\n >>> psdf.columns = midx\n >>> psdf\n color red blue\n number 1 2\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(axis=1)\n color red blue\n size s m\n number 1 2\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel(0, 1, axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n\n >>> psdf.swaplevel('number', 'color', axis=1)\n number 1 2\n color red blue\n size s m\n 0 5 5\n 1 6 6\n \"\"\"\n axis = validate_axis(axis)\n if axis == 0:\n internal = self._swaplevel_index(i, j)\n else:\n assert axis == 1\n internal = self._swaplevel_columns(i, j)\n\n return DataFrame(internal)\n\n def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> \"DataFrame\":\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n then group twice - it is a huge operation. To prevent misusage, this method\n has the 'compute.max_rows' default limit of input length, and raises a ValueError.\n\n >>> from pyspark.pandas.config import option_context\n >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE\n ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)\n Traceback (most recent call last):\n ...\n ValueError: Current DataFrame has more then the given limit 1000 rows.\n Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'\n to retrieve to retrieve more than 1000 rows. Note that, before changing the\n 'compute.max_rows', this operation is considerably expensive.\n\n Parameters\n ----------\n i: {0 or 'index', 1 or 'columns'}. The axis to swap.\n j: {0 or 'index', 1 or 'columns'}. The axis to swap.\n copy : bool, default True.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> psdf = ps.DataFrame(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']\n ... 
)\n >>> psdf\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n >>> psdf.swapaxes(i=1, j=0)\n x y z\n a 1 4 7\n b 2 5 8\n c 3 6 9\n >>> psdf.swapaxes(i=1, j=1)\n a b c\n x 1 2 3\n y 4 5 6\n z 7 8 9\n \"\"\"\n assert copy is True\n\n i = validate_axis(i)\n j = validate_axis(j)\n\n return self.copy() if i == j else self.transpose()\n\n def _swaplevel_columns(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:\n assert isinstance(self.columns, pd.MultiIndex)\n for index in (i, j):\n if not isinstance(index, int) and index not in self.columns.names:\n raise KeyError(\"Level %s not found\" % index)\n\n i = i if isinstance(i, int) else self.columns.names.index(i)\n j = j if isinstance(j, int) else self.columns.names.index(j)\n for index in (i, j):\n if index >= len(self.columns) or index < -len(self.columns):\n raise IndexError(\n \"Too many levels: Columns have only %s levels, \"\n \"%s is not a valid level number\" % (self._internal.index_level, index)\n )\n\n column_label_names = self._internal.column_label_names.copy()\n column_label_names[i], column_label_names[j], = (\n column_label_names[j],\n column_label_names[i],\n )\n column_labels = self._internal._column_labels\n column_label_list = [list(label) for label in column_labels]\n for label_list in column_label_list:\n label_list[i], label_list[j] = label_list[j], label_list[i]\n column_labels = [tuple(x) for x in column_label_list]\n internal = self._internal.copy(\n column_label_names=list(column_label_names), column_labels=list(column_labels)\n )\n return internal\n\n def _swaplevel_index(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:\n assert isinstance(self.index, ps.MultiIndex)\n for index in (i, j):\n if not isinstance(index, int) and index not in self.index.names:\n raise KeyError(\"Level %s not found\" % index)\n\n i = i if isinstance(i, int) else self.index.names.index(i)\n j = j if isinstance(j, int) else self.index.names.index(j)\n for index in (i, j):\n if index >= self._internal.index_level or index < -self._internal.index_level:\n raise IndexError(\n \"Too many levels: Index has only %s levels, \"\n \"%s is not a valid level number\" % (self._internal.index_level, index)\n )\n\n index_map = list(\n zip(\n self._internal.index_spark_columns,\n self._internal.index_names,\n self._internal.index_fields,\n )\n )\n index_map[i], index_map[j] = index_map[j], index_map[i]\n index_spark_columns, index_names, index_fields = zip(*index_map)\n internal = self._internal.copy(\n index_spark_columns=list(index_spark_columns),\n index_names=list(index_names),\n index_fields=list(index_fields),\n )\n return internal\n\n def nlargest(\n self, n: int, columns: Union[Name, List[Name]], keep: str = \"first\"\n ) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in pandas.\n In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last'}, default 'first'. 
'all' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"X\".\n\n >>> df.nlargest(n=3, columns='X')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n To order by the largest values in column \"Y\" and then \"X\", we can\n specify multiple columns like in the next example.\n\n >>> df.nlargest(n=3, columns=['Y', 'X'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e'])\n >>> tied_df\n X\n a 1\n b 2\n c 2\n d 3\n e 3\n\n When using keep='first' (by default), ties are resolved in order:\n\n >>> tied_df.nlargest(3, 'X')\n X\n d 3\n e 3\n b 2\n\n >>> tied_df.nlargest(3, 'X', keep='first')\n X\n d 3\n e 3\n b 2\n\n When using keep='last', ties are resolved in reverse order:\n\n >>> tied_df.nlargest(3, 'X', keep='last')\n X\n e 3\n d 3\n c 2\n \"\"\"\n by_scols = self._prepare_sort_by_scols(columns)\n return self._sort(by=by_scols, ascending=False, na_position=\"last\", keep=keep).head(n=n)\n\n def nsmallest(\n self, n: int, columns: Union[Name, List[Name]], keep: str = \"first\"\n ) -> \"DataFrame\":\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,\n but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query\n optimizer, the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet.\n Determines which duplicates (if any) to keep.\n - ``first`` : Keep the first occurrence.\n - ``last`` : Keep the last occurrence.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 
'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"X\".\n\n >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the smallest values in column \"Y\" and then \"X\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n The examples below show how ties are resolved, which is decided by `keep`.\n\n >>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e'])\n >>> tied_df\n X\n a 1\n b 1\n c 2\n d 2\n e 3\n\n When using keep='first' (by default), ties are resolved in order:\n\n >>> tied_df.nsmallest(3, 'X')\n X\n a 1\n b 1\n c 2\n\n >>> tied_df.nsmallest(3, 'X', keep='first')\n X\n a 1\n b 1\n c 2\n\n When using keep='last', ties are resolved in reverse order:\n\n >>> tied_df.nsmallest(3, 'X', keep='last')\n X\n b 1\n a 1\n d 2\n \"\"\"\n by_scols = self._prepare_sort_by_scols(columns)\n return self._sort(by=by_scols, ascending=True, na_position=\"last\", keep=keep).head(n=n)\n\n def isin(self, values: Union[List, Dict]) -> \"DataFrame\":\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... 
columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n \"\"\"\n if isinstance(values, (pd.DataFrame, pd.Series)):\n raise NotImplementedError(\"DataFrame and Series are not supported\")\n if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):\n raise AttributeError(\n \"'DataFrame' object has no attribute %s\"\n % (set(values.keys()).difference(self.columns))\n )\n\n data_spark_columns = []\n if isinstance(values, dict):\n for i, col in enumerate(self.columns):\n if col in values:\n item = values[col]\n item = item.tolist() if isinstance(item, np.ndarray) else list(item)\n\n scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin(\n [SF.lit(v) for v in item]\n )\n scol = F.coalesce(scol, F.lit(False))\n else:\n scol = SF.lit(False)\n data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i]))\n elif is_list_like(values):\n values = (\n cast(np.ndarray, values).tolist()\n if isinstance(values, np.ndarray)\n else list(values)\n )\n\n for label in self._internal.column_labels:\n scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values])\n scol = F.coalesce(scol, F.lit(False))\n data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label)))\n else:\n raise TypeError(\"Values should be iterable, Series, DataFrame or dict.\")\n\n return DataFrame(\n self._internal.with_new_columns(\n data_spark_columns,\n data_fields=[\n field.copy(dtype=np.dtype(\"bool\"), spark_type=BooleanType(), nullable=False)\n for field in self._internal.data_fields\n ],\n )\n )\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self), len(self.columns)\n\n def merge(\n self,\n right: \"DataFrame\",\n how: str = \"inner\",\n on: Optional[Union[Name, List[Name]]] = None,\n left_on: Optional[Union[Name, List[Name]]] = None,\n right_on: Optional[Union[Name, List[Name]]] = None,\n left_index: bool = False,\n right_index: bool = False,\n suffixes: Tuple[str, str] = (\"_x\", \"_y\"),\n ) -> \"DataFrame\":\n \"\"\"\n Merge DataFrame objects with a database-style join.\n\n The index of the resulting DataFrame will be one of the following:\n - 0...n if no index is used for merging\n - Index of the left DataFrame if merged only on the index of the right DataFrame\n - Index of the right DataFrame if merged only on the index of the left DataFrame\n - All involved indices if merged using the indices of both DataFrames\n e.g. 
if `left` with indices (a, x) and `right` with indices (b, x), the result will\n be an index (x, a, b)\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {'left', 'right', 'outer', 'inner'}, default 'inner'\n\n left: use only keys from left frame, similar to a SQL left outer join; not preserve\n key order unlike pandas.\n right: use only keys from right frame, similar to a SQL right outer join; not preserve\n key order unlike pandas.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n not preserve the order of the left keys unlike pandas.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_on: Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\n right_on: Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n See Also\n --------\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... columns=['lkey', 'value'])\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value'])\n >>> df1\n lkey value\n 0 foo 1\n 1 bar 2\n 2 baz 3\n 3 foo 5\n >>> df2\n rkey value\n 0 foo 5\n 1 bar 6\n 2 baz 7\n 3 foo 8\n\n Merge df1 and df2 on the lkey and rkey columns. 
The value columns have\n the default suffixes, _x and _y, appended.\n\n >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')\n >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS\n lkey value_x rkey value_y\n ...bar 2 bar 6\n ...baz 3 baz 7\n ...foo 1 foo 5\n ...foo 1 foo 8\n ...foo 5 foo 5\n ...foo 5 foo 8\n\n >>> left_psdf = ps.DataFrame({'A': [1, 2]})\n >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()\n A B\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()\n A B\n 0 1 None\n 1 2 x\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()\n A B\n 1 2.0 x\n 2 NaN y\n\n >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n \"\"\"\n\n def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:\n if os is None:\n return []\n elif is_name_like_tuple(os):\n return [cast(Label, os)]\n elif is_name_like_value(os):\n return [(os,)]\n else:\n return [o if is_name_like_tuple(o) else (o,) for o in os]\n\n if isinstance(right, ps.Series):\n right = right.to_frame()\n\n if on:\n if left_on or right_on:\n raise ValueError(\n 'Can only pass argument \"on\" OR \"left_on\" and \"right_on\", '\n \"not a combination of both.\"\n )\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(on)))\n right_key_names = list(map(right._internal.spark_column_name_for, to_list(on)))\n else:\n # TODO: need special handling for multi-index.\n if left_index:\n left_key_names = self._internal.index_spark_column_names\n else:\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on)))\n if right_index:\n right_key_names = right._internal.index_spark_column_names\n else:\n right_key_names = list(\n map(right._internal.spark_column_name_for, to_list(right_on))\n )\n\n if left_key_names and not right_key_names:\n raise ValueError(\"Must pass right_on or right_index=True\")\n if right_key_names and not left_key_names:\n raise ValueError(\"Must pass left_on or left_index=True\")\n if not left_key_names and not right_key_names:\n common = list(self.columns.intersection(right.columns))\n if len(common) == 0:\n raise ValueError(\n \"No common columns to perform merge on. 
Merge options: \"\n \"left_on=None, right_on=None, left_index=False, right_index=False\"\n )\n left_key_names = list(map(self._internal.spark_column_name_for, to_list(common)))\n right_key_names = list(map(right._internal.spark_column_name_for, to_list(common)))\n if len(left_key_names) != len(right_key_names):\n raise ValueError(\"len(left_keys) must equal len(right_keys)\")\n\n # We should distinguish the name to avoid ambiguous column name after merging.\n right_prefix = \"__right_\"\n right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names]\n\n how = validate_how(how)\n\n def resolve(internal: InternalFrame, side: str) -> InternalFrame:\n def rename(col: str) -> str:\n return \"__{}_{}\".format(side, col)\n\n internal = internal.resolved_copy\n sdf = internal.spark_frame\n sdf = sdf.select(\n *[\n scol_for(sdf, col).alias(rename(col))\n for col in sdf.columns\n if col not in HIDDEN_COLUMNS\n ],\n *HIDDEN_COLUMNS,\n )\n return internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.index_spark_column_names\n ],\n index_fields=[\n field.copy(name=rename(field.name)) for field in internal.index_fields\n ],\n data_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.data_spark_column_names\n ],\n data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],\n )\n\n left_internal = self._internal.resolved_copy\n right_internal = resolve(right._internal, \"right\")\n\n left_table = left_internal.spark_frame.alias(\"left_table\")\n right_table = right_internal.spark_frame.alias(\"right_table\")\n\n left_key_columns = [scol_for(left_table, label) for label in left_key_names]\n right_key_columns = [scol_for(right_table, label) for label in right_key_names]\n\n join_condition = reduce(\n lambda x, y: x & y,\n [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)],\n )\n\n joined_table = left_table.join(right_table, join_condition, how=how)\n\n # Unpack suffixes tuple for convenience\n left_suffix = suffixes[0]\n right_suffix = suffixes[1]\n\n # Append suffixes to columns with the same name to avoid conflicts later\n duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)\n\n exprs = []\n data_columns = []\n column_labels = []\n\n def left_scol_for(label: Label) -> Column:\n return scol_for(left_table, left_internal.spark_column_name_for(label))\n\n def right_scol_for(label: Label) -> Column:\n return scol_for(right_table, right_internal.spark_column_name_for(label))\n\n for label in left_internal.column_labels:\n col = left_internal.spark_column_name_for(label)\n scol = left_scol_for(label)\n if label in duplicate_columns:\n spark_column_name = left_internal.spark_column_name_for(label)\n if (\n spark_column_name in left_key_names\n and (right_prefix + spark_column_name) in right_key_names\n ):\n right_scol = right_scol_for(label)\n if how == \"right\":\n scol = right_scol.alias(col)\n elif how == \"full\":\n scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)\n else:\n pass\n else:\n col = col + left_suffix\n scol = scol.alias(col)\n label = tuple([str(label[0]) + left_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n for label in right_internal.column_labels:\n # recover `right_prefix` here.\n col = right_internal.spark_column_name_for(label)[len(right_prefix) :]\n scol = right_scol_for(label).alias(col)\n if label in duplicate_columns:\n 
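# Join keys that appear in both frames were already emitted (or coalesced) while\n                # iterating the left columns above, so skip them here; any other duplicated\n                # label only needs the right-hand suffix appended.\n                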
spark_column_name = left_internal.spark_column_name_for(label)\n if (\n spark_column_name in left_key_names\n and (right_prefix + spark_column_name) in right_key_names\n ):\n continue\n else:\n col = col + right_suffix\n scol = scol.alias(col)\n label = tuple([str(label[0]) + right_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n\n left_index_scols = left_internal.index_spark_columns\n right_index_scols = right_internal.index_spark_columns\n\n # Retain indices if they are used for joining\n if left_index:\n if right_index:\n if how in (\"inner\", \"left\"):\n exprs.extend(left_index_scols)\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n elif how == \"right\":\n exprs.extend(right_index_scols)\n index_spark_column_names = right_internal.index_spark_column_names\n index_names = right_internal.index_names\n else:\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n for col, left_scol, right_scol in zip(\n index_spark_column_names, left_index_scols, right_index_scols\n ):\n scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)\n exprs.append(scol.alias(col))\n else:\n exprs.extend(right_index_scols)\n index_spark_column_names = right_internal.index_spark_column_names\n index_names = right_internal.index_names\n elif right_index:\n exprs.extend(left_index_scols)\n index_spark_column_names = left_internal.index_spark_column_names\n index_names = left_internal.index_names\n else:\n index_spark_column_names = []\n index_names = []\n\n selected_columns = joined_table.select(*exprs)\n\n internal = InternalFrame(\n spark_frame=selected_columns,\n index_spark_columns=[\n scol_for(selected_columns, col) for col in index_spark_column_names\n ],\n index_names=index_names,\n column_labels=column_labels,\n data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],\n )\n return DataFrame(internal)\n\n def join(\n self,\n right: \"DataFrame\",\n on: Optional[Union[Name, List[Name]]] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n ) -> \"DataFrame\":\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple DataFrame objects by index at once by passing a list.\n\n Parameters\n ----------\n right: DataFrame, Series\n on: str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index in `right`, otherwise\n joins index-on-index. If multiple values given, the `right` DataFrame must have a\n MultiIndex. Can pass an array as the join key if it is not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation.\n how: {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use `left` frame’s index (or column if on is specified).\n * right: use `right`’s index.\n * outer: form union of `left` frame’s index (or column if on is specified) with\n right’s index, and sort it. 
lexicographically.\n * inner: form intersection of `left` frame’s index (or column if on is specified)\n with `right`’s index, preserving the order of the `left`’s one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from `right` frame's overlapping columns.\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the `left` and `right`.\n\n See Also\n --------\n DataFrame.merge: For column(s)-on-columns(s) operations.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Notes\n -----\n Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame\n objects.\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n ... 'A': ['A0', 'A1', 'A2', 'A3']},\n ... columns=['key', 'A'])\n >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']},\n ... columns=['key', 'B'])\n >>> psdf1\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n >>> psdf2\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')\n >>> join_psdf.sort_values(by=join_psdf.columns)\n key_left A key_right B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 None None\n\n If we want to join using the key columns, we need to set key to be the index in both df and\n right. The joined DataFrame will have key as its index.\n\n >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))\n >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 None\n\n Another option to join using the key columns is to use the on parameter. DataFrame.join\n always uses right’s index but we can use any column in df. This method not preserve the\n original DataFrame’s index in the result unlike pandas.\n\n >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')\n >>> join_psdf.index\n Int64Index([0, 1, 2, 3], dtype='int64')\n \"\"\"\n if isinstance(right, ps.Series):\n common = list(self.columns.intersection([right.name]))\n else:\n common = list(self.columns.intersection(right.columns))\n if len(common) > 0 and not lsuffix and not rsuffix:\n raise ValueError(\n \"columns overlap but no suffix specified: \" \"{rename}\".format(rename=common)\n )\n\n need_set_index = False\n if on:\n if not is_list_like(on):\n on = [on]\n if len(on) != right._internal.index_level:\n raise ValueError(\n 'len(left_on) must equal the number of levels in the index of \"right\"'\n )\n\n need_set_index = len(set(on) & set(self.index.names)) == 0\n if need_set_index:\n self = self.set_index(on)\n join_psdf = self.merge(\n right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)\n )\n return join_psdf.reset_index() if need_set_index else join_psdf\n\n def combine_first(self, other: \"DataFrame\") -> \"DataFrame\":\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n .. 
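# --- Illustrative usage sketch (editor's addition). DataFrame.join, documented
# above, is index-on-index by default; `on=` reuses a column of the calling frame
# against the index of `right`, and overlapping column names must be disambiguated
# with lsuffix/rsuffix. Assumes pyspark.pandas; the data is hypothetical.
import pyspark.pandas as ps

psdf1 = ps.DataFrame({"key": ["K0", "K1", "K2"], "A": ["A0", "A1", "A2"]})
psdf2 = ps.DataFrame({"key": ["K0", "K1"], "B": ["B0", "B1"]})

# Index-on-index join; the shared "key" column needs suffixes.
joined = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")

# Column-on-index join: "key" from the left frame against psdf2's index.
joined_on_key = psdf1.join(psdf2.set_index("key"), on="key")
print(joined_on_key.sort_index().to_pandas())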
versionadded:: 3.3.0\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> ps.set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]})\n\n >>> df1.combine_first(df2).sort_index()\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value does not exist in other\n\n >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n\n >>> df1.combine_first(df2).sort_index()\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n >>> ps.reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, DataFrame):\n raise TypeError(\"`combine_first` only allows `DataFrame` for parameter `other`\")\n if same_anchor(self, other):\n combined = self\n this = self\n that = other\n else:\n combined = combine_frames(self, other)\n this = combined[\"this\"]\n that = combined[\"that\"]\n\n intersect_column_labels = set(self._internal.column_labels).intersection(\n set(other._internal.column_labels)\n )\n\n column_labels, data_spark_columns = [], []\n for column_label in this._internal.column_labels:\n this_scol = this._internal.spark_column_for(column_label)\n if column_label in intersect_column_labels:\n that_scol = that._internal.spark_column_for(column_label)\n this_scol_name = this._internal.spark_column_name_for(column_label)\n combined_scol = (\n F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name)\n )\n data_spark_columns.append(combined_scol)\n else:\n data_spark_columns.append(this_scol)\n column_labels.append(column_label)\n\n for column_label in that._internal.column_labels:\n if column_label not in intersect_column_labels:\n that_scol = that._internal.spark_column_for(column_label)\n data_spark_columns.append(that_scol)\n column_labels.append(column_label)\n\n internal = combined._internal.copy(\n column_labels=column_labels,\n data_spark_columns=data_spark_columns,\n data_fields=None, # TODO: dtype?\n column_label_names=self._internal.column_label_names,\n )\n return DataFrame(internal)\n\n def append(\n self,\n other: \"DataFrame\",\n ignore_index: bool = False,\n verify_integrity: bool = False,\n sort: bool = False,\n ) -> \"DataFrame\":\n \"\"\"\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n\n ignore_index : boolean, default False\n If True, do not use the index labels.\n\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n sort : boolean, default False\n Currently not supported.\n\n Returns\n -------\n appended : DataFrame\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n\n >>> df.append(df)\n A B\n 0 1 2\n 1 3 4\n 0 1 2\n 1 3 4\n\n >>> df.append(df, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 1 2\n 3 3 4\n \"\"\"\n if isinstance(other, ps.Series):\n raise TypeError(\"DataFrames.append() does not support appending Series to DataFrames\")\n if sort:\n raise NotImplementedError(\"The 'sort' parameter is currently not supported\")\n\n if not ignore_index:\n index_scols = self._internal.index_spark_columns\n if len(index_scols) != 
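# --- Illustrative sketch (editor's addition). combine_first(), implemented above,
# fills nulls column by column with F.when(this.isNull(), that).otherwise(this).
# The snippet below shows that core expression on a plain Spark DataFrame; the
# column names and data are hypothetical.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(None, 3.0), (0.0, 3.0)], "this_A double, that_A double")

filled = sdf.select(
    F.when(F.col("this_A").isNull(), F.col("that_A"))
    .otherwise(F.col("this_A"))
    .alias("A")  # nulls in this_A are replaced by the value from that_A
)
filled.show()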
other._internal.index_level:\n raise ValueError(\"Both DataFrames have to have the same number of index levels\")\n\n if verify_integrity and len(index_scols) > 0:\n if (\n self._internal.spark_frame.select(index_scols)\n .intersect(\n other._internal.spark_frame.select(other._internal.index_spark_columns)\n )\n .count()\n ) > 0:\n raise ValueError(\"Indices have overlapping values\")\n\n # Lazy import to avoid circular dependency issues\n from pyspark.pandas.namespace import concat\n\n return cast(DataFrame, concat([self, other], ignore_index=ignore_index))\n\n # TODO: add 'filter_func' and 'errors' parameter\n def update(self, other: \"DataFrame\", join: str = \"left\", overwrite: bool = True) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', default 'left'\n Only left join is implemented, keeping the index and columns of the original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values with values from `other`.\n * False: only update values that are NA in the original DataFrame.\n\n Returns\n -------\n None : method directly changes calling object\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-columns(s) operations.\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n broadcast : Marks a DataFrame as small enough for use in broadcast joins.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, it's name attribute must be set.\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])\n >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df.sort_index()\n A B\n 0 a d\n 1 b y\n 2 c e\n\n If `other` contains None the corresponding values are not updated in the original dataframe.\n\n >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])\n >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])\n >>> df.update(new_df)\n >>> df.sort_index()\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n if join != \"left\":\n raise NotImplementedError(\"Only left join is supported\")\n\n if isinstance(other, ps.Series):\n other = other.to_frame()\n\n update_columns = list(\n set(self._internal.column_labels).intersection(set(other._internal.column_labels))\n )\n update_sdf = self.join(\n other[update_columns], rsuffix=\"_new\"\n )._internal.resolved_copy.spark_frame\n\n data_fields = self._internal.data_fields.copy()\n for column_labels in update_columns:\n column_name = self._internal.spark_column_name_for(column_labels)\n old_col = scol_for(update_sdf, column_name)\n new_col = scol_for(\n update_sdf, other._internal.spark_column_name_for(column_labels) + \"_new\"\n )\n if 
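# --- Illustrative usage sketch (editor's addition). append(), shown above, is a
# thin wrapper over ps.concat; verify_integrity=True triggers the overlapping-index
# check done with Spark's intersect(). update() keeps the caller's shape and only
# overwrites matching cells. Assumes pyspark.pandas; the data is made up.
import pyspark.pandas as ps

df = ps.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
other = ps.DataFrame([[5, 6]], columns=list("AB"), index=[9])

# Safe: the indexes {0, 1} and {9} do not overlap, so no ValueError is raised.
appended = df.append(other, verify_integrity=True)

# update() modifies df in place; only column "B" overlaps, so only "B" changes.
new_vals = ps.DataFrame({"B": [40, 50]})
df.update(new_vals)
print(df.sort_index().to_pandas())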
overwrite:\n update_sdf = update_sdf.withColumn(\n column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)\n )\n else:\n update_sdf = update_sdf.withColumn(\n column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)\n )\n data_fields[self._internal.column_labels.index(column_labels)] = None\n sdf = update_sdf.select(\n *[scol_for(update_sdf, col) for col in self._internal.spark_column_names],\n *HIDDEN_COLUMNS,\n )\n internal = self._internal.with_new_sdf(sdf, data_fields=data_fields)\n self._update_internal_frame(internal, requires_same_anchor=False)\n\n # TODO: ddof should be implemented.\n def cov(self, min_periods: Optional[int] = None) -> \"DataFrame\":\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n\n Examples\n --------\n >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = ps.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... 
columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> sdf = ps.from_pandas(df)\n >>> sdf.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n min_periods = 1 if min_periods is None else min_periods\n\n # Only compute covariance for Boolean and Numeric except Decimal\n psdf = self[\n [\n col\n for col in self.columns\n if isinstance(self[col].spark.data_type, BooleanType)\n or (\n isinstance(self[col].spark.data_type, NumericType)\n and not isinstance(self[col].spark.data_type, DecimalType)\n )\n ]\n ]\n\n num_cols = len(psdf.columns)\n cov = np.zeros([num_cols, num_cols])\n\n if num_cols == 0:\n return DataFrame()\n\n if len(psdf) < min_periods:\n cov.fill(np.nan)\n return DataFrame(cov, columns=psdf.columns, index=psdf.columns)\n\n data_cols = psdf._internal.data_spark_column_names\n cov_scols = []\n count_not_null_scols = []\n\n # Count number of null row between two columns\n # Example:\n # a b c\n # 0 1 1 1\n # 1 NaN 2 2\n # 2 3 NaN 3\n # 3 4 4 4\n #\n # a b c\n # a count(a, a) count(a, b) count(a, c)\n # b count(b, b) count(b, c)\n # c count(c, c)\n #\n # count_not_null_scols =\n # [F.count(a, a), F.count(a, b), F.count(a, c), F.count(b, b), F.count(b, c), F.count(c, c)]\n for r in range(0, num_cols):\n for c in range(r, num_cols):\n count_not_null_scols.append(\n F.count(\n F.when(F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull(), 1)\n )\n )\n\n count_not_null = (\n psdf._internal.spark_frame.replace(float(\"nan\"), None)\n .select(*count_not_null_scols)\n .head(1)[0]\n )\n\n # Calculate covariance between two columns\n # Example:\n # with min_periods = 3\n # a b c\n # 0 1 1 1\n # 1 NaN 2 2\n # 2 3 NaN 3\n # 3 4 4 4\n #\n # a b c\n # a cov(a, a) None cov(a, c)\n # b cov(b, b) cov(b, c)\n # c cov(c, c)\n #\n # cov_scols = [F.cov(a, a), None, F.cov(a, c), F.cov(b, b), F.cov(b, c), F.cov(c, c)]\n step = 0\n for r in range(0, num_cols):\n step += r\n for c in range(r, num_cols):\n cov_scols.append(\n F.covar_samp(\n F.col(data_cols[r]).cast(\"double\"), F.col(data_cols[c]).cast(\"double\")\n )\n if count_not_null[r * num_cols + c - step] >= min_periods\n else F.lit(None)\n )\n\n pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0]\n\n # Convert from row to 2D array\n # Example:\n # pair_cov = [cov(a, a), None, cov(a, c), cov(b, b), cov(b, c), cov(c, c)]\n #\n # cov =\n #\n # a b c\n # a cov(a, a) None cov(a, c)\n # b cov(b, b) cov(b, c)\n # c cov(c, c)\n step = 0\n for r in range(0, num_cols):\n step += r\n for c in range(r, num_cols):\n cov[r][c] = pair_cov[r * num_cols + c - step]\n\n # Copy values\n # Example:\n # cov =\n # a b c\n # a cov(a, a) None cov(a, c)\n # b None cov(b, b) cov(b, c)\n # c cov(a, c) cov(b, c) cov(c, c)\n cov = cov + cov.T - np.diag(np.diag(cov))\n return DataFrame(cov, columns=psdf.columns, index=psdf.columns)\n\n def sample(\n self,\n n: Optional[int] = None,\n frac: Optional[float] = None,\n replace: bool = False,\n random_state: Optional[int] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will\n be fixed. 
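# --- Illustrative usage sketch (editor's addition). cov(), implemented above,
# computes only the upper triangle with covar_samp and mirrors it; a pair of
# columns with fewer than min_periods co-observed rows comes back as NaN.
# Assumes pyspark.pandas and numpy; the data is hypothetical.
import numpy as np
import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1.0, np.nan, 3.0, 4.0], "b": [1.0, 2.0, np.nan, 4.0]})

# a and b share only 2 non-null rows, so with min_periods=3 cov(a, b) is NaN,
# while the diagonal entries (3 observations each) are still computed.
print(psdf.cov(min_periods=3))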
The result set depends on not only the seed, but also how the data is distributed\n across machines and to some extent network randomness when shuffle operations are involved.\n Even in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n \"\"\"\n # Note: we don't run any of the doctests because the result can change depending on the\n # system's core count.\n if n is not None:\n raise NotImplementedError(\n \"Function sample currently does not support specifying \"\n \"exact number of items to return. Use frac instead.\"\n )\n\n if frac is None:\n raise ValueError(\"frac must be specified.\")\n\n sdf = self._internal.resolved_copy.spark_frame.sample(\n withReplacement=replace, fraction=frac, seed=random_state\n )\n return DataFrame(self._internal.with_new_sdf(sdf))\n\n def astype(self, dtype: Union[str, Dtype, Dict[Name, Union[str, Dtype]]]) -> \"DataFrame\":\n \"\"\"\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to\n the same type. 
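# --- Illustrative usage sketch (editor's addition). sample(), implemented above,
# delegates to Spark's DataFrame.sample, so frac is a per-row probability rather
# than an exact fraction, and the same seed can return different rows on clusters
# with different partitioning. Assumes pyspark.pandas; the data is hypothetical.
import pyspark.pandas as ps

psdf = ps.DataFrame({"x": range(100)})

s = psdf.sample(frac=0.1, random_state=42)              # roughly 10 rows, not exactly 10
with_repl = psdf.sample(frac=0.5, replace=True, random_state=42)
print(len(s), len(with_repl))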
Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n \"\"\"\n applied = []\n if is_dict_like(dtype):\n dtype_dict = cast(Dict[Name, Union[str, Dtype]], dtype)\n for col_name in dtype_dict.keys():\n if col_name not in self.columns:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n for col_name, col in self.items():\n if col_name in dtype_dict:\n applied.append(col.astype(dtype=dtype_dict[col_name]))\n else:\n applied.append(col)\n else:\n for col_name, col in self.items():\n applied.append(col.astype(dtype=cast(Union[str, Dtype], dtype)))\n return DataFrame(self._internal.with_new_columns(applied))\n\n def add_prefix(self, prefix: str) -> \"DataFrame\":\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n assert isinstance(prefix, str)\n return self._apply_series_op(\n lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label]))\n )\n\n def add_suffix(self, suffix: str) -> \"DataFrame\":\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add before each label.\n\n Returns\n -------\n DataFrame\n New DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n assert isinstance(suffix, str)\n return self._apply_series_op(\n lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label]))\n )\n\n # TODO: include, and exclude should be implemented.\n def describe(self, percentiles: Optional[List[float]] = None) -> \"DataFrame\":\n \"\"\"\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n 
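# --- Illustrative usage sketch (editor's addition). astype() accepts either a
# single dtype or a per-column mapping, and add_prefix()/add_suffix() only touch
# column labels, so the three chain naturally. Assumes pyspark.pandas; the frame
# is made up.
import pyspark.pandas as ps

df = ps.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})

out = df.astype({"a": float}).add_prefix("col_").add_suffix("_v1")
print(out.dtypes)  # col_a_v1 -> float64, col_b_v1 -> int64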
Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]\n A list of percentiles to be computed.\n\n Returns\n -------\n DataFrame\n Summary statistics of the Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.\n\n For object data (e.g. strings or timestamps), the result’s index will include\n ``count``, ``unique``, ``top``, and ``freq``.\n The ``top`` is the most common value. The ``freq`` is the most common value’s frequency.\n Timestamps also include the ``first`` and ``last`` items.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n dtype: float64\n\n Describing a ``DataFrame``. Only numeric fields are returned.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0],\n ... 'object': ['a', 'b', 'c']\n ... },\n ... columns=['numeric1', 'numeric2', 'object'])\n >>> df.describe()\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n For multi-index columns:\n\n >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]\n >>> df.describe() # doctest: +NORMALIZE_WHITESPACE\n num\n a b\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 25% 1.0 4.0\n 50% 2.0 5.0\n 75% 3.0 6.0\n max 3.0 6.0\n\n >>> df[('num', 'b')].describe()\n count 3.0\n mean 5.0\n std 1.0\n min 4.0\n 25% 4.0\n 50% 5.0\n 75% 6.0\n max 6.0\n Name: (num, b), dtype: float64\n\n Describing a ``DataFrame`` and selecting custom percentiles.\n\n >>> df = ps.DataFrame({'numeric1': [1, 2, 3],\n ... 'numeric2': [4.0, 5.0, 6.0]\n ... },\n ... 
columns=['numeric1', 'numeric2'])\n >>> df.describe(percentiles = [0.85, 0.15])\n numeric1 numeric2\n count 3.0 3.0\n mean 2.0 5.0\n std 1.0 1.0\n min 1.0 4.0\n 15% 1.0 4.0\n 50% 2.0 5.0\n 85% 3.0 6.0\n max 3.0 6.0\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric1.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.0\n 50% 2.0\n 75% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute and selecting custom percentiles.\n\n >>> df.numeric1.describe(percentiles = [0.85, 0.15])\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 15% 1.0\n 50% 2.0\n 85% 3.0\n max 3.0\n Name: numeric1, dtype: float64\n \"\"\"\n psser_numeric: List[Series] = []\n psser_string: List[Series] = []\n psser_timestamp: List[Series] = []\n spark_data_types: List[DataType] = []\n column_labels: Optional[List[Label]] = []\n column_names: List[str] = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n spark_data_type = psser.spark.data_type\n if isinstance(spark_data_type, NumericType):\n psser_numeric.append(psser)\n column_labels.append(label)\n spark_data_types.append(spark_data_type)\n elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n psser_timestamp.append(psser)\n column_labels.append(label)\n spark_data_types.append(spark_data_type)\n else:\n psser_string.append(psser)\n column_names.append(self._internal.spark_column_name_for(label))\n\n if percentiles is not None:\n if any((p < 0.0) or (p > 1.0) for p in percentiles):\n raise ValueError(\"Percentiles should all be in the interval [0, 1]\")\n # appending 50% if not in percentiles already\n percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles\n else:\n percentiles = [0.25, 0.5, 0.75]\n\n # Identify the cases\n is_all_string_type = (\n len(psser_numeric) == 0 and len(psser_timestamp) == 0 and len(psser_string) > 0\n )\n is_all_numeric_type = len(psser_numeric) > 0 and len(psser_timestamp) == 0\n has_timestamp_type = len(psser_timestamp) > 0\n has_numeric_type = len(psser_numeric) > 0\n\n if is_all_string_type:\n # Handling string type columns\n # We will retrive the `count`, `unique`, `top` and `freq`.\n internal = self._internal.resolved_copy\n exprs_string = [\n internal.spark_column_for(psser._column_label) for psser in psser_string\n ]\n sdf = internal.spark_frame.select(*exprs_string)\n\n # Get `count` & `unique` for each columns\n counts, uniques = map(lambda x: x[1:], sdf.summary(\"count\", \"count_distinct\").take(2))\n # Handling Empty DataFrame\n if len(counts) == 0 or counts[0] == \"0\":\n data = dict()\n for psser in psser_string:\n data[psser.name] = [0, 0, np.nan, np.nan]\n return DataFrame(data, index=[\"count\", \"unique\", \"top\", \"freq\"])\n\n # Get `top` & `freq` for each columns\n tops = []\n freqs = []\n # TODO(SPARK-37711): We should do it in single pass since invoking Spark job\n # for every columns is too expensive.\n for column in exprs_string:\n top, freq = sdf.groupby(column).count().sort(\"count\", ascending=False).first()\n tops.append(str(top))\n freqs.append(str(freq))\n\n stats = [counts, uniques, tops, freqs]\n stats_names = [\"count\", \"unique\", \"top\", \"freq\"]\n\n result: DataFrame = DataFrame(\n data=stats,\n index=stats_names,\n columns=column_names,\n )\n elif is_all_numeric_type:\n # Handling numeric columns\n exprs_numeric = [\n psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric\n ]\n 
formatted_perc = [\"{:.0%}\".format(p) for p in sorted(percentiles)]\n stats = [\"count\", \"mean\", \"stddev\", \"min\", *formatted_perc, \"max\"]\n\n # In this case, we can simply use `summary` to calculate the stats.\n sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats)\n sdf = sdf.replace(\"stddev\", \"std\", subset=[\"summary\"])\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, \"summary\")],\n column_labels=column_labels,\n data_spark_columns=[\n scol_for(sdf, self._internal.spark_column_name_for(label))\n for label in column_labels\n ],\n )\n result = DataFrame(internal).astype(\"float64\")\n elif has_timestamp_type:\n internal = self._internal.resolved_copy\n column_names = [\n internal.spark_column_name_for(column_label) for column_label in column_labels\n ]\n column_length = len(column_labels)\n\n # Apply stat functions for each column.\n count_exprs = map(F.count, column_names)\n min_exprs = map(F.min, column_names)\n # Here we try to flat the multiple map into single list that contains each calculated\n # percentile using `chain`.\n # e.g. flat the `[<map object at 0x7fc1907dc280>, <map object at 0x7fc1907dcc70>]`\n # to `[Column<'percentile_approx(A, 0.2, 10000)'>,\n # Column<'percentile_approx(B, 0.2, 10000)'>,\n # Column<'percentile_approx(A, 0.5, 10000)'>,\n # Column<'percentile_approx(B, 0.5, 10000)'>]`\n perc_exprs = chain(\n *[\n map(F.percentile_approx, column_names, [percentile] * column_length)\n for percentile in percentiles\n ]\n )\n max_exprs = map(F.max, column_names)\n mean_exprs = []\n for column_name, spark_data_type in zip(column_names, spark_data_types):\n mean_exprs.append(F.mean(column_name).astype(spark_data_type))\n exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs]\n\n formatted_perc = [\"{:.0%}\".format(p) for p in sorted(percentiles)]\n stats_names = [\"count\", \"mean\", \"min\", *formatted_perc, \"max\"]\n\n # If not all columns are timestamp type,\n # we also need to calculate the `std` for numeric columns\n if has_numeric_type:\n std_exprs = []\n for label, spark_data_type in zip(column_labels, spark_data_types):\n column_name = label[0]\n if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n std_exprs.append(F.lit(None).alias(\"stddev_samp({})\".format(column_name)))\n else:\n std_exprs.append(F.stddev(column_name))\n exprs.extend(std_exprs)\n stats_names.append(\"std\")\n\n # Select stats for all columns at once.\n sdf = internal.spark_frame.select(exprs)\n stat_values = sdf.first()\n\n num_stats = int(len(exprs) / column_length)\n # `column_name_stats_kv` is key-value store that has column name as key, and\n # the stats as values e.g. 
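# --- Illustrative sketch (editor's addition). For all-numeric frames the describe()
# branch above simply calls Spark's DataFrame.summary() with "count", "mean",
# "stddev", "min", the formatted percentiles and "max", then renames "stddev" to
# "std". A roughly equivalent raw-Spark call, with hypothetical column names:
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame(
    [(1, 4.0), (2, 5.0), (3, 6.0)], "numeric1 int, numeric2 double"
)

stats = ["count", "mean", "stddev", "min", "25%", "50%", "75%", "max"]
summary = sdf.summary(*stats).replace("stddev", "std", subset=["summary"])
summary.show()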
{\"A\": [{count_value}, {min_value}, ...],\n # \"B\": [{count_value}, {min_value} ...]}\n column_name_stats_kv: Dict[str, List[str]] = defaultdict(list)\n for i, column_name in enumerate(column_names):\n for first_stat_idx in range(num_stats):\n column_name_stats_kv[column_name].append(\n stat_values[(first_stat_idx * column_length) + i]\n )\n\n # For timestamp type columns, we should cast the column type to string.\n for key, spark_data_type in zip(column_name_stats_kv, spark_data_types):\n if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):\n column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]]\n\n result: DataFrame = DataFrame( # type: ignore[no-redef]\n data=column_name_stats_kv,\n index=stats_names,\n columns=column_names,\n )\n else:\n # Empty DataFrame without column\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n return result\n\n def drop_duplicates(\n self,\n subset: Optional[Union[Name, List[Name]]] = None,\n keep: Union[bool, str] = \"first\",\n inplace: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy.\n\n Returns\n -------\n DataFrame\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n >>> df = ps.DataFrame(\n ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])\n >>> df\n a b\n 0 1 a\n 1 2 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates().sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates('a').sort_index()\n a b\n 0 1 a\n 1 2 a\n 4 3 d\n\n >>> df.drop_duplicates(['a', 'b']).sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep='last').sort_index()\n a b\n 0 1 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep=False).sort_index()\n a b\n 0 1 a\n 3 2 c\n 4 3 d\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n sdf, column = self._mark_duplicates(subset, keep)\n\n sdf = sdf.where(~scol_for(sdf, column)).drop(column)\n internal = self._internal.with_new_sdf(sdf)\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def reindex(\n self,\n labels: Optional[Sequence[Any]] = None,\n index: Optional[Union[\"Index\", Sequence[Any]]] = None,\n columns: Optional[Union[pd.Index, Sequence[Any]]] = None,\n axis: Optional[Axis] = None,\n copy: Optional[bool] = True,\n fill_value: Optional[Any] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Conform DataFrame to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. 
A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n labels: array-like, optional\n New labels / index to conform the axis specified by ‘axis’ to.\n index, columns: array-like, optional\n New labels / index to conform to, should be specified using keywords.\n Preferably an Index object to avoid duplicating data\n axis: int or str, optional\n Axis to target. Can be either the axis name (‘index’, ‘columns’) or\n number (0, 1).\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n\n Returns\n -------\n DataFrame with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = ps.DataFrame({\n ... 'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index,\n ... columns=['http_status', 'response_time'])\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index).sort_index()\n http_status response_time\n Chrome 200.0 0.02\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Iceweasel NaN NaN\n Safari 404.0 0.07\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``.\n\n >>> df.reindex(new_index, fill_value=0, copy=False).sort_index()\n http_status response_time\n Chrome 200 0.02\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Iceweasel 0 0.00\n Safari 404 0.07\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent']).sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\").sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = ps.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... 
index=date_index)\n >>> df2.sort_index()\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2).sort_index()\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n \"\"\"\n if axis is not None and (index is not None or columns is not None):\n raise TypeError(\"Cannot specify both 'axis' and any of 'index' or 'columns'.\")\n\n if labels is not None:\n axis = validate_axis(axis)\n if axis == 0:\n index = labels\n elif axis == 1:\n columns = labels\n\n if index is not None and not is_list_like(index):\n raise TypeError(\n \"Index must be called with a collection of some kind, \"\n \"%s was passed\" % type(index)\n )\n\n if columns is not None and not is_list_like(columns):\n raise TypeError(\n \"Columns must be called with a collection of some kind, \"\n \"%s was passed\" % type(columns)\n )\n\n df = self\n\n if index is not None:\n df = df._reindex_index(index, fill_value)\n\n if columns is not None:\n df = df._reindex_columns(columns, fill_value)\n\n # Copy\n if copy and df is self:\n return df.copy()\n else:\n return df\n\n def _reindex_index(\n self, index: Optional[Union[\"Index\", Sequence[Any]]], fill_value: Optional[Any]\n ) -> \"DataFrame\":\n # When axis is index, we can mimic pandas' by a right outer join.\n nlevels = self._internal.index_level\n assert nlevels <= 1 or (\n isinstance(index, ps.MultiIndex) and nlevels == index.nlevels\n ), \"MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex.\"\n\n index_columns = self._internal.index_spark_column_names\n frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)\n\n if isinstance(index, ps.Index):\n if nlevels != index.nlevels:\n return DataFrame(index._internal.with_new_columns([])).reindex(\n columns=self.columns, fill_value=fill_value\n )\n\n index_names = index._internal.index_names\n scols = index._internal.index_spark_columns\n labels = index._internal.spark_frame.select(\n [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)]\n )\n else:\n index = ps.Index(list(index))\n labels = index._internal.spark_frame.select(index.spark.column.alias(index_columns[0]))\n index_names = self._internal.index_names\n\n if fill_value is not None:\n frame_index_columns = [\n verify_temp_column_name(frame, \"__frame_index_column_{}__\".format(i))\n for i in range(nlevels)\n ]\n index_scols = [\n scol_for(frame, index_col).alias(frame_index_col)\n for index_col, frame_index_col in zip(index_columns, frame_index_columns)\n ]\n scols = self._internal.resolved_copy.data_spark_columns\n frame = frame.select(index_scols + scols)\n\n temp_fill_value = verify_temp_column_name(frame, \"__fill_value__\")\n labels = labels.withColumn(temp_fill_value, SF.lit(fill_value))\n\n frame_index_scols = [scol_for(frame, col) for col in frame_index_columns]\n labels_index_scols = [scol_for(labels, col) for col in index_columns]\n\n joined_df = frame.join(\n labels,\n on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)],\n how=\"right\",\n )\n\n joined_df = joined_df.select(\n *labels_index_scols,\n *[\n F.when(\n reduce(\n lambda c1, c2: c1 & c2,\n [\n fcol.isNull() & lcol.isNotNull()\n 
for fcol, lcol in zip(frame_index_scols, labels_index_scols)\n ],\n ),\n scol_for(joined_df, temp_fill_value),\n )\n .otherwise(scol_for(joined_df, col))\n .alias(col)\n for col in self._internal.data_spark_column_names\n ],\n )\n data_fields = None\n else:\n joined_df = frame.join(labels, on=index_columns, how=\"right\")\n data_fields = [field.copy(nullable=True) for field in self._internal.data_fields]\n\n sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME)\n internal = self._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=index_names,\n index_fields=[\n field.copy(name=name)\n for field, name in zip(\n index._internal.index_fields, self._internal.index_spark_column_names\n )\n ],\n data_spark_columns=[\n scol_for(sdf, col) for col in self._internal.data_spark_column_names\n ],\n data_fields=data_fields,\n )\n return DataFrame(internal)\n\n def _reindex_columns(\n self, columns: Optional[Union[pd.Index, Sequence[Any]]], fill_value: Optional[Any]\n ) -> \"DataFrame\":\n level = self._internal.column_labels_level\n if level > 1:\n label_columns = list(columns)\n for col in label_columns:\n if not isinstance(col, tuple):\n raise TypeError(\"Expected tuple, got {}\".format(type(col).__name__))\n else:\n label_columns = [(col,) for col in columns]\n for col in label_columns:\n if len(col) != level:\n raise ValueError(\n \"shape (1,{}) doesn't match the shape (1,{})\".format(len(col), level)\n )\n fill_value = np.nan if fill_value is None else fill_value\n scols_or_pssers: List[Union[Series, Column]] = []\n labels = []\n for label in label_columns:\n if label in self._internal.column_labels:\n scols_or_pssers.append(self._psser_for(label))\n else:\n scols_or_pssers.append(SF.lit(fill_value).alias(name_like_string(label)))\n labels.append(label)\n\n if isinstance(columns, pd.Index):\n column_label_names = [\n name if is_name_like_tuple(name) else (name,) for name in columns.names\n ]\n internal = self._internal.with_new_columns(\n scols_or_pssers, column_labels=labels, column_label_names=column_label_names\n )\n else:\n internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels)\n\n return DataFrame(internal)\n\n def reindex_like(self, other: \"DataFrame\", copy: bool = True) -> \"DataFrame\":\n \"\"\"\n Return a DataFrame with matching indices as other object.\n\n Conform the object to the same index on all axes. Places NA/NaN in locations\n having no value in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : DataFrame\n Its row and column indices are used to define the new indices\n of this object.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n\n Returns\n -------\n DataFrame\n DataFrame with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n\n >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... 
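# --- Illustrative sketch (editor's addition). As the comment in _reindex_index()
# above notes, reindexing along the index is mimicked with a right outer join
# against a one-column frame of the requested labels: labels missing from the data
# come back as null rows. The names below are hypothetical.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
data = spark.createDataFrame([("Chrome", 200), ("Safari", 404)], "idx string, http_status int")
labels = spark.createDataFrame([("Chrome",), ("IE10",)], "idx string")

reindexed = data.join(labels, on="idx", how="right")  # IE10 appears with null http_status
reindexed.show()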
end='2014-02-15', freq='D'))\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = ps.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN None\n 2014-02-15 35.1 NaN medium\n \"\"\"\n\n if isinstance(other, DataFrame):\n return self.reindex(index=other.index, columns=other.columns, copy=copy)\n else:\n raise TypeError(\"other must be a pandas-on-Spark DataFrame\")\n\n def melt(\n self,\n id_vars: Optional[Union[Name, List[Name]]] = None,\n value_vars: Optional[Union[Name, List[Name]]] = None,\n var_name: Optional[Union[str, List[str]]] = None,\n value_name: str = \"value\",\n ) -> \"DataFrame\":\n \"\"\"\n Unpivot a DataFrame from wide format to long format, optionally\n leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one\n or more columns are identifier variables (`id_vars`), while all other\n columns, considered measured variables (`value_vars`), are \"unpivoted\" to\n the row axis, leaving just two non-identifier columns, 'variable' and\n 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar, default 'variable'\n Name to use for the 'variable' column. If None it uses `frame.columns.name` or\n ‘variable’.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},\n ... 'B': {0: 1, 1: 3, 2: 5},\n ... 'C': {0: 2, 1: 4, 2: 6}},\n ... columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> ps.melt(df)\n variable value\n 0 A a\n 1 B 1\n 2 C 2\n 3 A b\n 4 B 3\n 5 C 4\n 6 A c\n 7 B 5\n 8 C 6\n\n >>> df.melt(id_vars='A')\n A variable value\n 0 a B 1\n 1 a C 2\n 2 b B 3\n 3 b C 4\n 4 c B 5\n 5 c C 6\n\n >>> df.melt(value_vars='A')\n variable value\n 0 A a\n 1 A b\n 2 A c\n\n >>> ps.melt(df, id_vars=['A', 'B'])\n A B variable value\n 0 a 1 C 2\n 1 b 3 C 4\n 2 c 5 C 6\n\n >>> df.melt(id_vars=['A'], value_vars=['C'])\n A variable value\n 0 a C 2\n 1 b C 4\n 2 c C 6\n\n The names of 'variable' and 'value' columns can be customized:\n\n >>> ps.melt(df, id_vars=['A'], value_vars=['B'],\n ... 
var_name='myVarname', value_name='myValname')\n A myVarname myValname\n 0 a B 1\n 1 b B 3\n 2 c B 5\n \"\"\"\n column_labels = self._internal.column_labels\n\n if id_vars is None:\n id_vars = []\n else:\n if isinstance(id_vars, tuple):\n if self._internal.column_labels_level == 1:\n id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]\n else:\n raise ValueError(\n \"id_vars must be a list of tuples\" \" when columns are a MultiIndex\"\n )\n elif is_name_like_value(id_vars):\n id_vars = [(id_vars,)]\n else:\n id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]\n\n non_existence_col = [idv for idv in id_vars if idv not in column_labels]\n if len(non_existence_col) != 0:\n raveled_column_labels = np.ravel(column_labels)\n missing = [\n nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels\n ]\n if len(missing) != 0:\n raise KeyError(\n \"The following 'id_vars' are not present\"\n \" in the DataFrame: {}\".format(missing)\n )\n else:\n raise KeyError(\n \"None of {} are in the {}\".format(non_existence_col, column_labels)\n )\n\n if value_vars is None:\n value_vars = []\n else:\n if isinstance(value_vars, tuple):\n if self._internal.column_labels_level == 1:\n value_vars = [\n valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars\n ]\n else:\n raise ValueError(\n \"value_vars must be a list of tuples\" \" when columns are a MultiIndex\"\n )\n elif is_name_like_value(value_vars):\n value_vars = [(value_vars,)]\n else:\n value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars]\n\n non_existence_col = [valv for valv in value_vars if valv not in column_labels]\n if len(non_existence_col) != 0:\n raveled_column_labels = np.ravel(column_labels)\n missing = [\n nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels\n ]\n if len(missing) != 0:\n raise KeyError(\n \"The following 'value_vars' are not present\"\n \" in the DataFrame: {}\".format(missing)\n )\n else:\n raise KeyError(\n \"None of {} are in the {}\".format(non_existence_col, column_labels)\n )\n\n if len(value_vars) == 0:\n value_vars = column_labels\n\n column_labels = [label for label in column_labels if label not in id_vars]\n\n sdf = self._internal.spark_frame\n\n if var_name is None:\n if (\n self._internal.column_labels_level == 1\n and self._internal.column_label_names[0] is None\n ):\n var_name = [\"variable\"]\n else:\n var_name = [\n name_like_string(name) if name is not None else \"variable_{}\".format(i)\n for i, name in enumerate(self._internal.column_label_names)\n ]\n elif isinstance(var_name, str):\n var_name = [var_name]\n\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[SF.lit(c).alias(name) for c, name in zip(label, var_name)],\n *[self._internal.spark_column_for(label).alias(value_name)],\n )\n for label in column_labels\n if label in value_vars\n ]\n )\n )\n\n columns = (\n [\n self._internal.spark_column_for(label).alias(name_like_string(label))\n for label in id_vars\n ]\n + [F.col(\"pairs.`%s`\" % name) for name in var_name]\n + [F.col(\"pairs.`%s`\" % value_name)]\n )\n exploded_df = sdf.withColumn(\"pairs\", pairs).select(columns)\n\n return DataFrame(\n InternalFrame(\n spark_frame=exploded_df,\n index_spark_columns=None,\n column_labels=(\n [label if len(label) == 1 else (name_like_string(label),) for label in id_vars]\n + [(name,) for name in var_name]\n + [(value_name,)]\n ),\n )\n )\n\n def stack(self) -> DataFrameOrSeries:\n \"\"\"\n Stack the prescribed level(s) from 
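# --- Illustrative sketch (editor's addition). The melt() implementation above builds
# one struct per value column, wraps the structs in an array, and explodes it, so
# each input row yields one output row per melted column. The same pattern in raw
# Spark, with hypothetical column names:
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([("a", 1, 2), ("b", 3, 4)], "A string, B int, C int")

pairs = F.explode(
    F.array(
        *[F.struct(F.lit(c).alias("variable"), F.col(c).alias("value")) for c in ["B", "C"]]
    )
)
melted = sdf.withColumn("pairs", pairs).select("A", "pairs.variable", "pairs.value")
melted.show()  # two rows per input row: one for B, one for C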
columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n The new index levels are sorted.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack().sort_index()\n cat height 1\n weight 0\n dog height 3\n weight 2\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack().sort_index()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. 
Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if len(self._internal.column_labels) == 0:\n return DataFrame(\n self._internal.copy(\n column_label_names=self._internal.column_label_names[:-1]\n ).with_filter(SF.lit(False))\n )\n\n column_labels: Dict[Label, Dict[Any, Column]] = defaultdict(dict)\n index_values = set()\n should_returns_series = False\n for label in self._internal.column_labels:\n new_label = label[:-1]\n if len(new_label) == 0:\n new_label = None\n should_returns_series = True\n value = label[-1]\n\n scol = self._internal.spark_column_for(label)\n column_labels[new_label][value] = scol\n\n index_values.add(value)\n\n column_labels = dict(sorted(column_labels.items(), key=lambda x: x[0]))\n\n index_name = self._internal.column_label_names[-1]\n column_label_names = self._internal.column_label_names[:-1]\n if len(column_label_names) == 0:\n column_label_names = [None]\n\n index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)\n data_columns = [name_like_string(label) for label in column_labels]\n\n structs = [\n F.struct(\n *[SF.lit(value).alias(index_column)],\n *[\n (\n column_labels[label][value]\n if value in column_labels[label]\n else SF.lit(None)\n ).alias(name)\n for label, name in zip(column_labels, data_columns)\n ],\n ).alias(value)\n for value in index_values\n ]\n\n pairs = F.explode(F.array(*structs))\n\n sdf = self._internal.spark_frame.withColumn(\"pairs\", pairs)\n sdf = sdf.select(\n self._internal.index_spark_columns\n + [sdf[\"pairs\"][index_column].alias(index_column)]\n + [sdf[\"pairs\"][name].alias(name) for name in data_columns]\n )\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col)\n for col in (self._internal.index_spark_column_names + [index_column])\n ],\n index_names=self._internal.index_names + [index_name],\n index_fields=self._internal.index_fields + [None],\n column_labels=list(column_labels),\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n column_label_names=column_label_names,\n )\n psdf: DataFrame = DataFrame(internal)\n\n if should_returns_series:\n return first_series(psdf)\n else:\n return psdf\n\n def unstack(self) -> DataFrameOrSeries:\n \"\"\"\n Pivot the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series.\n\n .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and\n it could cause a serious performance degradation since Spark partitions it row based.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).\n\n Examples\n --------\n >>> df = ps.DataFrame({\"A\": {\"0\": \"a\", \"1\": \"b\", \"2\": \"c\"},\n ... \"B\": {\"0\": \"1\", \"1\": \"3\", \"2\": \"5\"},\n ... \"C\": {\"0\": \"2\", \"1\": \"4\", \"2\": \"6\"}},\n ... 
columns=[\"A\", \"B\", \"C\"])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> df.unstack().sort_index()\n A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n C 0 2\n 1 4\n 2 6\n dtype: object\n\n >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])\n >>> df.unstack().sort_index()\n X A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n Y C 0 2\n 1 4\n 2 6\n dtype: object\n\n For MultiIndex case:\n\n >>> df = ps.DataFrame({\"A\": [\"a\", \"b\", \"c\"],\n ... \"B\": [1, 3, 5],\n ... \"C\": [2, 4, 6]},\n ... columns=[\"A\", \"B\", \"C\"])\n >>> df = df.set_index('A', append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A a b c a b c\n 0 1.0 NaN NaN 2.0 NaN NaN\n 1 NaN 3.0 NaN NaN 4.0 NaN\n 2 NaN NaN 5.0 NaN NaN 6.0\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if self._internal.index_level > 1:\n # The index after `reset_index()` will never be used, so use \"distributed\" index\n # as a dummy to avoid overhead.\n with option_context(\"compute.default_index_type\", \"distributed\"):\n df = self.reset_index()\n index = df._internal.column_labels[: self._internal.index_level - 1]\n columns = df.columns[self._internal.index_level - 1]\n df = df.pivot_table(\n index=index, columns=columns, values=self._internal.column_labels, aggfunc=\"first\"\n )\n internal = df._internal.copy(\n index_names=self._internal.index_names[:-1],\n index_fields=df._internal.index_fields[: self._internal.index_level - 1],\n column_label_names=(\n df._internal.column_label_names[:-1]\n + [\n None\n if self._internal.index_names[-1] is None\n else df._internal.column_label_names[-1]\n ]\n ),\n )\n return DataFrame(internal)\n\n # TODO: Codes here are similar with melt. 
Should we deduplicate?\n column_labels = self._internal.column_labels\n ser_name = SPARK_DEFAULT_SERIES_NAME\n sdf = self._internal.spark_frame\n new_index_columns = [\n SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)\n ]\n\n new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, []))\n\n pairs = F.explode(\n F.array(\n *[\n F.struct(\n *[SF.lit(c).alias(name) for c, name in zip(idx, new_index_columns)],\n *[self._internal.spark_column_for(idx).alias(ser_name)],\n )\n for idx in column_labels\n ]\n )\n )\n\n columns = [\n F.col(\"pairs.%s\" % name)\n for name in new_index_columns[: self._internal.column_labels_level]\n ] + [F.col(\"pairs.%s\" % ser_name)]\n\n new_index_len = len(new_index_columns)\n existing_index_columns = []\n for i, (index_name, index_field) in enumerate(\n zip(self._internal.index_names, self._internal.index_fields)\n ):\n name = SPARK_INDEX_NAME_FORMAT(i + new_index_len)\n new_index_map.append((name, index_name, index_field.copy(name=name)))\n existing_index_columns.append(self._internal.index_spark_columns[i].alias(name))\n\n exploded_df = sdf.withColumn(\"pairs\", pairs).select(existing_index_columns + columns)\n\n index_spark_column_names, index_names, index_fields = zip(*new_index_map)\n return first_series(\n DataFrame(\n InternalFrame(\n exploded_df,\n index_spark_columns=[\n scol_for(exploded_df, col) for col in index_spark_column_names\n ],\n index_names=list(index_names),\n index_fields=list(index_fields),\n column_labels=[None],\n )\n )\n )\n\n # TODO: axis, skipna, level and **kwargs should be implemented.\n def all(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> \"Series\":\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there is at least one element within a series that is\n False or equivalent (e.g. zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [True, True, True],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 0],\n ... 'col4': [1, 2, 3],\n ... 'col5': [True, True, None],\n ... 'col6': [True, False, None]},\n ... 
columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.all()\n col1 True\n col2 False\n col3 False\n col4 True\n col5 True\n col6 False\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.all(bool_only=True)\n col1 True\n col2 False\n dtype: bool\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n column_labels = self._internal.column_labels\n if bool_only:\n column_labels = self._bool_column_labels(column_labels)\n if len(column_labels) == 0:\n return ps.Series([], dtype=bool)\n\n applied = []\n for label in column_labels:\n scol = self._internal.spark_column_for(label)\n all_col = F.min(F.coalesce(scol.cast(\"boolean\"), SF.lit(True)))\n applied.append(F.when(all_col.isNull(), True).otherwise(all_col))\n\n return self._result_aggregated(column_labels, applied)\n\n # TODO: axis, skipna, level and **kwargs should be implemented.\n def any(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> \"Series\":\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there is at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [False, False, False],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 1],\n ... 'col4': [0, 1, 2],\n ... 'col5': [False, False, None],\n ... 'col6': [True, False, None]},\n ... 
columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.any()\n col1 False\n col2 True\n col3 True\n col4 True\n col5 False\n col6 True\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.any(bool_only=True)\n col1 False\n col2 True\n dtype: bool\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n column_labels = self._internal.column_labels\n if bool_only:\n column_labels = self._bool_column_labels(column_labels)\n if len(column_labels) == 0:\n return ps.Series([], dtype=bool)\n\n applied = []\n for label in column_labels:\n scol = self._internal.spark_column_for(label)\n any_col = F.max(F.coalesce(scol.cast(\"boolean\"), SF.lit(False)))\n applied.append(F.when(any_col.isNull(), False).otherwise(any_col))\n\n return self._result_aggregated(column_labels, applied)\n\n def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]:\n \"\"\"\n Filter column labels of boolean columns (without None).\n \"\"\"\n bool_column_labels = []\n for label in column_labels:\n psser = self._psser_for(label)\n if is_bool_dtype(psser):\n # Rely on dtype rather than spark type because\n # columns that consist of bools and Nones should be excluded\n # if bool_only is True\n bool_column_labels.append(label)\n return bool_column_labels\n\n def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> \"Series\":\n \"\"\"\n Given aggregated Spark columns and respective column labels from the original\n pandas-on-Spark DataFrame, construct the result Series.\n \"\"\"\n from pyspark.pandas.series import first_series\n\n cols = []\n result_scol_name = \"value\"\n for label, applied_col in zip(column_labels, scols):\n cols.append(\n F.struct(\n *[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)],\n *[applied_col.alias(result_scol_name)],\n )\n )\n # Statements under this comment implement spark frame transformations as below:\n # From:\n # +-------------------------------------------------------------------------------------+\n # |arrays |\n # +-------------------------------------------------------------------------------------+\n # |[{col1, true}, {col2, true}, {col3, false}, {col4, true}]|\n # +-------------------------------------------------------------------------------------+\n # To:\n # +-------------+\n # |col |\n # +-------------+\n # |{col1, true} |\n # |{col2, true} |\n # |{col3, false}|\n # |{col4, true} |\n # +-------------+\n # To:\n # +-----------------+-----+\n # |__index_level_0__|value|\n # +-----------------+-----+\n # |col1 |true |\n # |col2 |true |\n # |col3 |false|\n # |col4 |true |\n # +-----------------+-----+\n sdf = self._internal.spark_frame.select(F.array(*cols).alias(\"arrays\")).select(\n F.explode(F.col(\"arrays\"))\n )\n sdf = sdf.selectExpr(\"col.*\")\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))\n for i in range(self._internal.column_labels_level)\n ],\n index_names=self._internal.column_label_names,\n column_labels=[None],\n data_spark_columns=[scol_for(sdf, result_scol_name)],\n )\n\n # (cont.) 
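After exploding, `__index_level_0__` (plus any further levels for multi-level column labels) becomes the index and the `value` field becomes the data column.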
The result Series should look as below:\n # col1 False\n # col2 True\n # col3 True\n # col4 True\n # dtype: bool\n return first_series(DataFrame(internal))\n\n # TODO: add axis, pct, na_option parameter\n def rank(\n self, method: str = \"average\", ascending: bool = True, numeric_only: Optional[bool] = None\n ) -> \"DataFrame\":\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n .. note:: the current implementation of rank uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n\n Returns\n -------\n ranks : same type as caller\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 4\n 1 2 3\n 2 2 2\n 3 3 1\n\n >>> df.rank().sort_index()\n A B\n 0 1.0 4.0\n 1 2.5 3.0\n 2 2.5 2.0\n 3 4.0 1.0\n\n If method is set to 'min', it use lowest rank in group.\n\n >>> df.rank(method='min').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 4.0 1.0\n\n If method is set to 'max', it use highest rank in group.\n\n >>> df.rank(method='max').sort_index()\n A B\n 0 1.0 4.0\n 1 3.0 3.0\n 2 3.0 2.0\n 3 4.0 1.0\n\n If method is set to 'dense', it leaves no gaps in group.\n\n >>> df.rank(method='dense').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 3.0 1.0\n\n If numeric_only is set to 'True', rank only numeric columns.\n\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B'])\n >>> df\n A B\n 0 1 a\n 1 2 b\n 2 2 d\n 3 3 c\n >>> df.rank(numeric_only=True)\n A\n 0 1.0\n 1 2.5\n 2 2.5\n 3 4.0\n \"\"\"\n if numeric_only:\n numeric_col_names = []\n for label in self._internal.column_labels:\n psser = self._psser_for(label)\n if isinstance(psser.spark.data_type, (NumericType, BooleanType)):\n numeric_col_names.append(psser.name)\n\n psdf = self[numeric_col_names] if numeric_only else self\n return psdf._apply_series_op(\n lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True\n )\n\n def filter(\n self,\n items: Optional[Sequence[Any]] = None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis: Optional[Axis] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : string\n Keep labels from axis for which \"like in label == True\".\n regex : string (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : int or string axis name\n The axis to filter on. 
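For a DataFrame, ``0``/``'index'`` filters on the row index and ``1``/``'columns'`` filters on the column labels.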
By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n\n For a Series,\n\n >>> # select rows by name\n >>> df.one.filter(items=['rabbit'])\n rabbit 4\n Name: one, dtype: int64\n\n >>> # select rows by regular expression\n >>> df.one.filter(regex='e$')\n mouse 1\n Name: one, dtype: int64\n\n >>> # select rows containing 'bbi'\n >>> df.one.filter(like='bbi')\n rabbit 4\n Name: one, dtype: int64\n \"\"\"\n if sum(x is not None for x in (items, like, regex)) > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \" \"are mutually exclusive\"\n )\n\n axis = validate_axis(axis, none_axis=1)\n\n index_scols = self._internal.index_spark_columns\n\n if items is not None:\n if is_list_like(items):\n items = list(items)\n else:\n raise ValueError(\"items should be a list-like object.\")\n if axis == 0:\n if len(index_scols) == 1:\n if len(items) <= ps.get_option(\"compute.isin_limit\"):\n col = index_scols[0].isin([SF.lit(item) for item in items])\n return DataFrame(self._internal.with_filter(col))\n else:\n item_sdf_col = verify_temp_column_name(\n self._internal.spark_frame, \"__item__\"\n )\n item_sdf = default_session().createDataFrame(\n pd.DataFrame({item_sdf_col: items})\n )\n joined_sdf = self._internal.spark_frame.join(\n other=F.broadcast(item_sdf),\n on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)),\n how=\"semi\",\n )\n\n return DataFrame(self._internal.with_new_sdf(joined_sdf))\n\n else:\n # for multi-index\n col = None\n for item in items:\n if not isinstance(item, tuple):\n raise TypeError(\"Unsupported type {}\".format(type(item).__name__))\n if not item:\n raise ValueError(\"The item should not be empty.\")\n midx_col = None\n for i, element in enumerate(item):\n if midx_col is None:\n midx_col = index_scols[i] == SF.lit(element)\n else:\n midx_col = midx_col & (index_scols[i] == SF.lit(element))\n if col is None:\n col = midx_col\n else:\n col = col | midx_col\n return DataFrame(self._internal.with_filter(col))\n else:\n return self[items]\n elif like is not None:\n if axis == 0:\n col = None\n for index_scol in index_scols:\n if col is None:\n col = index_scol.contains(like)\n else:\n col = col | index_scol.contains(like)\n return DataFrame(self._internal.with_filter(col))\n else:\n column_labels = self._internal.column_labels\n output_labels = [label for label in column_labels if any(like in i for i in label)]\n return self[output_labels]\n elif regex is not None:\n if axis == 0:\n col = None\n for index_scol in index_scols:\n if col is None:\n col = index_scol.rlike(regex)\n else:\n col = col | index_scol.rlike(regex)\n return DataFrame(self._internal.with_filter(col))\n else:\n column_labels = self._internal.column_labels\n matcher = re.compile(regex)\n output_labels = 
[\n label\n for label in column_labels\n if any(matcher.search(i) is not None for i in label)\n ]\n return self[output_labels]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def rename(\n self,\n mapper: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n index: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n columns: Optional[Union[Dict, Callable[[Any], Any]]] = None,\n axis: Axis = \"index\",\n inplace: bool = False,\n level: Optional[int] = None,\n errors: str = \"ignore\",\n ) -> Optional[\"DataFrame\"]:\n\n \"\"\"\n Alter axes labels.\n Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series\n will be left as-is. Extra labels listed don’t throw an error.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or functions transformations to apply to that axis’ values.\n Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`\n and `columns`.\n index : dict-like or function\n Alternative to specifying axis (\"mapper, axis=0\" is equivalent to \"index=mapper\").\n columns : dict-like or function\n Alternative to specifying axis (\"mapper, axis=1\" is equivalent to \"columns=mapper\").\n axis : int or str, default 'index'\n Axis to target with mapper. Can be either the axis name ('index', 'columns') or\n number (0, 1).\n inplace : bool, default False\n Whether to return a new DataFrame.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified level.\n errors : {'ignore', 'raise}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`\n contains labels that are not present in the Index being transformed. If 'ignore',\n existing keys will be renamed and extra keys will be ignored.\n\n Returns\n -------\n DataFrame with the renamed axis labels.\n\n Raises\n ------\n `KeyError`\n If any of the labels is not found in the selected axis and \"errors='raise'\".\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> psdf1.rename(columns={\"A\": \"a\", \"B\": \"c\"}) # doctest: +NORMALIZE_WHITESPACE\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> def str_lower(s) -> str:\n ... return str.lower(s)\n >>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> def mul10(x) -> int:\n ... 
return x * 10\n >>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])\n >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)\n >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE\n x y\n A B C D\n 0 1 2 3 4\n 1 5 6 7 8\n\n >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))\n >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE\n a b\n x a 1 2\n b 3 4\n y c 5 6\n d 7 8\n \"\"\"\n\n def gen_mapper_fn(\n mapper: Union[Dict, Callable[[Any], Any]]\n ) -> Tuple[Callable[[Any], Any], Dtype, DataType]:\n if isinstance(mapper, dict):\n mapper_dict = mapper\n\n type_set = set(map(lambda x: type(x), mapper_dict.values()))\n if len(type_set) > 1:\n raise ValueError(\"Mapper dict should have the same value type.\")\n dtype, spark_return_type = pandas_on_spark_type(list(type_set)[0])\n\n def mapper_fn(x: Any) -> Any:\n if x in mapper_dict:\n return mapper_dict[x]\n else:\n if errors == \"raise\":\n raise KeyError(\"Index include value which is not in the `mapper`\")\n return x\n\n elif callable(mapper):\n mapper_callable = cast(Callable, mapper)\n return_type = cast(ScalarType, infer_return_type(mapper))\n dtype = return_type.dtype\n spark_return_type = return_type.spark_type\n\n def mapper_fn(x: Any) -> Any:\n return mapper_callable(x)\n\n else:\n raise ValueError(\n \"`mapper` or `index` or `columns` should be \"\n \"either dict-like or function type.\"\n )\n return mapper_fn, dtype, spark_return_type\n\n index_mapper_fn = None\n index_mapper_ret_stype = None\n columns_mapper_fn = None\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if mapper:\n axis = validate_axis(axis)\n if axis == 0:\n index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(\n mapper\n )\n elif axis == 1:\n columns_mapper_fn, _, _ = gen_mapper_fn(mapper)\n else:\n if index:\n index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(\n index\n )\n if columns:\n columns_mapper_fn, _, _ = gen_mapper_fn(columns)\n\n if not index and not columns:\n raise ValueError(\"Either `index` or `columns` should be provided.\")\n\n psdf = self.copy()\n if index_mapper_fn:\n # rename index labels, if `level` is None, rename all index columns, otherwise only\n # rename the corresponding level index.\n # implement this by transform the underlying spark dataframe,\n # Example:\n # suppose the psdf index column in underlying spark dataframe is \"index_0\", \"index_1\",\n # if rename level 0 index labels, will do:\n # ``psdf._sdf.withColumn(\"index_0\", mapper_fn_udf(col(\"index_0\"))``\n # if rename all index labels (`level` is None), then will do:\n # ```\n # psdf._sdf.withColumn(\"index_0\", mapper_fn_udf(col(\"index_0\"))\n # .withColumn(\"index_1\", mapper_fn_udf(col(\"index_1\"))\n # ```\n\n index_columns = psdf._internal.index_spark_column_names\n num_indices = len(index_columns)\n if level:\n if level < 0 or level >= num_indices:\n raise ValueError(\"level should be an integer between [0, num_indices)\")\n\n @pandas_udf(returnType=index_mapper_ret_stype) # type: ignore[call-overload]\n def index_mapper_udf(s: pd.Series) -> pd.Series:\n return s.map(index_mapper_fn)\n\n index_spark_columns = psdf._internal.index_spark_columns.copy()\n index_fields = psdf._internal.index_fields.copy()\n if level is None:\n for i in range(num_indices):\n index_spark_columns[i] = 
index_mapper_udf(index_spark_columns[i]).alias(\n index_columns[i]\n )\n index_fields[i] = index_fields[i].copy(\n dtype=index_mapper_ret_dtype,\n spark_type=index_mapper_ret_stype,\n nullable=True,\n )\n else:\n index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(\n index_columns[level]\n )\n index_fields[level] = index_fields[level].copy(\n dtype=index_mapper_ret_dtype,\n spark_type=index_mapper_ret_stype,\n nullable=True,\n )\n psdf = DataFrame(\n psdf._internal.copy(\n index_spark_columns=index_spark_columns, index_fields=index_fields\n )\n )\n if columns_mapper_fn:\n # rename column name.\n # Will modify the `_internal._column_labels` and transform underlying spark dataframe\n # to the same column name with `_internal._column_labels`.\n if level:\n if level < 0 or level >= psdf._internal.column_labels_level:\n raise ValueError(\"level should be an integer between [0, column_labels_level)\")\n\n def gen_new_column_labels_entry(column_labels_entry: Label) -> Label:\n if level is None:\n # rename all level columns\n return tuple(map(columns_mapper_fn, column_labels_entry))\n else:\n # only rename specified level column\n entry_list = list(column_labels_entry)\n entry_list[level] = columns_mapper_fn(entry_list[level])\n return tuple(entry_list)\n\n new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))\n\n new_data_pssers = [\n psdf._psser_for(old_label).rename(new_label)\n for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels)\n ]\n psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers))\n if inplace:\n self._update_internal_frame(psdf._internal)\n return None\n else:\n return psdf\n\n def rename_axis(\n self,\n mapper: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n index: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n columns: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,\n axis: Optional[Axis] = 0,\n inplace: Optional[bool] = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new DataFrame.\n\n Returns\n -------\n DataFrame, or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n\n The second calling convention will modify the names of the\n corresponding index specified by axis.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n >>> df 
= ps.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... index=[\"dog\", \"cat\", \"monkey\"],\n ... columns=[\"num_legs\", \"num_arms\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df = df.rename_axis(\"animal\").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n limbs num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... names=['type', 'name'])\n >>> df = ps.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... index=index,\n ... columns=[\"num_legs\", \"num_arms\"])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n class name\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n TYPE NAME\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n \"\"\"\n\n def gen_names(\n v: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]],\n curnames: List[Name],\n ) -> List[Label]:\n newnames: List[Name]\n if is_scalar(v):\n newnames = [cast(Name, v)]\n elif is_list_like(v) and not is_dict_like(v):\n newnames = list(cast(Sequence[Name], v))\n elif is_dict_like(v):\n v_dict = cast(Dict[Name, Name], v)\n newnames = [v_dict[name] if name in v_dict else name for name in curnames]\n elif callable(v):\n v_callable = cast(Callable[[Name], Name], v)\n newnames = [v_callable(name) for name in curnames]\n else:\n raise ValueError(\n \"`mapper` or `index` or `columns` should be \"\n \"either dict-like or function type.\"\n )\n\n if len(newnames) != len(curnames):\n raise ValueError(\n \"Length of new names must be {}, got {}\".format(len(curnames), len(newnames))\n )\n\n return [name if is_name_like_tuple(name) else (name,) for name in newnames]\n\n if mapper is not None and (index is not None or columns is not None):\n raise TypeError(\"Cannot specify both 'mapper' and any of 'index' or 'columns'.\")\n\n if mapper is not None:\n axis = validate_axis(axis)\n if axis == 0:\n index = mapper\n elif axis == 1:\n columns = mapper\n\n column_label_names = (\n gen_names(columns, self.columns.names)\n if columns is not None\n else self._internal.column_label_names\n )\n index_names = (\n gen_names(index, self.index.names) if index is not None else self._internal.index_names\n )\n\n internal = self._internal.copy(\n index_names=index_names, column_label_names=column_label_names\n )\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def keys(self) -> pd.Index:\n \"\"\"\n Return alias for columns.\n\n Returns\n -------\n Index\n Columns of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n\n >>> df.keys()\n Index(['max_speed', 'shield'], dtype='object')\n \"\"\"\n return self.columns\n\n def pct_change(self, periods: int = 1) -> \"DataFrame\":\n \"\"\"\n Percentage change between the current and a prior element.\n\n .. 
note:: the current implementation of this API uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n Percentage change in French franc, Deutsche Mark, and Italian lira\n from 1980-01-01 to 1980-03-01.\n\n >>> df = ps.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n You can set periods to shift for forming percent change\n\n >>> df.pct_change(2)\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 NaN NaN NaN\n 1980-03-01 0.067912 0.073814 0.06883\n \"\"\"\n window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)\n\n def op(psser: ps.Series) -> Column:\n prev_row = F.lag(psser.spark.column, periods).over(window)\n return ((psser.spark.column - prev_row) / prev_row).alias(\n psser._internal.data_spark_column_names[0]\n )\n\n return self._apply_series_op(op, should_resolve=True)\n\n # TODO: axis = 1\n def idxmax(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with maximum value using `to_pandas()`\n because we suppose the number of rows with max values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmax\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a 2\n b 0\n c 2\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a x 2\n b y 0\n c z 2\n dtype: int64\n \"\"\"\n max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns)\n sdf_max = self._internal.spark_frame.select(*max_cols).head()\n # `sdf_max` looks like below\n # +------+------+------+\n # |(a, x)|(b, y)|(c, z)|\n # +------+------+------+\n # | 3| 4.0| 400|\n # +------+------+------+\n\n conds = (\n scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max)\n )\n cond = reduce(lambda x, y: x | y, conds)\n\n psdf: DataFrame = DataFrame(self._internal.with_filter(cond))\n\n return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax()))\n\n # TODO: axis = 1\n def idxmin(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n .. 
note:: This API collect all rows with minimum value using `to_pandas()`\n because we suppose the number of rows with min values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmin\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a 0\n b 3\n c 1\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a x 0\n b y 3\n c z 1\n dtype: int64\n \"\"\"\n min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns)\n sdf_min = self._internal.spark_frame.select(*min_cols).head()\n\n conds = (\n scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min)\n )\n cond = reduce(lambda x, y: x | y, conds)\n\n psdf: DataFrame = DataFrame(self._internal.with_filter(cond))\n\n return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin()))\n\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n \"\"\"\n Print a concise summary of a DataFrame.\n\n This method prints information about a DataFrame including\n the index dtype and column dtypes, non-null values and memory usage.\n\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used.\n null_counts : bool, optional\n Whether to show the non-null counts.\n\n Returns\n -------\n None\n This method prints a summary of a DataFrame and returns None.\n\n See Also\n --------\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n\n Examples\n --------\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = ps.DataFrame(\n ... {\"int_col\": int_values, \"text_col\": text_values, \"float_col\": float_values},\n ... 
columns=['int_col', 'text_col', 'float_col'])\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True) # doctest: +SKIP\n <class 'pyspark.pandas.frame.DataFrame'>\n Index: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False) # doctest: +SKIP\n <class 'pyspark.pandas.frame.DataFrame'>\n Index: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open('%s/info.txt' % path, \"w\",\n ... encoding=\"utf-8\") as f:\n ... _ = f.write(s)\n >>> with open('%s/info.txt' % path) as f:\n ... f.readlines() # doctest: +SKIP\n [\"<class 'pyspark.pandas.frame.DataFrame'>\\\\n\",\n 'Index: 5 entries, 0 to 4\\\\n',\n 'Data columns (total 3 columns):\\\\n',\n ' # Column Non-Null Count Dtype \\\\n',\n '--- ------ -------------- ----- \\\\n',\n ' 0 int_col 5 non-null int64 \\\\n',\n ' 1 text_col 5 non-null object \\\\n',\n ' 2 float_col 5 non-null float64\\\\n',\n 'dtypes: float64(1), int64(1), object(1)']\n \"\"\"\n # To avoid pandas' existing config affects pandas-on-Spark.\n # TODO: should we have corresponding pandas-on-Spark configs?\n with pd.option_context(\n \"display.max_info_columns\", sys.maxsize, \"display.max_info_rows\", sys.maxsize\n ):\n try:\n # hack to use pandas' info as is.\n object.__setattr__(self, \"_data\", self)\n count_func = self.count\n self.count = ( # type: ignore[assignment]\n lambda: count_func()._to_pandas() # type: ignore[assignment, misc, union-attr]\n )\n return pd.DataFrame.info(\n self, # type: ignore[arg-type]\n verbose=verbose,\n buf=buf,\n max_cols=max_cols,\n memory_usage=False,\n null_counts=null_counts,\n )\n finally:\n del self._data\n self.count = count_func # type: ignore[assignment]\n\n # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'\n def quantile(\n self,\n q: Union[float, Iterable[float]] = 0.5,\n axis: Axis = 0,\n numeric_only: bool = True,\n accuracy: int = 10000,\n ) -> DataFrameOrSeries:\n \"\"\"\n Return value at the given quantile.\n\n .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile\n based upon approximate percentile computation because computing quantile across a\n large dataset is extremely expensive.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n axis : int or str, default 0 or 'index'\n Can only be set to 0 at the moment.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be computed as well.\n Can only be set to True at the moment.\n accuracy : int, optional\n Default accuracy of approximation. 
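This value is passed through to Spark's ``percentile_approx`` function.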
Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n Series or DataFrame\n If q is an array, a DataFrame will be returned where the\n index is q, the columns are the columns of self, and the values are the quantiles.\n If q is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})\n >>> psdf\n a b\n 0 1 6\n 1 2 7\n 2 3 8\n 3 4 9\n 4 5 0\n\n >>> psdf.quantile(.5)\n a 3.0\n b 7.0\n Name: 0.5, dtype: float64\n\n >>> psdf.quantile([.25, .5, .75])\n a b\n 0.25 2.0 6.0\n 0.50 3.0 7.0\n 0.75 4.0 8.0\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n if not isinstance(accuracy, int):\n raise TypeError(\n \"accuracy must be an integer; however, got [%s]\" % type(accuracy).__name__\n )\n\n qq: Union[float, List[float]] = list(q) if isinstance(q, Iterable) else q\n\n for v in qq if isinstance(qq, list) else [qq]:\n if not isinstance(v, float):\n raise TypeError(\n \"q must be a float or an array of floats; however, [%s] found.\" % type(v)\n )\n if v < 0.0 or v > 1.0:\n raise ValueError(\"percentiles should all be in the interval [0, 1].\")\n\n def quantile(psser: \"Series\") -> Column:\n spark_type = psser.spark.data_type\n spark_column = psser.spark.column\n if isinstance(spark_type, (BooleanType, NumericType)):\n return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy)\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n\n if isinstance(qq, list):\n # First calculate the percentiles from all columns and map it to each `quantiles`\n # by creating each entry as a struct. 
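Each struct pairs one requested quantile (aliased to the internal index column) with the matching approximate percentile from every kept column.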
So, it becomes an array of structs as below:\n #\n # +-----------------------------------------+\n # | arrays|\n # +-----------------------------------------+\n # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|\n # +-----------------------------------------+\n\n percentile_cols: List[Column] = []\n percentile_col_names: List[str] = []\n column_labels: List[Label] = []\n for label, column in zip(\n self._internal.column_labels, self._internal.data_spark_column_names\n ):\n psser = self._psser_for(label)\n\n is_numeric_or_boolean = isinstance(\n psser.spark.data_type, (NumericType, BooleanType)\n )\n keep_column = not numeric_only or is_numeric_or_boolean\n\n if keep_column:\n percentile_col = quantile(psser)\n percentile_cols.append(percentile_col.alias(column))\n percentile_col_names.append(column)\n column_labels.append(label)\n\n if len(percentile_cols) == 0:\n return DataFrame(index=qq)\n\n sdf = self._internal.spark_frame.select(percentile_cols)\n # Here, after select percentile cols, a spark_frame looks like below:\n # +---------+---------+\n # | a| b|\n # +---------+---------+\n # |[2, 3, 4]|[6, 7, 8]|\n # +---------+---------+\n\n cols_dict: Dict[str, List[Column]] = {}\n for column in percentile_col_names:\n cols_dict[column] = list()\n for i in range(len(qq)):\n cols_dict[column].append(scol_for(sdf, column)[i].alias(column))\n\n internal_index_column = SPARK_DEFAULT_INDEX_NAME\n cols = []\n for i, col in enumerate(zip(*cols_dict.values())):\n cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col))\n sdf = sdf.select(F.array(*cols).alias(\"arrays\"))\n\n # And then, explode it and manually set the index.\n # +-----------------+---+---+\n # |__index_level_0__| a| b|\n # +-----------------+---+---+\n # | 0.25| 2| 6|\n # | 0.5| 3| 7|\n # | 0.75| 4| 8|\n # +-----------------+---+---+\n sdf = sdf.select(F.explode(F.col(\"arrays\"))).selectExpr(\"col.*\")\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, internal_index_column)],\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names],\n )\n return DataFrame(internal)\n else:\n return self._reduce_for_stat_function(\n quantile, name=\"quantile\", numeric_only=numeric_only\n ).rename(qq)\n\n def query(self, expr: str, inplace: bool = False) -> Optional[\"DataFrame\"]:\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n .. note:: Internal columns that starting with a '__' prefix are able to access, however,\n they are not supposed to be accessed.\n\n .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the\n pandas specific syntax such as `@` is not supported. If you want the pandas syntax,\n you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should\n be aware that `query_func` will be executed at different nodes in a distributed manner.\n So, for example, to use `@` syntax, make sure the variable is serialized by, for\n example, putting it within the closure as below.\n\n >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})\n >>> def query_func(pdf):\n ... num = 1995\n ... 
return pdf.query('A > @num')\n >>> df.pandas_on_spark.apply_batch(query_func)\n A B\n 1996 1996 1996\n 1997 1997 1997\n 1998 1998 1998\n 1999 1999 1999\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to column names that contain spaces by surrounding\n them in backticks.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n\n Returns\n -------\n DataFrame\n DataFrame resulting from the provided query expression.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n if isinstance(self.columns, pd.MultiIndex):\n raise TypeError(\"Doesn't support for MultiIndex columns\")\n if not isinstance(expr, str):\n raise TypeError(\n \"expr must be a string to be evaluated, {} given\".format(type(expr).__name__)\n )\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n data_columns = [label[0] for label in self._internal.column_labels]\n sdf = self._internal.spark_frame.select(\n self._internal.index_spark_columns\n + [\n scol.alias(col)\n for scol, col in zip(self._internal.data_spark_columns, data_columns)\n ]\n ).filter(expr)\n internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)\n\n if inplace:\n self._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal)\n\n def take(self, indices: List[int], axis: Axis = 0, **kwargs: Any) -> \"DataFrame\":\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. 
That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3]).sort_index()\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2]).sort_index()\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n axis = validate_axis(axis)\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise TypeError(\"`indices` must be a list-like except dict or set\")\n if axis == 0:\n return cast(DataFrame, self.iloc[indices, :])\n else:\n return cast(DataFrame, self.iloc[:, indices])\n\n def eval(self, expr: str, inplace: bool = False) -> Optional[DataFrameOrSeries]:\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n\n Returns\n -------\n The result of the evaluation.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n \"\"\"\n from pyspark.pandas.series import first_series\n\n if isinstance(self.columns, pd.MultiIndex):\n raise TypeError(\"`eval` is not supported for multi-index columns\")\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n should_return_series = False\n series_name = None\n should_return_scalar = False\n\n # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed\n # in the `apply_batch`. 
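Schema inference effectively executes `eval_func` against a pandas sample of the data, which is when the `nonlocal` flags below get set.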
Hence, the variables `should_return_series`, `series_name`,\n # and `should_return_scalar` can be updated.\n def eval_func(pdf): # type: ignore[no-untyped-def]\n nonlocal should_return_series\n nonlocal series_name\n nonlocal should_return_scalar\n result_inner = pdf.eval(expr, inplace=inplace)\n if inplace:\n result_inner = pdf\n if isinstance(result_inner, pd.Series):\n should_return_series = True\n series_name = result_inner.name\n result_inner = result_inner.to_frame()\n elif is_scalar(result_inner):\n should_return_scalar = True\n result_inner = pd.Series(result_inner).to_frame()\n return result_inner\n\n result = self.pandas_on_spark.apply_batch(eval_func)\n if inplace:\n # Here, the result is always a frame because the error is thrown during schema inference\n # from pandas.\n self._update_internal_frame(result._internal, requires_same_anchor=False)\n return None\n elif should_return_series:\n return first_series(result).rename(series_name)\n elif should_return_scalar:\n return first_series(result)[0]\n else:\n # Returns a frame\n return result\n\n def explode(self, column: Name) -> \"DataFrame\":\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 [] 1\n 2 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1.0 1\n 0 2.0 1\n 0 3.0 1\n 1 NaN 1\n 2 3.0 1\n 2 4.0 1\n \"\"\"\n from pyspark.pandas.series import Series\n\n if not is_name_like_value(column):\n raise TypeError(\"column must be a scalar\")\n\n psdf: DataFrame = DataFrame(self._internal.resolved_copy)\n psser = psdf[column]\n if not isinstance(psser, Series):\n raise ValueError(\n \"The column %s is not unique. For a multi-index, the label must be a tuple \"\n \"with elements corresponding to each level.\" % name_like_string(column)\n )\n if not isinstance(psser.spark.data_type, ArrayType):\n return self.copy()\n\n sdf = psdf._internal.spark_frame.withColumn(\n psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column)\n )\n\n data_fields = psdf._internal.data_fields.copy()\n idx = psdf._internal.column_labels.index(psser._column_label)\n field = data_fields[idx]\n spark_type = cast(ArrayType, field.spark_type).elementType\n dtype = spark_type_to_pandas_dtype(spark_type)\n data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True)\n\n internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)\n return DataFrame(internal)\n\n def mad(self, axis: Axis = 0) -> \"Series\":\n \"\"\"\n Return the mean absolute deviation of values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n >>> df.mad()\n a 0.666667\n b 0.066667\n dtype: float64\n\n >>> df.mad(axis=1)\n 0 0.45\n 1 0.90\n 2 1.35\n 3 NaN\n dtype: float64\n \"\"\"\n from pyspark.pandas.series import first_series\n\n axis = validate_axis(axis)\n\n if axis == 0:\n\n def get_spark_column(psdf: DataFrame, label: Label) -> Column:\n scol = psdf._internal.spark_column_for(label)\n col_type = psdf._internal.spark_type_for(label)\n\n if isinstance(col_type, BooleanType):\n scol = scol.cast(\"integer\")\n\n return scol\n\n new_column_labels: List[Label] = []\n for label in self._internal.column_labels:\n # Filtering out only columns of numeric and boolean type column.\n dtype = self._psser_for(label).spark.data_type\n if isinstance(dtype, (NumericType, BooleanType)):\n new_column_labels.append(label)\n\n new_columns = [\n F.avg(get_spark_column(self, label)).alias(name_like_string(label))\n for label in new_column_labels\n ]\n\n mean_data = self._internal.spark_frame.select(*new_columns).first()\n\n new_columns = [\n F.avg(\n F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)])\n ).alias(name_like_string(label))\n for label in new_column_labels\n ]\n\n sdf = self._internal.spark_frame.select(\n *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns\n )\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.max_rows\", 1):\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=new_column_labels,\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal).transpose())\n\n else:\n\n @pandas_udf(returnType=DoubleType()) # type: ignore[call-overload]\n def calculate_columns_axis(*cols: pd.Series) -> pd.Series:\n return pd.concat(cols, axis=1).mad(axis=1)\n\n internal = self._internal.copy(\n column_labels=[None],\n data_spark_columns=[\n calculate_columns_axis(*self._internal.data_spark_columns).alias(\n SPARK_DEFAULT_SERIES_NAME\n )\n ],\n data_fields=[None],\n column_label_names=None,\n )\n return first_series(DataFrame(internal))\n\n def tail(self, n: int = 5) -> \"DataFrame\":\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail() # doctest: +SKIP\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3) # doctest: +SKIP\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3) # doctest: +SKIP\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if not isinstance(n, int):\n raise TypeError(\"bad operand type for unary -: '{}'\".format(type(n).__name__))\n if n < 0:\n n = len(self) + n\n if n <= 0:\n return ps.DataFrame(self._internal.with_filter(SF.lit(False)))\n # Should use `resolved_copy` here for the case like `(psdf + 1).tail()`\n sdf = self._internal.resolved_copy.spark_frame\n rows = sdf.tail(n)\n new_sdf = default_session().createDataFrame(rows, sdf.schema)\n\n return DataFrame(self._internal.with_new_sdf(new_sdf))\n\n def align(\n self,\n other: DataFrameOrSeries,\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n copy: bool = True,\n ) -> Tuple[\"DataFrame\", DataFrameOrSeries]:\n \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n\n Returns\n -------\n (left, right) : (DataFrame, type of other)\n Aligned objects.\n\n Examples\n --------\n >>> ps.set_option(\"compute.ops_on_diff_frames\", True)\n >>> df1 = ps.DataFrame({\"a\": [1, 2, 3], \"b\": [\"a\", \"b\", \"c\"]}, index=[10, 20, 30])\n >>> df2 = ps.DataFrame({\"a\": [4, 5, 6], \"c\": [\"d\", \"e\", \"f\"]}, index=[10, 11, 12])\n\n Align both axis:\n\n >>> aligned_l, aligned_r = df1.align(df2)\n >>> aligned_l.sort_index()\n a b c\n 10 1.0 a NaN\n 11 NaN None NaN\n 12 NaN None NaN\n 20 2.0 b NaN\n 30 3.0 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4.0 NaN d\n 11 5.0 NaN e\n 12 6.0 NaN f\n 20 NaN NaN None\n 30 NaN NaN None\n\n Align only axis=0 (index):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n a c\n 10 4.0 d\n 11 5.0 e\n 12 6.0 f\n 20 NaN None\n 30 NaN None\n\n Align only axis=1 (column):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=1)\n >>> aligned_l.sort_index()\n a b c\n 10 1 a NaN\n 20 2 b NaN\n 30 3 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4 NaN d\n 11 5 NaN e\n 12 6 NaN f\n\n Align with the join type \"inner\":\n\n >>> aligned_l, aligned_r = df1.align(df2, join=\"inner\")\n >>> aligned_l.sort_index()\n a\n 10 1\n >>> aligned_r.sort_index()\n a\n 10 4\n\n Align with a Series:\n\n >>> s = ps.Series([7, 8, 9], index=[10, 11, 12])\n >>> aligned_l, aligned_r = df1.align(s, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n\n >>> ps.reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n from pyspark.pandas.series import Series, first_series\n\n if not isinstance(other, (DataFrame, 
Series)):\n raise TypeError(\"unsupported type: {}\".format(type(other).__name__))\n\n how = validate_how(join)\n axis = validate_axis(axis, None)\n\n right_is_series = isinstance(other, Series)\n if right_is_series:\n if axis is None:\n raise ValueError(\"Must specify axis=0 or 1\")\n elif axis != 0:\n raise NotImplementedError(\n \"align currently only works for axis=0 when right is Series\"\n )\n\n left = self\n right = other\n\n if (axis is None or axis == 0) and not same_anchor(left, right):\n combined = combine_frames(left, right, how=how)\n left = combined[\"this\"]\n right = combined[\"that\"]\n\n if right_is_series:\n right = first_series(cast(DataFrame[Any], right)).rename(other.name)\n\n if (\n axis is None or axis == 1\n ) and left._internal.column_labels != right._internal.column_labels:\n\n if left._internal.column_labels_level != right._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n left = left.copy()\n right = right.copy()\n\n if how == \"full\":\n column_labels = sorted(\n list(set(left._internal.column_labels) | set(right._internal.column_labels))\n )\n elif how == \"inner\":\n column_labels = sorted(\n list(set(left._internal.column_labels) & set(right._internal.column_labels))\n )\n elif how == \"left\":\n column_labels = left._internal.column_labels\n else:\n column_labels = right._internal.column_labels\n\n for label in column_labels:\n if label not in left._internal.column_labels:\n left[label] = SF.lit(None).cast(DoubleType())\n left = left[column_labels]\n for label in column_labels:\n if label not in right._internal.column_labels:\n right[label] = SF.lit(None).cast(DoubleType())\n right = right[column_labels]\n\n return (left.copy(), right.copy()) if copy else (left, right)\n\n @staticmethod\n def from_dict(\n data: Dict[Name, Sequence[Any]],\n orient: str = \"columns\",\n dtype: Union[str, Dtype] = None,\n columns: Optional[List[Name]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. 
Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 10\n 1 2 20\n 2 1 30\n 3 0 40\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data, orient='index').sort_index()\n 0 1 2 3\n row_1 3 2 1 0\n row_2 10 20 30 40\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> ps.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D']).sort_index()\n A B C D\n row_1 3 2 1 0\n row_2 10 20 30 40\n \"\"\"\n return DataFrame(\n pd.DataFrame.from_dict(\n data, orient=orient, dtype=dtype, columns=columns # type: ignore[arg-type]\n )\n )\n\n # Override the `groupby` to specify the actual return type annotation.\n def groupby(\n self,\n by: Union[Name, \"Series\", List[Union[Name, \"Series\"]]],\n axis: Axis = 0,\n as_index: bool = True,\n dropna: bool = True,\n ) -> \"DataFrameGroupBy\":\n return cast(\n \"DataFrameGroupBy\", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)\n )\n\n groupby.__doc__ = Frame.groupby.__doc__\n\n def _build_groupby(\n self, by: List[Union[\"Series\", Label]], as_index: bool, dropna: bool\n ) -> \"DataFrameGroupBy\":\n from pyspark.pandas.groupby import DataFrameGroupBy\n\n return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna)\n\n def _to_internal_pandas(self) -> pd.DataFrame:\n \"\"\"\n Return a pandas DataFrame directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n \"\"\"\n return self._internal.to_pandas_frame\n\n def _get_or_create_repr_pandas_cache(self, n: int) -> Union[pd.DataFrame, pd.Series]:\n if not hasattr(self, \"_repr_pandas_cache\") or n not in self._repr_pandas_cache:\n object.__setattr__(\n self, \"_repr_pandas_cache\", {n: self.head(n + 1)._to_internal_pandas()}\n )\n return self._repr_pandas_cache[n]\n\n def __repr__(self) -> str:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return self._to_internal_pandas().to_string()\n\n pdf = cast(\"DataFrame\", self._get_or_create_repr_pandas_cache(max_display_count))\n pdf_length = len(pdf)\n pdf = cast(\"DataFrame\", pdf.iloc[:max_display_count])\n if pdf_length > max_display_count:\n repr_string = pdf.to_string(show_dimensions=True)\n match = REPR_PATTERN.search(repr_string)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n footer = \"\\n\\n[Showing only the first {nrows} rows x {ncols} columns]\".format(\n nrows=nrows, ncols=ncols\n )\n return REPR_PATTERN.sub(footer, repr_string)\n return pdf.to_string()\n\n def _repr_html_(self) -> str:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return self._to_internal_pandas().to_html(notebook=True)\n\n pdf = self._get_or_create_repr_pandas_cache(max_display_count)\n pdf_length = len(pdf)\n pdf = pdf.iloc[:max_display_count]\n if pdf_length > max_display_count:\n repr_html = pdf.to_html(show_dimensions=True, notebook=True)\n match = 
REPR_HTML_PATTERN.search(repr_html)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n by = chr(215)\n footer = (\n \"\\n<p>Showing only the first {rows} rows \"\n \"{by} {cols} columns</p>\\n</div>\".format(rows=nrows, by=by, cols=ncols)\n )\n return REPR_HTML_PATTERN.sub(footer, repr_html)\n return pdf.to_html(notebook=True)\n\n def __getitem__(self, key: Any) -> Any:\n from pyspark.pandas.series import Series\n\n if key is None:\n raise KeyError(\"none key\")\n elif isinstance(key, Series):\n return self.loc[key.astype(bool)]\n elif isinstance(key, slice):\n if any(type(n) == int or None for n in [key.start, key.stop]):\n # Seems like pandas Frame always uses int as positional search when slicing\n # with ints.\n return self.iloc[key]\n return self.loc[key]\n elif is_name_like_value(key):\n return self.loc[:, key]\n elif is_list_like(key):\n return self.loc[:, list(key)]\n raise NotImplementedError(key)\n\n def __setitem__(self, key: Any, value: Any) -> None:\n from pyspark.pandas.series import Series\n\n if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self):\n # Different Series or DataFrames\n level = self._internal.column_labels_level\n key = DataFrame._index_normalized_label(level, key)\n value = DataFrame._index_normalized_frame(level, value)\n\n def assign_columns(\n psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]\n ) -> Iterator[Tuple[\"Series\", Label]]:\n assert len(key) == len(that_column_labels)\n # Note that here intentionally uses `zip_longest` that combine\n # that_columns.\n for k, this_label, that_label in zip_longest(\n key, this_column_labels, that_column_labels\n ):\n yield (psdf._psser_for(that_label), tuple([\"that\", *k]))\n if this_label is not None and this_label[1:] != k:\n yield (psdf._psser_for(this_label), this_label)\n\n psdf = align_diff_frames(assign_columns, self, value, fillna=False, how=\"left\")\n elif isinstance(value, list):\n if len(self) != len(value):\n raise ValueError(\"Length of values does not match length of index\")\n\n # TODO: avoid using default index?\n with option_context(\n \"compute.default_index_type\",\n \"distributed-sequence\",\n \"compute.ops_on_diff_frames\",\n True,\n ):\n psdf = self.reset_index()\n psdf[key] = ps.DataFrame(value)\n psdf = psdf.set_index(psdf.columns[: self._internal.index_level])\n psdf.index.names = self.index.names\n\n elif isinstance(key, list):\n assert isinstance(value, DataFrame)\n # Same DataFrames.\n field_names = value.columns\n psdf = self._assign({k: value[c] for k, c in zip(key, field_names)})\n else:\n # Same Series.\n psdf = self._assign({key: value})\n\n self._update_internal_frame(psdf._internal)\n\n @staticmethod\n def _index_normalized_label(level: int, labels: Union[Name, Sequence[Name]]) -> List[Label]:\n \"\"\"\n Returns a label that is normalized against the current column index level.\n For example, the key \"abc\" can be (\"abc\", \"\", \"\") if the current Frame has\n a multi-index for its column\n \"\"\"\n if is_name_like_tuple(labels):\n labels = [labels]\n elif is_name_like_value(labels):\n labels = [(labels,)]\n else:\n labels = [k if is_name_like_tuple(k) else (k,) for k in labels]\n\n if any(len(label) > level for label in labels):\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n max(len(label) for label in labels), level\n )\n )\n return [tuple(list(label) + ([\"\"] * (level - len(label)))) for label in labels]\n\n @staticmethod\n def 
_index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> \"DataFrame\":\n \"\"\"\n Returns a frame that is normalized against the current column index level.\n For example, the name in `pd.Series([...], name=\"abc\")` can be can be\n (\"abc\", \"\", \"\") if the current DataFrame has a multi-index for its column\n \"\"\"\n from pyspark.pandas.series import Series\n\n if isinstance(psser_or_psdf, Series):\n psdf = psser_or_psdf.to_frame()\n else:\n assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)\n psdf = psser_or_psdf.copy()\n\n psdf.columns = pd.MultiIndex.from_tuples(\n [\n tuple([name_like_string(label)] + ([\"\"] * (level - 1)))\n for label in psdf._internal.column_labels\n ],\n )\n\n return psdf\n\n def __getattr__(self, key: str) -> Any:\n if key.startswith(\"__\"):\n raise AttributeError(key)\n if hasattr(_MissingPandasLikeDataFrame, key):\n property_or_func = getattr(_MissingPandasLikeDataFrame, key)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n\n try:\n return self.loc[:, key]\n except KeyError:\n raise AttributeError(\n \"'%s' object has no attribute '%s'\" % (self.__class__.__name__, key)\n )\n\n def __setattr__(self, key: str, value: Any) -> None:\n try:\n object.__getattribute__(self, key)\n return object.__setattr__(self, key, value)\n except AttributeError:\n pass\n\n if (key,) in self._internal.column_labels:\n self[key] = value\n else:\n msg = \"pandas-on-Spark doesn't allow columns to be created via a new attribute name\"\n if is_testing():\n raise AssertionError(msg)\n else:\n warnings.warn(msg, UserWarning)\n\n def __len__(self) -> int:\n return self._internal.resolved_copy.spark_frame.count()\n\n def __dir__(self) -> Iterable[str]:\n fields = [\n f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if \" \" not in f\n ]\n return list(super().__dir__()) + fields\n\n def __iter__(self) -> Iterator[Name]:\n return iter(self.columns)\n\n # NDArray Compat\n def __array_ufunc__(\n self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any\n ) -> \"DataFrame\":\n # TODO: is it possible to deduplicate it with '_map_series_op'?\n if all(isinstance(inp, DataFrame) for inp in inputs) and any(\n not same_anchor(inp, inputs[0]) for inp in inputs\n ):\n # binary only\n assert len(inputs) == 2\n this = inputs[0]\n that = inputs[1]\n if this._internal.column_labels_level != that._internal.column_labels_level:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n # Different DataFrames\n def apply_op(\n psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]\n ) -> Iterator[Tuple[\"Series\", Label]]:\n for this_label, that_label in zip(this_column_labels, that_column_labels):\n yield (\n ufunc(\n psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs\n ).rename(this_label),\n this_label,\n )\n\n return align_diff_frames(apply_op, this, that, fillna=True, how=\"full\")\n else:\n # DataFrame and Series\n applied = []\n this = inputs[0]\n assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))\n\n for label in this._internal.column_labels:\n arguments = []\n for inp in inputs:\n arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)\n # both binary and unary.\n applied.append(ufunc(*arguments, **kwargs).rename(label))\n\n internal = this._internal.with_new_columns(applied)\n return DataFrame(internal)\n\n def __class_getitem__(cls, params: Any) -> object:\n # This 
is a workaround to support variadic generic in DataFrame in Python 3.7.\n # See https://github.com/python/typing/issues/193\n # we always wraps the given type hints by a tuple to mimic the variadic generic.\n return create_tuple_for_frame_type(params)\n\n\ndef _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any:\n \"\"\"\n Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions.\n \"\"\"\n assert isinstance(sdf, SparkDataFrame)\n sdf0 = sdf.agg(*aggs)\n lst = sdf0.limit(2).toPandas()\n assert len(lst) == 1, (sdf, lst)\n row = lst.iloc[0]\n lst2 = list(row)\n assert len(lst2) == len(aggs), (row, lst2)\n return lst2\n\n\nclass CachedDataFrame(DataFrame):\n \"\"\"\n Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but\n internally it caches the corresponding Spark DataFrame.\n \"\"\"\n\n def __init__(self, internal: InternalFrame, storage_level: Optional[StorageLevel] = None):\n if storage_level is None:\n object.__setattr__(self, \"_cached\", internal.spark_frame.cache())\n elif isinstance(storage_level, StorageLevel):\n object.__setattr__(self, \"_cached\", internal.spark_frame.persist(storage_level))\n else:\n raise TypeError(\n \"Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`\"\n )\n super().__init__(internal)\n\n def __enter__(self) -> \"CachedDataFrame\":\n return self\n\n def __exit__(\n self,\n exception_type: Optional[Type[BaseException]],\n exception_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> Optional[bool]:\n self.spark.unpersist()\n return None\n\n # create accessor for Spark related methods.\n spark = CachedAccessor(\"spark\", CachedSparkFrameMethods)\n\n\ndef _test() -> None:\n import os\n import doctest\n import shutil\n import sys\n import tempfile\n import uuid\n from pyspark.sql import SparkSession\n import pyspark.pandas.frame\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.frame.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\").appName(\"pyspark.pandas.frame tests\").getOrCreate()\n )\n\n db_name = \"db%s\" % str(uuid.uuid4()).replace(\"-\", \"\")\n spark.sql(\"CREATE DATABASE %s\" % db_name)\n globs[\"db\"] = db_name\n\n path = tempfile.mkdtemp()\n globs[\"path\"] = path\n\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.frame,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n\n shutil.rmtree(path, ignore_errors=True)\n spark.sql(\"DROP DATABASE IF EXISTS %s CASCADE\" % db_name)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] |
[
[
"pandas.concat",
"numpy.dtype",
"pandas.DataFrame.from_records",
"pandas.DataFrame.info",
"pandas.DataFrame",
"pandas.core.dtypes.inference.is_sequence",
"pandas.core.dtypes.common.infer_dtype_from_object",
"pandas.api.types.is_dict_like",
"pandas.api.types.is_list_like",
"numpy.zeros",
"pandas.MultiIndex.from_tuples",
"pandas.api.types.is_bool_dtype",
"pandas.Index",
"pandas.tseries.frequencies.to_offset",
"pandas.api.types.is_scalar",
"pandas.DataFrame.from_dict",
"pandas.core.accessor.CachedAccessor",
"pandas.option_context",
"numpy.ravel",
"pandas.Series",
"numpy.diag"
]
] |
takeitbillykyle/EL2805-Reinforcement-Learning-
|
[
"ba16f1bff7d6676fcafd2ec19e6d2bcd195ebb1f"
] |
[
"Assignment 2/robbing_banks.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom IPython import display\n\n# Implemented methods\nmethods = ['DynProg', 'ValIter'];\n\n# Some colours\nLIGHT_RED = '#FFC4CC';\nLIGHT_GREEN = '#95FD99';\nBLACK = '#000000';\nWHITE = '#FFFFFF';\nLIGHT_PURPLE = '#E8D0FF';\nLIGHT_ORANGE = '#FAE0C3';\nSEB_GREEN = '#52B92C';\nBUSTED_BLUE = '#5993B5'\nclass RobbingBanks:\n\n # Actions\n STAY = 0\n MOVE_LEFT = 1\n MOVE_RIGHT = 2\n MOVE_UP = 3\n MOVE_DOWN = 4\n\n # Give names to actions\n actions_names = {\n STAY: \"stay\",\n MOVE_LEFT: \"move left\",\n MOVE_RIGHT: \"move right\",\n MOVE_UP: \"move up\",\n MOVE_DOWN: \"move down\"\n }\n\n # Reward values\n\n\n\n def __init__(self, town_map):\n \"\"\" Constructor of the environment town_map.\n \"\"\"\n self.STEP_REWARD = 0\n self.BANK_REWARD = 10\n self.CAUGHT_REWARD = -50\n self.town_map = town_map;\n self.initial_state = np.array([0,0,1,2])\n self.actions = self.__actions();\n self.states, self.map = self.__states();\n self.n_actions = len(self.actions);\n self.n_states = len(self.states);\n self.transition_probabilities = self.__transitions();\n self.rewards = self.__rewards();\n\n\n def __actions(self):\n actions = dict();\n actions[self.STAY] = np.array([0, 0]);\n actions[self.MOVE_LEFT] = np.array([0,-1]);\n actions[self.MOVE_RIGHT] = np.array([0, 1]);\n actions[self.MOVE_UP] = np.array([-1,0]);\n actions[self.MOVE_DOWN] = np.array([1,0]);\n return actions;\n\n def __states(self):\n states = dict();\n states_vec = dict();\n\n s = 0;\n for i in range(self.town_map.shape[0]):\n for j in range(self.town_map.shape[1]):\n for k in range(self.town_map.shape[0]):\n for l in range(self.town_map.shape[1]):\n states[s] = np.array([i,j,k,l]);\n states_vec[(i,j,k,l)] = s;\n s += 1;\n \n return states, states_vec\n\n def __move(self, state, action):\n \"\"\" Makes a step in the town_map, given a current position and an action.\n If the action STAY or an inadmissible action is used, the robber stays in place.\n\n :return integer next_cell corresponding to position (x,y) x (x,y) on the town_map that agent transitions to.\n \"\"\"\n # Compute the future position given current (state, action)\n row = self.states[state][0] + self.actions[action][0];\n col = self.states[state][1] + self.actions[action][1];\n # Is the future position an impossible one ?\n hitting_town_walls = (row == -1) or (row == self.town_map.shape[0]) or \\\n (col == -1) or (col == self.town_map.shape[1])\n # Based on the impossiblity check return the next state.\n list_police_pos = self.__police_positions(state)\n new_police_pos = list_police_pos[np.random.randint(len(list_police_pos))]\n \n #caught = (row, col) == (new_police_pos[0], new_police_pos[1])\n caught = all(self.states[state][0:2] == self.states[state][2:])\n if caught:\n return self.map[tuple(self.initial_state)];\n #Hot take: If you \"unintentionally\" hit the wall, the result should be that you (and the police) stay in place since it's not a \"deliberate\" move\n elif hitting_town_walls:\n return state\n else:\n return self.map[(row, col, new_police_pos[0], new_police_pos[1])];\n \n def __police_positions(self, state):\n \"\"\"\n Input: The state as an int\n Returns: A list of possible new minotaur positions from current state \n \"\"\"\n agent_pos = self.states[state][0:2]\n police_pos = self.states[state][2:]\n diff_pos = np.sign(agent_pos - police_pos)\n list_pos = [[1,0], [-1,0], [0, diff_pos[1]]] if diff_pos[0] == 0 else [[0,1], [0,-1], [diff_pos[0],0]] if diff_pos[1] == 0 else [[0,diff_pos[1]], 
[diff_pos[0],0]]\n list_pos += police_pos\n list_pos = list(filter(None,[tuple(pos)*(0<=pos[0]<self.town_map.shape[0] and 0<=pos[1]<self.town_map.shape[1]) for pos in list_pos]))\n return list_pos\n\n\n def __transitions(self):\n \"\"\" Computes the transition probabilities for every state action pair.\n :return numpy.tensor transition probabilities: tensor of transition\n probabilities of dimension S*S*A\n \"\"\"\n # Initialize the transition probailities tensor (S,S,A)\n dimensions = (self.n_states,self.n_states,self.n_actions);\n transition_probabilities = np.zeros(dimensions);\n\n # Compute the transition probabilities. Note that the transitions\n # are deterministic.\n for s in range(self.n_states):\n #if we are in the same position as the police, we return to initial\n if (self.states[s][0],self.states[s][1])==(self.states[s][2],self.states[s][3]):\n transition_probabilities[self.initial_state, s, :] = 1/3\n\n else:\n for a in range(self.n_actions):\n list_pos = self.__police_positions(s) #police positions\n for police_pos in list_pos:\n next_s = self.__move(s,a);\n new_pos = np.copy(self.states[next_s])\n new_pos[2:] = police_pos\n next_s = self.map[tuple(new_pos)]\n transition_probabilities[next_s, s, a] = 1/len(list_pos);\n return transition_probabilities;\n\n def __rewards(self):\n\n rewards = np.zeros((self.n_states, self.n_actions));\n # rewards[i,j,k] = r(s' | s, a): tensor of rewards of dimension S x S x A\n for s in range(self.n_states):\n \n list_pos = self.__police_positions(s)\n for a in range(self.n_actions):\n next_s = self.__move(s,a); \n\n #if we can get caught in the next move\n if (tuple(self.states[next_s][0:2]) in list_pos):\n #if our next position is not a bank\n if self.town_map[tuple(self.states[next_s][0:2])] != 1:\n rewards[s,a] = self.CAUGHT_REWARD/len(list_pos)\n\n #if our next position is a bank\n if self.town_map[tuple(self.states[next_s][0:2])] == 1:\n rewards[s,a] = self.CAUGHT_REWARD/len(list_pos) + (len(list_pos)-1)*self.BANK_REWARD/len(list_pos)\n\n #if we cannot get caught in the next move\n else:\n #reward for standing in a bank\n if self.town_map[tuple(self.states[next_s][0:2])] == 1:\n rewards[s,a] = self.BANK_REWARD\n\n \n\n # list_pos = self.__police_positions(s)\n # for a in range(self.n_actions):\n # next_s = self.__move(s,a);\n\n return rewards;\n\n def simulate(self,policy):\n path = list();\n\n # Initialize current state, next state and time\n t = 1;\n s = self.map[tuple(self.initial_state)];\n # Add the starting position in the town_map to the path\n path.append(self.initial_state);\n # Move to next state given the policy and the current state\n next_s = self.__move(s,policy[s]);\n # Add the position in the town_map corresponding to the next state\n # to the pygame.freetype.path\n path.append(self.states[next_s]);\n # Loop while state is not the goal state\n T = 40\n while t<T:\n # Update state\n s = next_s;\n # Move to next state given the policy and the current state\n next_s = self.__move(s,policy[s]);\n # Add the position in the town_map corresponding to the next state\n # to the path\n path.append(self.states[next_s])\n # Update time and state for next iteration\n t +=1;\n\n return path\n\n\n def show(self):\n print('The states are :')\n print(self.states)\n print('The actions are:')\n print(self.actions)\n print('The mapping of the states:')\n print(self.map)\n print('The rewards:')\n print(self.rewards)\n\n\ndef value_iteration(env, gamma, epsilon):\n \"\"\" Solves the shortest path problem using value iteration\n :input town_map 
env : The town_map environment in which we seek to\n find the shortest path.\n :input float gamma : The discount factor.\n :input float epsilon : accuracy of the value iteration procedure.\n :return numpy.array V : Optimal values for every state at every\n time, dimension S*T\n :return numpy.array policy: Optimal time-varying policy at every state,\n dimension S*T\n \"\"\"\n # The value itearation algorithm requires the knowledge of :\n # - Transition probabilities\n # - Rewards\n # - State space\n # - Action space\n # - The finite horizon\n p = env.transition_probabilities;\n r = env.rewards;\n n_states = env.n_states;\n n_actions = env.n_actions;\n\n # Required variables and temporary ones for the VI to run\n V = np.zeros(n_states);\n Q = np.zeros((n_states, n_actions));\n BV = np.zeros(n_states);\n # Iteration counter\n n = 0;\n # Tolerance error\n tol = (1 - gamma)* epsilon/gamma;\n #tol = 100\n # Initialization of the VI\n for s in range(n_states):\n for a in range(n_actions):\n Q[s, a] = r[s, a] + gamma*np.dot(p[:,s,a],V);\n BV = np.max(Q, 1);\n\n # Iterate until convergence\n while np.linalg.norm(V - BV) >= tol and n < 2600:\n # Increment by one the numbers of iteration\n n += 1;\n # Update the value function\n V = np.copy(BV);\n # Compute the new BV\n\n for s in range(n_states):\n for a in range(n_actions):\n Q[s, a] = r[s, a] + gamma*np.dot(p[:,s,a],V);\n BV = np.max(Q, 1);\n # Show error\n #print(np.linalg.norm(V - BV))\n # Compute policy\n policy = np.argmax(Q,1);\n # Return the obtained policy\n return V, policy;\n\ndef draw_town_map(town_map):\n\n # Map a color to each cell in the town_map\n col_map = {0: WHITE, 1: BLACK, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED};\n\n # Give a color to each cell\n rows,cols = town_map.shape;\n colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];\n\n # Create figure of the size of the town_map\n fig = plt.figure(1, figsize=(cols,rows));\n\n # Remove the axis ticks and add title title\n ax = plt.gca();\n ax.set_title('The town_map');\n ax.set_xticks([]);\n ax.set_yticks([]);\n\n # Give a color to each cell\n rows,cols = town_map.shape;\n colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];\n\n # Create figure of the size of the town_map\n fig = plt.figure(1, figsize=(cols,rows))\n\n # Create a table to color\n grid = plt.table(cellText=None,\n cellColours=colored_town_map,\n cellLoc='center',\n loc=(0,0),\n edges='closed');\n # Modify the hight and width of the cells in the table\n tc = grid.properties()['children']\n for cell in tc:\n cell.set_height(1.0/rows);\n cell.set_width(1.0/cols);\n\ndef animate_solution(town_map, path, save_anim = False, until_caught = False, gamma = 0):\n\n # Map a color to each cell in the town_map\n col_map = {0: WHITE, 1: SEB_GREEN, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED};\n\n # Size of the town_map\n rows,cols = town_map.shape;\n\n # Create figure of the size of the town_map\n fig = plt.figure(1, figsize=(cols,rows));\n\n # Remove the axis ticks and add title title\n ax = plt.gca();\n ax.set_title('Policy simulation: $\\lambda$ = %0.1f' %gamma);\n ax.set_xticks([]);\n ax.set_yticks([]);\n\n # Give a color to each cell\n colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];\n\n # Create figure of the size of the town_map\n fig = plt.figure(1, figsize=(cols,rows))\n\n # Create a table to color\n grid = plt.table(cellText=None,\n cellColours=colored_town_map,\n cellLoc='center',\n loc=(0,0),\n 
edges='closed');\n\n # Modify the hight and width of the cells in the table\n tc = grid.properties()['children']\n for cell in tc:\n cell.set_height(1.0/rows);\n cell.set_width(1.0/cols);\n\n\n # Update the color at each frame\n path_robber = [tuple(p)[0:2] for p in path]\n path_police = [tuple(p)[2:] for p in path]\n for i in range(len(path_robber)):\n\n\n if i == 0:\n grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)\n grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')\n\n grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)\n grid.get_celld()[(path_police[i])].get_text().set_text('Police') \n if save_anim:\n plt.savefig('optimal_policy_'+str(i))\n else:\n if until_caught and path_robber[i] == path_police[i]:\n grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])\n grid.get_celld()[(path_robber[i-1])].get_text().set_text('')\n grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])\n grid.get_celld()[(path_police[i-1])].get_text().set_text('') \n grid.get_celld()[(path_police[i])].set_facecolor(BUSTED_BLUE) \n grid.get_celld()[(path_police[i])].get_text().set_text('BUSTED')\n print(\"BUSTED!!!\", gamma)\n if save_anim: \n plt.savefig(str(gamma)+'_'+str(i)+'.png')\n break\n\n if save_anim:\n plt.savefig(str(gamma)+'_'+str(i)+'.png')\n grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])\n grid.get_celld()[(path_robber[i-1])].get_text().set_text('')\n grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])\n grid.get_celld()[(path_police[i-1])].get_text().set_text('') \n\n grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)\n grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')\n\n grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)\n grid.get_celld()[(path_police[i])].get_text().set_text('Police')\n grid.get_celld()[0,0].get_text().set_text('SEB')\n grid.get_celld()[0,0].get_text().set_color('white')\n grid.get_celld()[0,5].get_text().set_text('SEB')\n grid.get_celld()[0,5].get_text().set_color('white')\n grid.get_celld()[2,0].get_text().set_text('SEB')\n grid.get_celld()[2,0].get_text().set_color('white')\n grid.get_celld()[2,5].get_text().set_text('SEB')\n grid.get_celld()[2,5].get_text().set_color('white')\n plt.pause(0.7)\n plt.show()\n\n \ntown_map= np.array([\n [ 1, 0, 0, 0, 0, 1],\n [ 0, 0, 0, 0, 0, 0],\n [ 1, 0, 0, 0, 0, 1]\n])\n\n\nrb = RobbingBanks(town_map)\np=rb.transition_probabilities\nn=rb.n_states\nfor s in range(n):\n summ=np.sum(p[:,s,3])\n if summ>1:\n print(rb.states[s])\n# PLOTTING VALUE_FUNC(INIT_STATE) AS A FUNCTION OF LAMBDA/GAMMA\n\n\"\"\"\ngammas = np.linspace(0.01,1,100,endpoint=False)\nvalues = []\nfor gamma in gammas:\n V, policy = value_iteration(rb, gamma, epsilon = 1e-6)\n values.append(V[rb.map[(0,0,1,2)]])\nplt.semilogy(gammas,values,'--')\nplt.xlabel('Discount rate $\\lambda$')\nplt.ylabel('Value function V')\nplt.title('Effect of $\\lambda$ on V')\nplt.plot()\n#plt.show()\nplt.savefig('Value_2b.png')\n\"\"\"\n\n\n# PLOTTING OPTIMAL POLICY FOR DIFFERENT LAMBDAS\n\n\"\"\"\ngammas = [0.1,0.5,0.8]\n\nfor gamma in gammas:\n V, policy = value_iteration(rb, gamma, 1e-6)\n path = rb.simulate(policy)\n animate_solution(town_map, path, save_anim = False, until_caught = True,gamma=gamma)\n\"\"\""
] |
[
[
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"numpy.copy",
"matplotlib.pyplot.figure",
"numpy.sign",
"numpy.argmax",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.table"
]
] |
JohnMLarkin/pydap
|
[
"68650ae51d33cc5ac840b307edcc7b70cbb645a6"
] |
[
"src/pydap/responses/das.py"
] |
[
"\"\"\"The DAS response.\n\nThe DAS response describes the attributes associated with a dataset and its\nvariables. Together with the DDS the DAS response completely describes the\nmetadata of a dataset, allowing it to be introspected and data to be\ndownloaded.\n\n\"\"\"\n\ntry:\n from functools import singledispatch\nexcept ImportError:\n from singledispatch import singledispatch\nfrom collections import Iterable\n\nfrom six import string_types, integer_types\nfrom six.moves import map\n\nimport numpy as np\n\nfrom ..model import (DatasetType, BaseType,\n StructureType, SequenceType,\n GridType)\nfrom ..lib import encode, quote, __version__, NUMPY_TO_DAP2_TYPEMAP\nfrom .lib import BaseResponse\n\n\nINDENT = ' ' * 4\n\n\nclass DASResponse(BaseResponse):\n\n \"\"\"The DAS response.\"\"\"\n\n __version__ = __version__\n\n def __init__(self, dataset):\n BaseResponse.__init__(self, dataset)\n self.headers.extend([\n ('Content-description', 'dods_das'),\n ('Content-type', 'text/plain; charset=ascii'),\n ])\n\n def __iter__(self):\n for line in das(self.dataset):\n try:\n yield line.encode('ascii')\n except UnicodeDecodeError:\n yield line.encode('UTF-8')\n\n\n@singledispatch\ndef das(var, level=0):\n \"\"\"Single dispatcher that generates the DAS response.\"\"\"\n raise StopIteration\n\n\n@das.register(DatasetType)\ndef _datasettype(var, level=0):\n yield '{indent}Attributes {{\\n'.format(indent=level*INDENT)\n\n for attr in sorted(var.attributes.keys()):\n values = var.attributes[attr]\n for line in build_attributes(attr, values, level+1):\n yield line\n\n for child in var.children():\n for line in das(child, level=level+1):\n yield line\n yield '{indent}}}\\n'.format(indent=level*INDENT)\n\n\n@das.register(StructureType)\n@das.register(SequenceType)\ndef _structuretype(var, level=0):\n yield '{indent}{name} {{\\n'.format(indent=level*INDENT, name=var.name)\n\n for attr in sorted(var.attributes.keys()):\n values = var.attributes[attr]\n for line in build_attributes(attr, values, level+1):\n yield line\n\n for child in var.children():\n for line in das(child, level=level+1):\n yield line\n yield '{indent}}}\\n'.format(indent=level*INDENT)\n\n\n@das.register(BaseType)\n@das.register(GridType)\ndef _basetypegridtype(var, level=0):\n yield '{indent}{name} {{\\n'.format(indent=level*INDENT, name=var.name)\n\n for attr in sorted(var.attributes.keys()):\n values = var.attributes[attr]\n if np.asarray(values).size > 0:\n for line in build_attributes(attr, values, level+1):\n yield line\n yield '{indent}}}\\n'.format(indent=level*INDENT)\n\n\ndef build_attributes(attr, values, level=0):\n \"\"\"Recursive function to build the DAS.\"\"\"\n # check for metadata\n if isinstance(values, dict):\n yield '{indent}{attr} {{\\n'.format(indent=(level)*INDENT, attr=attr)\n for k, v in values.items():\n for line in build_attributes(k, v, level+1):\n yield line\n yield '{indent}}}\\n'.format(indent=(level)*INDENT)\n else:\n # get type\n type = get_type(values)\n\n # encode values\n if (isinstance(values, string_types) or\n not isinstance(values, Iterable) or\n getattr(values, 'shape', None) == ()):\n values = [encode(values)]\n else:\n values = map(encode, values)\n\n yield '{indent}{type} {attr} {values};\\n'.format(\n indent=(level)*INDENT,\n type=type,\n attr=quote(attr),\n values=', '.join(values))\n\n\ndef get_type(values):\n \"\"\"Extract the type of a variable.\n\n This function tries to determine the DAP type of a Python variable using\n several methods. 
Returns the DAP type as a string.\n\n \"\"\"\n if hasattr(values, 'dtype'):\n return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char]\n elif isinstance(values, string_types) or not isinstance(values, Iterable):\n return type_convert(values)\n else:\n # if there are several values, they may have different types, so we\n # need to convert all of them and use a precedence table\n types = [type_convert(val) for val in values]\n precedence = ['String', 'Float64', 'Int32']\n types.sort(key=precedence.index)\n return types[0]\n\n\ndef type_convert(obj):\n \"\"\"Map Python objects to the corresponding Opendap types.\n\n Returns the DAP representation of the type as a string.\n\n \"\"\"\n if isinstance(obj, float):\n return 'Float64'\n elif isinstance(obj, integer_types):\n return 'Int32'\n else:\n return 'String'\n"
] |
[
[
"numpy.asarray"
]
] |
fastflair/docarray
|
[
"0bbdbc816b2f4a3b399779f6816875fbc1dfe862"
] |
[
"docarray/array/storage/qdrant/helper.py"
] |
[
"from typing import List, TYPE_CHECKING\n\nimport numpy as np\nimport scipy.sparse\nfrom qdrant_openapi_client.models.models import Distance\n\nfrom docarray.math.helper import EPSILON\n\nif TYPE_CHECKING:\n from docarray.types import ArrayType\n\n\nclass QdrantStorageHelper:\n @classmethod\n def embedding_to_array(\n cls, embedding: 'ArrayType', default_dim: int\n ) -> List[float]:\n if embedding is None:\n embedding = np.random.rand(default_dim)\n else:\n from ....math.ndarray import to_numpy_array\n\n embedding = to_numpy_array(embedding)\n\n if embedding.ndim > 1:\n embedding = np.asarray(embedding).squeeze()\n\n if np.all(embedding == 0):\n embedding = embedding + EPSILON\n return embedding.tolist()\n\n\nDISTANCES = {\n 'cosine': Distance.COSINE,\n 'euclidean': Distance.EUCLID,\n 'dot': Distance.DOT,\n}\n"
] |
[
[
"numpy.all",
"numpy.random.rand",
"numpy.asarray"
]
] |
tugrabatin/backdoors101
|
[
"af12c08280fe59380f74c05e2737eb2e92a80fdf"
] |
[
"utils/min_norm_solvers.py"
] |
[
"# Credits to Ozan Sener\n# https://github.com/intel-isl/MultiObjectiveOptimization\n\nimport numpy as np\nimport torch\n\n\nclass MGDASolver:\n MAX_ITER = 250\n STOP_CRIT = 1e-5\n\n @staticmethod\n def _min_norm_element_from2(v1v1, v1v2, v2v2):\n \"\"\"\n Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2\n d is the distance (objective) optimzed\n v1v1 = <x1,x1>\n v1v2 = <x1,x2>\n v2v2 = <x2,x2>\n \"\"\"\n if v1v2 >= v1v1:\n # Case: Fig 1, third column\n gamma = 0.999\n cost = v1v1\n return gamma, cost\n if v1v2 >= v2v2:\n # Case: Fig 1, first column\n gamma = 0.001\n cost = v2v2\n return gamma, cost\n # Case: Fig 1, second column\n gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))\n cost = v2v2 + gamma * (v1v2 - v2v2)\n return gamma, cost\n\n @staticmethod\n def _min_norm_2d(vecs: list, dps):\n \"\"\"\n Find the minimum norm solution as combination of two points\n This is correct only in 2D\n ie. min_c |\\sum c_i x_i|_2^2 st. \\sum c_i = 1 , 1 >= c_1 >= 0\n for all i, c_i + c_j = 1.0 for some i, j\n \"\"\"\n dmin = 1e8\n sol = 0\n for i in range(len(vecs)):\n for j in range(i + 1, len(vecs)):\n if (i, j) not in dps:\n dps[(i, j)] = 0.0\n for k in range(len(vecs[i])):\n dps[(i, j)] += torch.dot(vecs[i][k].view(-1),\n vecs[j][k].view(-1)).detach()\n dps[(j, i)] = dps[(i, j)]\n if (i, i) not in dps:\n dps[(i, i)] = 0.0\n for k in range(len(vecs[i])):\n dps[(i, i)] += torch.dot(vecs[i][k].view(-1),\n vecs[i][k].view(-1)).detach()\n if (j, j) not in dps:\n dps[(j, j)] = 0.0\n for k in range(len(vecs[i])):\n dps[(j, j)] += torch.dot(vecs[j][k].view(-1),\n vecs[j][k].view(-1)).detach()\n c, d = MGDASolver._min_norm_element_from2(dps[(i, i)],\n dps[(i, j)],\n dps[(j, j)])\n if d < dmin:\n dmin = d\n sol = [(i, j), c, d]\n return sol, dps\n\n @staticmethod\n def _projection2simplex(y):\n \"\"\"\n Given y, it solves argmin_z |y-z|_2 st \\sum z = 1 , 1 >= z_i >= 0 for all i\n \"\"\"\n m = len(y)\n sorted_y = np.flip(np.sort(y), axis=0)\n tmpsum = 0.0\n tmax_f = (np.sum(y) - 1.0) / m\n for i in range(m - 1):\n tmpsum += sorted_y[i]\n tmax = (tmpsum - 1) / (i + 1.0)\n if tmax > sorted_y[i + 1]:\n tmax_f = tmax\n break\n return np.maximum(y - tmax_f, np.zeros(y.shape))\n\n @staticmethod\n def _next_point(cur_val, grad, n):\n proj_grad = grad - (np.sum(grad) / n)\n tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]\n tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])\n\n skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)\n t = 1\n if len(tm1[tm1 > 1e-7]) > 0:\n t = np.min(tm1[tm1 > 1e-7])\n if len(tm2[tm2 > 1e-7]) > 0:\n t = min(t, np.min(tm2[tm2 > 1e-7]))\n\n next_point = proj_grad * t + cur_val\n next_point = MGDASolver._projection2simplex(next_point)\n return next_point\n\n @staticmethod\n def find_min_norm_element(vecs: list):\n \"\"\"\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull as min |u|_2 st. u = \\sum c_i vecs[i]\n and \\sum c_i = 1. 
It is quite geometric, and the main idea is the\n fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution\n lies in (0, d_{i,j})Hence, we find the best 2-task solution , and\n then run the projected gradient descent until convergence\n \"\"\"\n # Solution lying at the combination of two points\n dps = {}\n init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)\n\n n = len(vecs)\n sol_vec = np.zeros(n)\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n\n if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec, init_sol[2]\n\n iter_count = 0\n\n grad_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n grad_mat[i, j] = dps[(i, j)]\n\n while iter_count < MGDASolver.MAX_ITER:\n grad_dir = -1.0 * np.dot(grad_mat, sol_vec)\n new_point = MGDASolver._next_point(sol_vec, grad_dir, n)\n # Re-compute the inner products for line search\n v1v1 = 0.0\n v1v2 = 0.0\n v2v2 = 0.0\n for i in range(n):\n for j in range(n):\n v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]\n v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]\n v2v2 += new_point[i] * new_point[j] * dps[(i, j)]\n nc, nd = MGDASolver._min_norm_element_from2(v1v1.item(),\n v1v2.item(),\n v2v2.item())\n # try:\n new_sol_vec = nc * sol_vec + (1 - nc) * new_point\n # except AttributeError:\n # print(sol_vec)\n change = new_sol_vec - sol_vec\n if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec\n\n @staticmethod\n def find_min_norm_element_FW(vecs):\n \"\"\"\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull\n as min |u|_2 st. u = \\sum c_i vecs[i] and \\sum c_i = 1.\n It is quite geometric, and the main idea is the fact that if\n d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies\n in (0, d_{i,j})Hence, we find the best 2-task solution, and then\n run the Frank Wolfe until convergence\n \"\"\"\n # Solution lying at the combination of two points\n dps = {}\n init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)\n\n n = len(vecs)\n sol_vec = np.zeros(n)\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n\n if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec, init_sol[2]\n\n iter_count = 0\n\n grad_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n grad_mat[i, j] = dps[(i, j)]\n\n while iter_count < MGDASolver.MAX_ITER:\n t_iter = np.argmin(np.dot(grad_mat, sol_vec))\n\n v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))\n v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])\n v2v2 = grad_mat[t_iter, t_iter]\n\n nc, nd = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)\n new_sol_vec = nc * sol_vec\n new_sol_vec[t_iter] += 1 - nc\n\n change = new_sol_vec - sol_vec\n if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec\n\n @classmethod\n def get_scales(cls, grads, losses, normalization_type, tasks):\n scale = {}\n gn = gradient_normalizers(grads, losses, normalization_type)\n for t in tasks:\n for gr_i in range(len(grads[t])):\n grads[t][gr_i] = grads[t][gr_i] / (gn[t] + 1e-5)\n sol, min_norm = cls.find_min_norm_element([grads[t] for t in tasks])\n for zi, t in enumerate(tasks):\n scale[t] = float(sol[zi])\n\n return scale\n\n\ndef gradient_normalizers(grads, losses, normalization_type):\n gn = {}\n if normalization_type == 'l2':\n for t in grads:\n gn[t] = torch.sqrt(\n torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum())\n elif normalization_type == 'loss':\n 
for t in grads:\n gn[t] = min(losses[t].mean(), 10.0)\n elif normalization_type == 'loss+':\n for t in grads:\n gn[t] = min(losses[t].mean() * torch.sqrt(\n torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()),\n 10)\n\n elif normalization_type == 'none' or normalization_type == 'eq':\n for t in grads:\n gn[t] = 1.0\n else:\n raise ValueError('ERROR: Invalid Normalization Type')\n return gn\n"
] |
[
[
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"numpy.min",
"numpy.sort",
"numpy.abs"
]
] |
keithyin/tensorflow-demos
|
[
"e716eb1469cf8985018dc913cff91fc07fb073e9"
] |
[
"one_hot_demo.py"
] |
[
"import tensorflow as tf\nif __name__ == '__main__':\n a = tf.constant([[1], [2], [3]], dtype=tf.int64)\n one_hotted = tf.one_hot(a, depth=4)\n print(one_hotted.shape)\n with tf.Session() as sess:\n print(sess.run(one_hotted))"
] |
[
[
"tensorflow.constant",
"tensorflow.one_hot",
"tensorflow.Session"
]
] |
unanan/deep-text-recognition-benchmark-mnn-ncnn
|
[
"7b68ff66c518869897990e9f5fb00dbb7024d614"
] |
[
"train.py"
] |
[
"import os\nimport sys\nimport time\nimport random\nimport string\nimport argparse\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.optim as optim\nimport torch.utils.data\nimport numpy as np\n\nfrom utils import CTCLabelConverter, AttnLabelConverter, Averager\nfrom dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\nfrom model import Model\nfrom test import validation\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(opt):\n \"\"\" dataset preparation \"\"\"\n if not opt.data_filtering_off:\n print('Filtering the images containing characters which are not in opt.character')\n print('Filtering the images whose label is longer than opt.batch_max_length')\n # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130\n\n opt.select_data = opt.select_data.split('-')\n opt.batch_ratio = opt.batch_ratio.split('-')\n train_dataset = Batch_Balanced_Dataset(opt)\n\n log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')\n AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=opt.batch_size,\n shuffle=True, # 'True' to check training progress with validation function.\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_valid, pin_memory=True)\n log.write(valid_dataset_log)\n print('-' * 80)\n log.write('-' * 80 + '\\n')\n log.close()\n \n \"\"\" model configuration \"\"\"\n if 'CTC' in opt.Prediction:\n converter = CTCLabelConverter(opt.character)\n else:\n converter = AttnLabelConverter(opt.character)\n opt.num_class = len(converter.character)\n\n if opt.rgb:\n opt.input_channel = 3\n model = Model(opt)\n print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,\n opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,\n opt.SequenceModeling, opt.Prediction)\n\n # weight initialization\n for name, param in model.named_parameters():\n if 'localization_fc2' in name:\n print(f'Skip {name} as it is already initialized')\n continue\n try:\n if 'bias' in name:\n init.constant_(param, 0.0)\n elif 'weight' in name:\n init.kaiming_normal_(param)\n except Exception as e: # for batchnorm.\n if 'weight' in name:\n param.data.fill_(1)\n continue\n\n # data parallel for multi-GPU\n model = torch.nn.DataParallel(model).to(device)\n model.train()\n if opt.saved_model != '':\n print(f'loading pretrained model from {opt.saved_model}')\n if opt.FT:\n model.load_state_dict(torch.load(opt.saved_model), strict=False)\n else:\n model.load_state_dict(torch.load(opt.saved_model))\n print(\"Model:\")\n print(model)\n\n \"\"\" setup loss \"\"\"\n if 'CTC' in opt.Prediction:\n criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)\n else:\n criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0\n # loss averager\n loss_avg = Averager()\n\n # filter that only require gradient decent\n filtered_parameters = []\n params_num = []\n for p in filter(lambda p: p.requires_grad, model.parameters()):\n filtered_parameters.append(p)\n params_num.append(np.prod(p.size()))\n print('Trainable params num : ', sum(params_num))\n # [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, 
model.named_parameters())]\n\n # setup optimizer\n if opt.adam:\n optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))\n else:\n optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)\n print(\"Optimizer:\")\n print(optimizer)\n\n \"\"\" final options \"\"\"\n # print(opt)\n with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:\n opt_log = '------------ Options -------------\\n'\n args = vars(opt)\n for k, v in args.items():\n opt_log += f'{str(k)}: {str(v)}\\n'\n opt_log += '---------------------------------------\\n'\n print(opt_log)\n opt_file.write(opt_log)\n\n \"\"\" start training \"\"\"\n start_iter = 0\n if opt.saved_model != '':\n try:\n start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])\n print(f'continue to train, start_iter: {start_iter}')\n except:\n pass\n\n start_time = time.time()\n best_accuracy = -1\n best_norm_ED = -1\n iteration = start_iter\n\n while(True):\n # train part\n image_tensors, labels = train_dataset.get_batch()\n image = image_tensors.to(device)\n text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)\n batch_size = image.size(0)\n\n if 'CTC' in opt.Prediction:\n preds = model(image, text)\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n preds = preds.log_softmax(2).permute(1, 0, 2)\n cost = criterion(preds, text, preds_size, length)\n\n else:\n preds = model(image, text[:, :-1]) # align with Attention.forward\n target = text[:, 1:] # without [GO] Symbol\n cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))\n\n model.zero_grad()\n cost.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)\n optimizer.step()\n\n loss_avg.add(cost)\n\n\n # validation part\n if (iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'\n elapsed_time = time.time() - start_time\n # for log\n with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:\n model.eval()\n with torch.no_grad():\n valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(\n model, criterion, valid_loader, converter, opt)\n model.train()\n\n # training loss and validation loss\n loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'\n loss_avg.reset()\n\n current_model_log = f'{\"Current_accuracy\":17s}: {current_accuracy:0.3f}, {\"Current_norm_ED\":17s}: {current_norm_ED:0.2f}'\n\n # keep best accuracy model (on valid dataset)\n if current_accuracy > best_accuracy:\n best_accuracy = current_accuracy\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')\n if current_norm_ED > best_norm_ED:\n best_norm_ED = current_norm_ED\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')\n best_model_log = f'{\"Best_accuracy\":17s}: {best_accuracy:0.3f}, {\"Best_norm_ED\":17s}: {best_norm_ED:0.2f}'\n\n loss_model_log = f'{loss_log}\\n{current_model_log}\\n{best_model_log}'\n print(loss_model_log)\n log.write(loss_model_log + '\\n')\n\n # show some predicted results\n dashed_line = '-' * 80\n head = f'{\"Ground Truth\":25s} | {\"Prediction\":25s} | Confidence Score & T/F'\n predicted_result_log = f'{dashed_line}\\n{head}\\n{dashed_line}\\n'\n for gt, pred, confidence in zip(labels[:5], preds[:5], 
confidence_score[:5]):\n if 'Attn' in opt.Prediction:\n gt = gt[:gt.find('[s]')]\n pred = pred[:pred.find('[s]')]\n\n predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\\t{str(pred == gt)}\\n'\n predicted_result_log += f'{dashed_line}'\n print(predicted_result_log)\n log.write(predicted_result_log + '\\n')\n\n # save model per 1e+5 iter.\n if (iteration + 1) % 1e+5 == 0:\n torch.save(\n model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')\n\n if (iteration + 1) == opt.num_iter:\n print('end the training')\n sys.exit()\n iteration += 1\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = \"2,3\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', help='Where to store logs and models')\n parser.add_argument('--train_data', default=\"/path/to/your/lmdb/train\", help='path to training dataset')\n parser.add_argument('--valid_data', default=\"/path/to/your/lmdb/val\", help='path to validation dataset')\n parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')\n parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')\n parser.add_argument('--batch_size', default=64, type=int, help='input batch size')\n parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')\n parser.add_argument('--valInterval', type=int, default=500, help='Interval between each validation')\n parser.add_argument('--saved_model', default='', help=\"path to model to continue training\")\n parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')\n parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')\n parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')\n parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')\n parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')\n parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')\n parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5')\n \"\"\" Data processing \"\"\"\n parser.add_argument('--select_data', type=str, default='/',\n help='select training data (default is MJ-ST, which means MJ and ST used as training data)')\n parser.add_argument('--batch_ratio', type=str, default='1',\n help='assign ratio for each selected data in the batch')\n\n parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',\n help='total data usage ratio, this ratio is multiplied to total number of data.')\n parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser.add_argument('--rgb', action='store_true', help='use rgb input')\n parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')\n \"\"\" Model Architecture \"\"\"\n parser.add_argument('--Transformation', type=str, default=\"TPS\", help='Transformation stage. None|TPS')\n parser.add_argument('--FeatureExtraction', type=str, default=\"ResNet\", help='FeatureExtraction stage. VGG|RCNN|ResNet')\n parser.add_argument('--SequenceModeling', type=str, default=\"BiLSTM\", help='SequenceModeling stage. None|BiLSTM')\n parser.add_argument('--Prediction', type=str, default=\"Attn\", help='Prediction stage. CTC|Attn')\n parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')\n parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor')\n parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n opt = parser.parse_args()\n\n if not opt.exp_name:\n opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'\n opt.exp_name += f'-Seed{opt.manualSeed}'\n # print(opt.exp_name)\n\n os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)\n\n \"\"\" vocab / character number configuration \"\"\"\n if opt.sensitive:\n # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\n \"\"\" Seed and GPU setting \"\"\"\n # print(\"Random Seed: \", opt.manualSeed)\n random.seed(opt.manualSeed)\n np.random.seed(opt.manualSeed)\n torch.manual_seed(opt.manualSeed)\n torch.cuda.manual_seed(opt.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n opt.num_gpu = torch.cuda.device_count()\n # print('device count', opt.num_gpu)\n if opt.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n opt.workers = opt.workers * opt.num_gpu\n opt.batch_size = opt.batch_size * opt.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', 
opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"\n\n train(opt)\n"
] |
[
[
"torch.cuda.manual_seed",
"torch.nn.init.constant_",
"numpy.random.seed",
"torch.no_grad",
"torch.optim.Adam",
"torch.nn.CTCLoss",
"torch.nn.init.kaiming_normal_",
"torch.optim.Adadelta",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
] |
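Note: the train.py record above branches on opt.Prediction, log-softmaxing and permuting the model output to (T, N, C) before torch.nn.CTCLoss when 'CTC' is selected. The following is a minimal, self-contained sketch of that CTC branch only; the tensor shapes, random data, and blank-index choice are illustrative assumptions, not values taken from the record.

import torch

batch_size, time_steps, num_classes = 4, 26, 38       # assumed shapes, not from the record
criterion = torch.nn.CTCLoss(blank=0)                 # blank index 0 is an assumption

preds = torch.randn(batch_size, time_steps, num_classes)           # stand-in for model(image, text)
text = torch.randint(1, num_classes, (batch_size, 10))             # encoded labels (no blank symbol)
length = torch.full((batch_size,), 10, dtype=torch.long)           # label lengths
preds_size = torch.IntTensor([preds.size(1)] * batch_size)         # every sequence spans all T steps

log_probs = preds.log_softmax(2).permute(1, 0, 2)                  # (T, N, C), as CTCLoss expects
cost = criterion(log_probs, text, preds_size, length)
print(cost.item())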
ufopcsilab/ECGClassification
|
[
"e45044e98be45becc04b032713a727eec7fb029a"
] |
[
"python/qrs/qrs_net.py"
] |
[
"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@ide: PyCharm\n@author: Pedro Silva\n@contact: pedroh21.silva@gmail.com\n@created: out-10 of 2019\n\"\"\"\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as kback\n\nfrom tensorflow import keras\n\n\nclass QRSNet(object):\n\n @classmethod\n def _cnn_net(cls):\n \"\"\"\n Create the CNN net topology.\n :return keras.Sequential(): CNN topology.\n \"\"\"\n qrs_detector = keras.Sequential()\n\n # CONV1\n qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))\n\n # POOLING 1\n qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))\n\n # CONV2\n qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))\n\n # POOLING 2\n qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))\n\n # CONV3\n qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))\n\n # POOLING 3\n qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))\n\n # CONV4\n qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))\n\n # POOLING 4\n qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))\n\n qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))\n\n # FC1\n qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))\n\n # FC2\n qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))\n\n # DROP1\n qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))\n\n # Classes\n qrs_detector.add(keras.layers.Dense(units=2, name='classes'))\n\n # SoftMax\n qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))\n\n return qrs_detector\n\n @classmethod\n def build(cls, net_type):\n \"\"\"\n Build the CNN topology.\n :param str net_type: the network type, CNN or LSTM.\n :return keras.Sequential(): CNN topology.\n \"\"\"\n if net_type == 'cnn':\n qrs_detector = cls._cnn_net()\n else:\n raise NotImplementedError('Only the CNN network was implemented.')\n\n return qrs_detector\n\n @classmethod\n def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):\n \"\"\"\n Prepare the data for the training, turning it into a numpy array.\n :param list data_x: data that will be used to train.\n :param tuple input_shape: the input shape that the data must have to be used as training data.\n :param list data_y: the labels related to the data used to train.\n :param int number_of_classes: number of classes of the problem.\n :param bool normalize: if the data should be normalized (True) or not (False).\n :return np.array: the data processed.\n \"\"\"\n if len(input_shape) == 2:\n data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!\n elif len(input_shape) == 3:\n data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!\n else:\n raise Exception('Only inputs of two and three dimensions were implemented.')\n if normalize:\n data_x = data_x / np.amax(data_x)\n data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)\n\n return data_x, data_y\n\n @classmethod\n def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),\n epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, 
metrics=None, normalize=False, show_net_info=True):\n \"\"\"\n Function used to train the model.\n :param keras.Sequential model: model to be trained.\n :param list train_x: data that will be used to train.\n :param list train_y: the labels related to the data used to train.\n :param list validation_x: data that will be used to validate the model trained.\n :param list validation_y: the labels related to the data used to validate the model trained.\n :param int number_of_classes: number of classes of the problem.\n :param tuple input_shape: the input shape that the data must have to be used as training data.\n :param int epochs: total epochs that the model will be trained.\n :param float lr: learning rate used to train.\n :param int batch_size: batch size used to train.\n :param optimizer: which optimizer will be used to train.\n :param str loss: loss function used during the training.\n :param list metrics: metrics used to evaluate the trained model.\n :param bool normalize: if the data should be normalized (True) or not (False).\n :param bool show_net_info: if the network topology should be showed (True) or not (False).\n :return keras.Sequential, dict: model trained and the history of the training process.\n \"\"\"\n if optimizer is None:\n optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)\n if loss is None:\n loss = keras.losses.categorical_crossentropy\n if metrics is None:\n metrics = ['acc']\n elif type(metrics) is not list:\n metrics = [metrics]\n\n # Set optimizer\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n if show_net_info:\n print(model.summary())\n\n # Prepare data\n train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)\n validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)\n\n kback.set_value(model.optimizer.lr, lr)\n train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)\n # H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)\n\n return model, train_history\n\n @classmethod\n def save_model(cls, model, model_name):\n try:\n model.save(model_name)\n except OSError:\n # serialize model to JSON\n model_json = model.to_json()\n with open(model_name.replace('.h5', '.json'), 'w') as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(model_name)\n\n @classmethod\n def load_model(cls, model_name):\n if os.path.exists(model_name.replace('.h5', '.json')):\n # load json and create model\n json_file = open(model_name.replace('.h5', '.json'), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = keras.models.model_from_json(loaded_model_json)\n\n # load weights into new model\n loaded_model.load_weights(model_name)\n return loaded_model\n else:\n return keras.models.load_model(model_name)\n"
] |
[
[
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.optimizers.SGD",
"numpy.asarray",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.set_value",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.load_model",
"numpy.amax",
"tensorflow.keras.layers.MaxPool1D",
"tensorflow.keras.models.model_from_json"
]
] |
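Note: the qrs_net.py record above stacks Conv1D/MaxPool1D blocks over 300-sample single-channel windows and ends in a 2-class softmax, compiled with SGD and categorical cross-entropy. Below is a minimal, self-contained sketch of that pattern with a deliberately shrunken topology; the reduced layer stack, dummy data, and the TF 2.x learning_rate argument are assumptions, not the record's exact network.

import numpy as np
import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1)),  # first conv block from the record
    keras.layers.MaxPool1D(pool_size=2, strides=2),
    keras.layers.Flatten(),
    keras.layers.Dense(2, activation=tf.nn.softmax),                           # 2-class softmax output
])
model.compile(optimizer=keras.optimizers.SGD(learning_rate=1e-4, momentum=0.9),
              loss=keras.losses.categorical_crossentropy,
              metrics=["acc"])

x = np.random.random((8, 300, 1)).astype("float32")                # 8 dummy ECG windows
y = keras.utils.to_categorical(np.random.randint(0, 2, 8), 2)      # one-hot dummy labels
model.fit(x, y, epochs=1, batch_size=4, verbose=0)
print(model.predict(x[:1], verbose=0).shape)                       # (1, 2)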
mmccarty/dask
|
[
"5602876f3389d039aba0d1a860922777843dbcb9",
"5602876f3389d039aba0d1a860922777843dbcb9",
"5602876f3389d039aba0d1a860922777843dbcb9"
] |
[
"dask/dataframe/tests/test_shuffle.py",
"dask/array/overlap.py",
"dask/dataframe/io/tests/test_parquet.py"
] |
[
"import itertools\nimport multiprocessing as mp\nimport os\nimport pickle\nimport random\nimport string\nimport tempfile\nfrom concurrent.futures import ProcessPoolExecutor\nfrom copy import copy\nfrom functools import partial\nfrom unittest import mock\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask import delayed\nfrom dask.base import compute_as_if_collection\nfrom dask.dataframe._compat import PANDAS_GT_120, assert_categorical_equal, tm\nfrom dask.dataframe.shuffle import (\n _noop,\n maybe_buffered_partd,\n partitioning_index,\n rearrange_by_column,\n rearrange_by_divisions,\n remove_nans,\n shuffle,\n)\nfrom dask.dataframe.utils import assert_eq, make_meta\nfrom dask.optimization import cull\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [1, 4, 7]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [2, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [3, 6, 9]}, index=[9, 9, 9]),\n}\nmeta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n)\nd = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\nshuffle_func = shuffle # conflicts with keyword argument\n\n\ndef test_shuffle(shuffle_method):\n s = shuffle_func(d, d.b, shuffle=shuffle_method)\n assert isinstance(s, dd.DataFrame)\n assert s.npartitions == d.npartitions\n\n x = dask.get(s.dask, (s._name, 0))\n y = dask.get(s.dask, (s._name, 1))\n\n assert not (set(x.b) & set(y.b)) # disjoint\n assert set(s.dask).issuperset(d.dask)\n\n assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name\n\n\ndef test_default_partitions():\n assert shuffle(d, d.b).npartitions == d.npartitions\n\n\ndef test_shuffle_npartitions(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=10)\n s = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=17, max_branch=4)\n sc = s.compute()\n assert s.npartitions == 17\n assert set(s.dask).issuperset(set(ddf.dask))\n\n assert len(sc) == len(df)\n assert list(s.columns) == list(df.columns)\n assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))\n\n\ndef test_shuffle_npartitions_lt_input_partitions(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=20)\n s = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=5, max_branch=2)\n sc = s.compute()\n assert s.npartitions == 5\n assert set(s.dask).issuperset(set(ddf.dask))\n\n assert len(sc) == len(df)\n assert list(s.columns) == list(df.columns)\n assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))\n\n\ndef test_index_with_non_series(shuffle_method):\n from dask.dataframe.tests.test_multi import list_eq\n\n list_eq(\n shuffle(d, d.b, shuffle=shuffle_method), shuffle(d, \"b\", shuffle=shuffle_method)\n )\n\n\ndef test_index_with_dataframe(shuffle_method):\n res1 = shuffle(d, d[[\"b\"]], shuffle=shuffle_method).compute()\n res2 = shuffle(d, [\"b\"], shuffle=shuffle_method).compute()\n res3 = shuffle(d, \"b\", shuffle=shuffle_method).compute()\n\n assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())\n assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())\n\n\ndef test_shuffle_from_one_partition_to_one_other(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n a = 
dd.from_pandas(df, 1)\n\n for i in [1, 2]:\n b = shuffle(a, \"x\", npartitions=i, shuffle=shuffle_method)\n assert len(a.compute(scheduler=\"sync\")) == len(b.compute(scheduler=\"sync\"))\n\n\ndef test_shuffle_empty_partitions(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3] * 10})\n ddf = dd.from_pandas(df, npartitions=3)\n s = shuffle(ddf, ddf.x, npartitions=6, shuffle=shuffle_method)\n parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())\n for p in parts:\n assert s.columns == p.columns\n\n\ndf2 = pd.DataFrame(\n {\n \"i32\": np.array([1, 2, 3] * 3, dtype=\"int32\"),\n \"f32\": np.array([None, 2.5, 3.5] * 3, dtype=\"float32\"),\n \"cat\": pd.Series([\"a\", \"b\", \"c\"] * 3).astype(\"category\"),\n \"obj\": pd.Series([\"d\", \"e\", \"f\"] * 3),\n \"bool\": np.array([True, False, True] * 3),\n \"dt\": pd.Series(pd.date_range(\"20130101\", periods=9)),\n \"dt_tz\": pd.Series(pd.date_range(\"20130101\", periods=9, tz=\"US/Eastern\")),\n \"td\": pd.Series(pd.timedelta_range(\"2000\", periods=9)),\n }\n)\n\n\ndef test_partitioning_index():\n res = partitioning_index(df2.i32, 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()\n\n res = partitioning_index(df2[[\"i32\"]], 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n res = partitioning_index(df2[[\"cat\", \"bool\", \"f32\"]], 2)\n assert ((0 <= res) & (res < 2)).all()\n\n res = partitioning_index(df2.index, 4)\n assert ((res < 4) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n\ndef test_partitioning_index_categorical_on_values():\n df = pd.DataFrame({\"a\": list(string.ascii_letters), \"b\": [1, 2, 3, 4] * 13})\n df.a = df.a.astype(\"category\")\n df2 = df.copy()\n df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))\n\n res = partitioning_index(df.a, 5)\n res2 = partitioning_index(df2.a, 5)\n assert (res == res2).all()\n\n res = partitioning_index(df, 5)\n res2 = partitioning_index(df2, 5)\n assert (res == res2).all()\n\n\n@pytest.mark.parametrize(\n \"npartitions\", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]\n)\ndef test_set_index_general(npartitions, shuffle_method):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n assert_eq(df.set_index(\"x\"), ddf.set_index(\"x\", shuffle=shuffle_method))\n\n assert_eq(df.set_index(\"y\"), ddf.set_index(\"y\", shuffle=shuffle_method))\n\n assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle=shuffle_method))\n\n assert_eq(\n df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle=shuffle_method)\n )\n\n assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle=shuffle_method))\n\n assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle=shuffle_method))\n\n\ndef test_set_index_self_index(shuffle_method):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n a = dd.from_pandas(df, npartitions=4)\n b = a.set_index(a.index, shuffle=shuffle_method)\n assert a is b\n\n assert_eq(b, df.set_index(df.index))\n\n\ndef test_set_index_names(shuffle_method):\n if shuffle_method == \"disk\":\n pytest.xfail(\"dsk names in disk shuffle are not deterministic\")\n\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n 
index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert set(ddf.set_index(\"x\", shuffle=shuffle_method).dask) == set(\n ddf.set_index(\"x\", shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"y\", shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", max_branch=4, shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"x\", max_branch=3, shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", drop=True, shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"x\", drop=False, shuffle=shuffle_method).dask\n )\n\n\ndef test_set_index_2(shuffle_method):\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2004\",\n {\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n\n df2 = df.set_index(\"name\", shuffle=shuffle_method)\n df2.value.sum().compute(scheduler=\"sync\")\n\n\ndef test_set_index_3(shuffle_method):\n df = pd.DataFrame(np.random.random((10, 2)), columns=[\"x\", \"y\"])\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, max_branch=2, npartitions=ddf.npartitions\n )\n df2 = df.set_index(\"x\")\n assert_eq(df2, ddf2)\n assert ddf2.npartitions == ddf.npartitions\n\n\ndef test_shuffle_sort(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3, 2, 1], \"y\": [9, 8, 7, 1, 5]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n df2 = df.set_index(\"x\").sort_index()\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method)\n\n assert_eq(ddf2.loc[2:3], df2.loc[2:3])\n\n\n@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_rearrange(shuffle_method, scheduler):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n result = rearrange_by_column(\n ddf2, \"_partitions\", max_branch=32, shuffle=shuffle_method\n )\n assert result.npartitions == ddf.npartitions\n assert set(ddf.dask).issubset(result.dask)\n\n # Every value in exactly one partition\n a = result.compute(scheduler=scheduler)\n get = dask.base.get_scheduler(scheduler=scheduler)\n parts = get(result.dask, result.__dask_keys__())\n\n for i in a._partitions.drop_duplicates():\n assert sum(i in set(part._partitions) for part in parts) == 1\n\n\ndef test_rearrange_cleanup():\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n result = rearrange_by_column(ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\")\n result.compute(scheduler=\"processes\")\n\n assert len(os.listdir(tmpdir)) == 0\n\n\ndef mock_shuffle_group_3(df, col, npartitions, p):\n raise ValueError(\"Mock exception!\")\n\n\ndef test_rearrange_disk_cleanup_with_exception():\n # ensure temporary files are cleaned up when there's an internal exception.\n\n with mock.patch(\"dask.dataframe.shuffle.shuffle_group_3\", new=mock_shuffle_group_3):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n with pytest.raises(ValueError, match=\"Mock exception!\"):\n result = rearrange_by_column(\n ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\"\n )\n result.compute(scheduler=\"processes\")\n\n assert 
len(os.listdir(tmpdir)) == 0\n\n\ndef test_rearrange_by_column_with_narrow_divisions():\n from dask.dataframe.tests.test_multi import list_eq\n\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n df = rearrange_by_divisions(a, \"x\", (0, 2, 5))\n list_eq(df, a)\n\n\ndef test_maybe_buffered_partd(tmp_path):\n import partd\n\n f = maybe_buffered_partd()\n p1 = f()\n assert isinstance(p1.partd, partd.Buffer)\n f2 = pickle.loads(pickle.dumps(f))\n assert not f2.buffer\n p2 = f2()\n assert isinstance(p2.partd, partd.File)\n\n f3 = maybe_buffered_partd(tempdir=tmp_path)\n p3 = f3()\n assert isinstance(p3.partd, partd.Buffer)\n contents = list(tmp_path.iterdir())\n assert len(contents) == 1\n assert contents[0].suffix == \".partd\"\n assert contents[0].parent == tmp_path\n f4 = pickle.loads(pickle.dumps(f3))\n assert not f4.buffer\n assert f4.tempdir == tmp_path\n\n\ndef test_set_index_with_explicit_divisions():\n df = pd.DataFrame({\"x\": [4, 1, 2, 5]}, index=[10, 20, 30, 40])\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n def throw(*args, **kwargs):\n raise Exception()\n\n with dask.config.set(get=throw):\n ddf2 = ddf.set_index(\"x\", divisions=[1, 3, 5])\n assert ddf2.divisions == (1, 3, 5)\n\n df2 = df.set_index(\"x\")\n assert_eq(ddf2, df2)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"x\", divisions=[3, 1, 5])\n\n\ndef test_set_index_divisions_2():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")})\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.set_index(\"y\", divisions=[\"a\", \"c\", \"d\"])\n assert result.divisions == (\"a\", \"c\", \"d\")\n\n assert list(result.compute(scheduler=\"sync\").index[-2:]) == [\"d\", \"d\"]\n\n\ndef test_set_index_divisions_compute():\n d2 = d.set_index(\"b\", divisions=[0, 2, 9], compute=False)\n d3 = d.set_index(\"b\", divisions=[0, 2, 9], compute=True)\n\n assert_eq(d2, d3)\n assert_eq(d2, full.set_index(\"b\"))\n assert_eq(d3, full.set_index(\"b\"))\n assert len(d2.dask) > len(d3.dask)\n\n d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)\n d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)\n exp = full.copy()\n exp.index = exp.b\n assert_eq(d4, d5)\n assert_eq(d4, exp)\n assert_eq(d5, exp)\n assert len(d4.dask) > len(d5.dask)\n\n\ndef test_set_index_divisions_sorted():\n p1 = pd.DataFrame({\"x\": [10, 11, 12], \"y\": [\"a\", \"a\", \"a\"]})\n p2 = pd.DataFrame({\"x\": [13, 14, 15], \"y\": [\"b\", \"b\", \"c\"]})\n p3 = pd.DataFrame({\"x\": [16, 17, 18], \"y\": [\"d\", \"e\", \"e\"]})\n\n ddf = dd.DataFrame(\n {(\"x\", 0): p1, (\"x\", 1): p2, (\"x\", 2): p3}, \"x\", p1, [None, None, None, None]\n )\n df = ddf.compute()\n\n def throw(*args, **kwargs):\n raise Exception(\"Shouldn't have computed\")\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"x\", divisions=[10, 13, 16, 18], sorted=True)\n assert_eq(res, df.set_index(\"x\"))\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"e\"], sorted=True)\n assert_eq(res, df.set_index(\"y\"))\n\n # with sorted=True, divisions must be same length as df.divisions\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"c\", \"d\", \"e\"], sorted=True)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"c\"], sorted=True)\n\n\n@pytest.mark.slow\ndef test_set_index_consistent_divisions():\n # See 
https://github.com/dask/dask/issues/3867\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n ddf = dd.from_pandas(df, npartitions=4)\n ddf = ddf.clear_divisions()\n\n ctx = mp.get_context(\"spawn\")\n with ProcessPoolExecutor(8, ctx) as pool:\n func = partial(_set_index, df=ddf, idx=\"x\")\n divisions_set = set(pool.map(func, range(100)))\n assert len(divisions_set) == 1\n\n\ndef _set_index(i, df, idx):\n return df.set_index(idx).divisions\n\n\ndef test_set_index_reduces_partitions_small(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=50)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method, npartitions=\"auto\")\n assert ddf2.npartitions < 10\n\n\ndef make_part(n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})\n\n\ndef test_set_index_reduces_partitions_large(shuffle_method):\n nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, npartitions=\"auto\", partition_size=nbytes\n )\n assert 1 < ddf2.npartitions < 20\n\n\ndef test_set_index_doesnt_increase_partitions(shuffle_method):\n nparts = 2\n nbytes = 1e6\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, npartitions=\"auto\", partition_size=nbytes\n )\n assert ddf2.npartitions <= ddf.npartitions\n\n\ndef test_set_index_detects_sorted_data(shuffle_method):\n df = pd.DataFrame({\"x\": range(100), \"y\": range(100)})\n ddf = dd.from_pandas(df, npartitions=10, name=\"x\", sort=False)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method)\n assert len(ddf2.dask) < ddf.npartitions * 4\n\n\ndef test_set_index_sorts():\n # https://github.com/dask/dask/issues/2288\n vals = np.array(\n [\n 1348550149000000000,\n 1348550149000000000,\n 1348558142000000000,\n 1348558142000000000,\n 1348585928000000000,\n 1348585928000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348614789000000000,\n 1348614789000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348637628000000000,\n 1348638159000000000,\n 1348638160000000000,\n 1348638159000000000,\n 1348638160000000000,\n 1348637628000000000,\n 1348646354000000000,\n 1348646354000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348672876000000000,\n 1348672876000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348728167000000000,\n 1348728167000000000,\n 1348730745000000000,\n 1348730745000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348754449000000000,\n 1348754449000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n ]\n )\n vals = pd.to_datetime(vals, unit=\"ns\")\n breaks = [10, 36, 58]\n dfs = []\n\n for i in range(len(breaks)):\n lo = 
sum(breaks[:i])\n hi = sum(breaks[i : i + 1])\n\n dfs.append(pd.DataFrame({\"timestamp\": vals[lo:hi]}, index=range(lo, hi)))\n\n ddf = dd.concat(dfs).clear_divisions()\n assert ddf.set_index(\"timestamp\").index.compute().is_monotonic_increasing is True\n\n\n@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 2, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [9, 1, 8]}, index=[9, 9, 9]),\n }\n d = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n\n if engine == \"cudf\":\n d = dask_cudf.from_dask_dataframe(d)\n\n full = d.compute()\n\n d2 = d.set_index(\"b\", npartitions=3)\n assert d2.npartitions == 3\n assert d2.index.name == \"b\"\n assert_eq(d2, full.set_index(\"b\"))\n\n d3 = d.set_index(d.b, npartitions=3)\n assert d3.npartitions == 3\n assert d3.index.name == \"b\"\n assert_eq(d3, full.set_index(full.b))\n\n d4 = d.set_index(\"b\")\n assert d4.index.name == \"b\"\n assert_eq(d4, full.set_index(\"b\"))\n\n d5 = d.set_index([\"b\"])\n assert d5.index.name == \"b\"\n assert_eq(d5, full.set_index([\"b\"]))\n\n\n@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n df = pd.DataFrame({\"x\": [4, 1, 1, 3, 3], \"y\": [1.0, 1, 1, 1, 2]})\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = dask_cudf.from_cudf(gdf, npartitions=3)\n else:\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"x\", npartitions=3)\n assert d1.npartitions == 3\n assert set(d1.divisions) == {1, 2, 4}\n\n d2 = d.set_index(\"y\", npartitions=3)\n assert d2.divisions[0] == 1.0\n assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0\n assert d2.divisions[3] == 2.0\n\n\n@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate_int(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n L = sorted(list(range(0, 200, 10)) * 2)\n df = pd.DataFrame({\"x\": 2 * L})\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = dask_cudf.from_cudf(gdf, npartitions=2)\n else:\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"x\", npartitions=10)\n assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)\n\n\n@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate_large_uint(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n \"\"\"This test is for #7304\"\"\"\n df = pd.DataFrame(\n {\"x\": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}\n )\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = 
dask_cudf.from_cudf(gdf, npartitions=2)\n else:\n d = dd.from_pandas(df, 1)\n\n d1 = d.set_index(\"x\", npartitions=1)\n assert d1.npartitions == 1\n assert set(d1.divisions) == {612509347682975743, 616762138058293247}\n\n\ndef test_set_index_timezone():\n s_naive = pd.Series(pd.date_range(\"20130101\", periods=3))\n s_aware = pd.Series(pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"))\n df = pd.DataFrame({\"tz\": s_aware, \"notz\": s_naive})\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"notz\", npartitions=1)\n s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)\n assert d1.divisions[0] == s_naive[0] == s1[0]\n assert d1.divisions[-1] == s_naive[2] == s1[2]\n\n # We currently lose \"freq\". Converting data with pandas-defined dtypes\n # to numpy or pure Python can be lossy like this.\n d2 = d.set_index(\"tz\", npartitions=1)\n s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)\n assert d2.divisions[0] == s2[0]\n assert d2.divisions[-1] == s2[2]\n assert d2.divisions[0].tz == s2[0].tz\n assert d2.divisions[0].tz is not None\n s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)\n if PANDAS_GT_120:\n # starting with pandas 1.2.0, comparing equality of timestamps with different\n # timezones returns False instead of raising an error\n assert not d2.divisions[0] == s2badtype[0]\n else:\n with pytest.raises(TypeError):\n d2.divisions[0] == s2badtype[0]\n\n\ndef test_set_index_npartitions():\n # https://github.com/dask/dask/issues/6974\n data = pd.DataFrame(\n index=pd.Index(\n [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\"]\n )\n )\n data = dd.from_pandas(data, npartitions=2)\n output = data.reset_index().set_index(\"index\", npartitions=1)\n assert output.npartitions == 1\n\n\n@pytest.mark.parametrize(\"unit\", [\"ns\", \"us\"])\ndef test_set_index_datetime_precision(unit):\n # https://github.com/dask/dask/issues/6864\n\n df = pd.DataFrame(\n [\n [1567703791155681, 1],\n [1567703792155681, 2],\n [1567703790155681, 0],\n [1567703793155681, 3],\n ],\n columns=[\"ts\", \"rank\"],\n )\n df.ts = pd.to_datetime(df.ts, unit=unit)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf = ddf.set_index(\"ts\")\n\n assert_eq(ddf, df.set_index(\"ts\"))\n\n\n@pytest.mark.parametrize(\"drop\", [True, False])\ndef test_set_index_drop(drop):\n pdf = pd.DataFrame(\n {\n \"A\": list(\"ABAABBABAA\"),\n \"B\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n \"C\": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n assert_eq(ddf.set_index(\"A\", drop=drop), pdf.set_index(\"A\", drop=drop))\n assert_eq(ddf.set_index(\"B\", drop=drop), pdf.set_index(\"B\", drop=drop))\n assert_eq(ddf.set_index(\"C\", drop=drop), pdf.set_index(\"C\", drop=drop))\n assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))\n assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))\n assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))\n\n # numeric columns\n pdf = pd.DataFrame(\n {\n 0: list(\"ABAABBABAA\"),\n 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))\n assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))\n\n\ndef test_set_index_raises_error_on_bad_input():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(df, 2)\n\n msg = r\"Dask dataframe does not yet support multi-indexes\"\n with 
pytest.raises(NotImplementedError) as err:\n ddf.set_index([\"a\", \"b\"])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\", \"b\"]])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\"]])\n assert msg in str(err.value)\n\n\ndef test_set_index_sorted_true():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]})\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = a.set_index(\"x\", sorted=True)\n assert b.known_divisions\n assert set(a.dask).issubset(set(b.dask))\n\n for drop in [True, False]:\n assert_eq(a.set_index(\"x\", drop=drop), df.set_index(\"x\", drop=drop))\n assert_eq(\n a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)\n )\n assert_eq(\n a.set_index(a.x + 1, sorted=True, drop=drop),\n df.set_index(df.x + 1, drop=drop),\n )\n\n with pytest.raises(ValueError):\n a.set_index(a.z, sorted=True)\n\n\ndef test_set_index_sorted_single_partition():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=1)\n assert_eq(ddf.set_index(\"x\", sorted=True), df.set_index(\"x\"))\n\n\ndef test_set_index_sorted_min_max_same():\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [0, 0, 0]})\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [1, 1, 1]})\n\n aa = delayed(a)\n bb = delayed(b)\n\n df = dd.from_delayed([aa, bb], meta=a)\n assert not df.known_divisions\n\n df2 = df.set_index(\"y\", sorted=True)\n assert df2.divisions == (0, 1, 1)\n\n\ndef test_set_index_empty_partition():\n test_vals = [1, 2, 3]\n\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for conv in converters:\n df = pd.DataFrame(\n [{\"x\": conv(i), \"y\": i} for i in test_vals], columns=[\"x\", \"y\"]\n )\n ddf = dd.concat(\n [\n dd.from_pandas(df, npartitions=1),\n dd.from_pandas(df[df.y > df.y.max()], npartitions=1),\n ]\n )\n\n assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))\n assert assert_eq(ddf.set_index(\"x\"), df.set_index(\"x\"))\n\n\ndef test_set_index_on_empty():\n test_vals = [1, 2, 3, 4]\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for converter in converters:\n df = pd.DataFrame([{\"x\": converter(x), \"y\": x} for x in test_vals])\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert ddf.npartitions > 1\n\n ddf = ddf[ddf.y > df.y.max()].set_index(\"x\")\n expected_df = df[df.y > df.y.max()].set_index(\"x\")\n\n assert assert_eq(ddf, expected_df, **CHECK_FREQ)\n assert ddf.npartitions == 1\n\n\ndef test_set_index_categorical():\n # https://github.com/dask/dask/issues/5671\n order = list(reversed(string.ascii_letters))\n values = list(string.ascii_letters)\n random.shuffle(values)\n dtype = pd.api.types.CategoricalDtype(order, ordered=True)\n df = pd.DataFrame({\"A\": pd.Categorical(values, dtype=dtype), \"B\": 1})\n\n result = dd.from_pandas(df, npartitions=2).set_index(\"A\")\n assert len(result) == len(df)\n\n # sorted with the metric defined by the Categorical\n divisions = pd.Categorical(result.divisions, dtype=dtype)\n assert_categorical_equal(divisions, divisions.sort_values())\n\n\ndef test_compute_divisions():\n from dask.dataframe.shuffle import compute_and_set_divisions\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]},\n index=[1, 3, 10, 20],\n )\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = 
compute_and_set_divisions(copy(a))\n\n assert_eq(a, b, check_divisions=False)\n assert b.known_divisions\n\n\ndef test_empty_partitions():\n # See https://github.com/dask/dask/issues/2408\n df = pd.DataFrame({\"a\": list(range(10))})\n df[\"b\"] = df[\"a\"] % 3\n df[\"c\"] = df[\"b\"].astype(str)\n\n ddf = dd.from_pandas(df, npartitions=3)\n ddf = ddf.set_index(\"b\")\n ddf = ddf.repartition(npartitions=3)\n ddf.get_partition(0).compute()\n assert_eq(ddf, df.set_index(\"b\"))\n\n ddf = ddf.set_index(\"c\")\n assert_eq(ddf, df.set_index(\"b\").set_index(\"c\"))\n\n\ndef test_remove_nans():\n tests = [\n ((1, 1, 2), (1, 1, 2)),\n ((None, 1, 2), (1, 1, 2)),\n ((1, None, 2), (1, 2, 2)),\n ((1, 2, None), (1, 2, 2)),\n ((1, 2, None, None), (1, 2, 2, 2)),\n ((None, None, 1, 2), (1, 1, 1, 2)),\n ((1, None, None, 2), (1, 2, 2, 2)),\n ((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),\n ]\n\n converters = [\n (int, np.nan),\n (float, np.nan),\n (str, np.nan),\n (lambda x: pd.to_datetime(x, unit=\"ns\"), np.datetime64(\"NaT\")),\n ]\n\n for conv, none_val in converters:\n for inputs, expected in tests:\n params = [none_val if x is None else conv(x) for x in inputs]\n expected = [conv(x) for x in expected]\n assert remove_nans(params) == expected\n\n\n@pytest.mark.slow\ndef test_gh_2730():\n large = pd.DataFrame({\"KEY\": np.arange(0, 50000)})\n small = pd.DataFrame({\"KEY\": np.arange(25, 500)})\n\n dd_left = dd.from_pandas(small, npartitions=3)\n dd_right = dd.from_pandas(large, npartitions=257)\n\n with dask.config.set(shuffle=\"tasks\", scheduler=\"sync\"):\n dd_merged = dd_left.merge(dd_right, how=\"inner\", on=\"KEY\")\n result = dd_merged.compute()\n\n expected = large.merge(small, how=\"inner\", on=\"KEY\")\n\n tm.assert_frame_equal(result.sort_values(\"KEY\").reset_index(drop=True), expected)\n\n\n@pytest.mark.parametrize(\"npartitions\", [None, \"auto\"])\ndef test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):\n # Atomic counter\n count = itertools.count()\n\n def increment():\n next(count)\n\n def make_part(dummy, n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})\n\n nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n\n dsk = {(\"inc\", i): (increment,) for i in range(nparts)}\n dsk.update({(\"x\", i): (make_part, (\"inc\", i), n) for i in range(nparts)})\n ddf = dd.DataFrame(dsk, \"x\", make_part(None, 1), [None] * (nparts + 1))\n\n ddf.set_index(\"x\", npartitions=npartitions)\n ntimes = next(count)\n assert ntimes == nparts\n\n\ndef test_set_index_errors_with_inplace_kwarg():\n df = pd.DataFrame({\"a\": [9, 8, 7], \"b\": [6, 5, 4], \"c\": [3, 2, 1]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.set_index(\"a\")\n\n with pytest.raises(NotImplementedError):\n ddf.set_index(\"a\", inplace=True)\n\n\ndef test_set_index_timestamp():\n df = pd.DataFrame({\"A\": pd.date_range(\"2000\", periods=12, tz=\"US/Central\"), \"B\": 1})\n ddf = dd.from_pandas(df, 2)\n divisions = (\n pd.Timestamp(\"2000-01-01 00:00:00-0600\", tz=\"US/Central\"),\n pd.Timestamp(\"2000-01-12 00:00:00-0600\", tz=\"US/Central\"),\n )\n\n # Note: `freq` is lost during round trip\n df2 = df.set_index(\"A\")\n ddf_new_div = ddf.set_index(\"A\", divisions=divisions)\n for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):\n assert ts1.value == ts2.value\n assert ts1.tz == ts2.tz\n\n assert_eq(df2, ddf_new_div, **CHECK_FREQ)\n assert_eq(df2, ddf.set_index(\"A\"), **CHECK_FREQ)\n\n\n@pytest.mark.parametrize(\"compression\", [None, \"ZLib\"])\ndef 
test_disk_shuffle_with_compression_option(compression):\n # test if dataframe shuffle works both with and without compression\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n test_shuffle(\"disk\")\n\n\n@pytest.mark.parametrize(\"compression\", [\"UNKOWN_COMPRESSION_ALGO\"])\ndef test_disk_shuffle_with_unknown_compression(compression):\n # test if dask raises an error in case of fault config string\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n with pytest.raises(\n ImportError,\n match=(\n \"Not able to import and load {} as compression algorithm.\"\n \"Please check if the library is installed and supported by Partd.\".format(\n compression\n )\n ),\n ):\n test_shuffle(\"disk\")\n\n\ndef test_disk_shuffle_check_actual_compression():\n # test if the compression switch is really respected by testing the size of the actual partd-data on disk\n def generate_raw_partd_file(compression):\n # generate and write a dummy dataframe to disk and return the raw data bytes\n df1 = pd.DataFrame({\"a\": list(range(10000))})\n df1[\"b\"] = (df1[\"a\"] * 123).astype(str)\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n p1 = maybe_buffered_partd(buffer=False, tempdir=None)()\n p1.append({\"x\": df1})\n # get underlying filename from partd - depending on nested structure of partd object\n filename = (\n p1.partd.partd.filename(\"x\") if compression else p1.partd.filename(\"x\")\n )\n with open(filename, \"rb\") as f:\n return f.read()\n\n # get compressed and uncompressed raw data\n uncompressed_data = generate_raw_partd_file(compression=None)\n compressed_data = generate_raw_partd_file(compression=\"BZ2\")\n\n assert len(uncompressed_data) > len(compressed_data)\n\n\n@pytest.mark.parametrize(\"ignore_index\", [None, True, False])\n@pytest.mark.parametrize(\n \"on\", [\"id\", \"name\", [\"id\", \"name\"], pd.Series([\"id\", \"name\"])]\n)\n@pytest.mark.parametrize(\"max_branch\", [None, 4])\ndef test_dataframe_shuffle_on_arg(on, ignore_index, max_branch, shuffle_method):\n # Make sure DataFrame.shuffle API returns the same result\n # whether the ``on`` argument is a list of column names,\n # or a separate DataFrame with equivalent values...\n df_in = dask.datasets.timeseries(\n \"2000\",\n \"2001\",\n types={\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n if isinstance(on, str):\n ext_on = df_in[[on]].copy()\n else:\n ext_on = df_in[on].copy()\n df_out_1 = df_in.shuffle(\n on, shuffle=shuffle_method, ignore_index=ignore_index, max_branch=max_branch\n )\n df_out_2 = df_in.shuffle(ext_on, shuffle=shuffle_method, ignore_index=ignore_index)\n\n assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))\n\n # disk shuffling doesn't support ignore_index\n if ignore_index and shuffle_method == \"tasks\":\n assert df_out_1.index.dtype != df_in.index.dtype\n else:\n assert df_out_1.index.dtype == df_in.index.dtype\n\n\ndef test_set_index_overlap():\n A = pd.DataFrame({\"key\": [1, 2, 3, 4, 4, 5, 6, 7], \"value\": list(\"abcd\" * 2)})\n a = dd.from_pandas(A, npartitions=2)\n a = a.set_index(\"key\", sorted=True)\n b = a.repartition(divisions=a.divisions)\n assert_eq(a, b)\n\n\ndef test_set_index_overlap_2():\n data = pd.DataFrame(\n index=pd.Index(\n [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\"],\n name=\"index\",\n )\n )\n ddf1 = dd.from_pandas(data, npartitions=2)\n ddf2 = ddf1.reset_index().repartition(8).set_index(\"index\", 
sorted=True)\n\n assert_eq(ddf1, ddf2)\n assert ddf2.npartitions == 8\n\n\ndef test_shuffle_hlg_layer():\n # This test checks that the `ShuffleLayer` HLG Layer\n # is used (as expected) for a multi-stage shuffle.\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=10\n )\n # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks\n ddf_shuffled = ddf.shuffle(\"a\", max_branch=3, shuffle=\"tasks\")\n keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]\n\n # Cull the HLG\n dsk = ddf_shuffled.__dask_graph__()\n dsk_culled = dsk.cull(set(keys))\n assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)\n\n # Ensure we have ShuffleLayers\n assert any(\n isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()\n )\n\n # Check that the ShuffleLayers are non-materialized\n for layer in dsk.layers.values():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert not hasattr(layer, \"_cached_dict\")\n\n # Make sure HLG culling reduces the graph size\n assert len(dsk_culled) < len(dsk)\n\n # Check ShuffleLayer names\n for name, layer in dsk.layers.items():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert name.startswith(\"shuffle-\")\n\n # Since we already culled the HLG,\n # culling the dictionary should not change the graph\n dsk_dict = dict(dsk_culled)\n dsk_dict_culled, _ = cull(dsk_dict, keys)\n assert dsk_dict_culled == dsk_dict\n\n\n@pytest.mark.parametrize(\n \"npartitions\",\n [\n 10, # ShuffleLayer\n 1, # SimpleShuffleLayer\n ],\n)\ndef test_shuffle_hlg_layer_serialize(npartitions):\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=npartitions\n )\n # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks\n ddf_shuffled = ddf.shuffle(\"a\", max_branch=3, shuffle=\"tasks\")\n\n # Ensure shuffle layers can be serialized and don't result in\n # the underlying low-level graph being materialized\n dsk = ddf_shuffled.__dask_graph__()\n for layer in dsk.layers.values():\n if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):\n continue\n assert not hasattr(layer, \"_cached_dict\")\n layer_roundtrip = pickle.loads(pickle.dumps(layer))\n assert type(layer_roundtrip) == type(layer)\n assert not hasattr(layer_roundtrip, \"_cached_dict\")\n assert layer_roundtrip.keys() == layer.keys()\n\n\ndef test_set_index_nan_partition():\n d[d.a > 3].set_index(\"a\") # Set index with 1 null partition\n d[d.a > 1].set_index(\"a\", sorted=True) # Set sorted index with 0 null partitions\n a = d[d.a > 3].set_index(\"a\", sorted=True) # Set sorted index with 1 null partition\n assert_eq(a, a)\n\n\n@pytest.mark.parametrize(\"ascending\", [True, False])\n@pytest.mark.parametrize(\"by\", [\"a\", \"b\"])\n@pytest.mark.parametrize(\"nelem\", [10, 500])\ndef test_sort_values(nelem, by, ascending):\n np.random.seed(0)\n df = pd.DataFrame()\n df[\"a\"] = np.ascontiguousarray(np.arange(nelem)[::-1])\n df[\"b\"] = np.arange(100, nelem + 100)\n ddf = dd.from_pandas(df, npartitions=10)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending)\n expect = df.sort_values(by=by, ascending=ascending)\n dd.assert_eq(got, expect, check_index=False)\n\n\n@pytest.mark.parametrize(\"ascending\", [True, False, [False, True], [True, False]])\n@pytest.mark.parametrize(\"by\", [[\"a\", \"b\"], [\"b\", \"a\"]])\n@pytest.mark.parametrize(\"nelem\", 
[10, 500])\ndef test_sort_values_single_partition(nelem, by, ascending):\n np.random.seed(0)\n df = pd.DataFrame()\n df[\"a\"] = np.ascontiguousarray(np.arange(nelem)[::-1])\n df[\"b\"] = np.arange(100, nelem + 100)\n ddf = dd.from_pandas(df, npartitions=1)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending)\n expect = df.sort_values(by=by, ascending=ascending)\n dd.assert_eq(got, expect, check_index=False)\n\n\n@pytest.mark.parametrize(\"na_position\", [\"first\", \"last\"])\n@pytest.mark.parametrize(\"ascending\", [True, False])\n@pytest.mark.parametrize(\"by\", [\"a\", \"b\"])\n@pytest.mark.parametrize(\"nparts\", [1, 5])\n@pytest.mark.parametrize(\n \"data\",\n [\n {\n \"a\": list(range(50)) + [None] * 50 + list(range(50, 100)), # type: ignore\n \"b\": [None] * 100 + list(range(100, 150)), # type: ignore\n },\n {\n \"a\": list(range(15)) + [None] * 5, # type: ignore\n \"b\": list(reversed(range(20))),\n },\n ],\n)\ndef test_sort_values_with_nulls(data, nparts, by, ascending, na_position):\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, npartitions=nparts)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending, na_position=na_position)\n expect = df.sort_values(by=by, ascending=ascending, na_position=na_position)\n dd.assert_eq(got, expect, check_index=False)\n\n\ndef test_shuffle_values_raises():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(\n ValueError, match=\"na_position must be either 'first' or 'last'\"\n ):\n ddf.sort_values(by=\"a\", na_position=\"invalid\")\n\n\ndef test_shuffle_by_as_list():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=[\"a\"], npartitions=\"auto\", ascending=True)\n expect = pd.DataFrame({\"a\": [1, 2, 3]})\n dd.assert_eq(got, expect, check_index=False)\n\n\ndef test_noop():\n assert _noop(1, None) == 1\n assert _noop(\"test\", None) == \"test\"\n\n\n@pytest.mark.parametrize(\"by\", [[\"a\", \"b\"], [\"b\", \"a\"]])\n@pytest.mark.parametrize(\"nparts\", [1, 10])\ndef test_sort_values_custom_function(by, nparts):\n df = pd.DataFrame({\"a\": [1, 2, 3] * 20, \"b\": [4, 5, 6, 7] * 15})\n ddf = dd.from_pandas(df, npartitions=nparts)\n\n def f(partition, by_columns, ascending, na_position, **kwargs):\n return partition.sort_values(\n by_columns, ascending=ascending, na_position=na_position\n )\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(\n by=by[0], sort_function=f, sort_function_kwargs={\"by_columns\": by}\n )\n expect = df.sort_values(by=by)\n dd.assert_eq(got, expect, check_index=False)\n\n\ndef test_sort_values_bool_ascending():\n df = pd.DataFrame({\"a\": [1, 2, 3] * 20, \"b\": [4, 5, 6, 7] * 15})\n ddf = dd.from_pandas(df, npartitions=10)\n\n # attempt to sort with list of ascending booleans\n with pytest.raises(NotImplementedError):\n ddf.sort_values(by=\"a\", ascending=[True, False])\n",
"import warnings\nfrom numbers import Integral, Number\n\nimport numpy as np\nfrom tlz import concat, get, partial\nfrom tlz.curried import map\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import ArrayOverlapLayer\nfrom ..utils import derived_from\nfrom . import chunk, numpy_compat\nfrom .core import Array, concatenate, map_blocks, unify_chunks\nfrom .creation import empty_like, full_like\n\n\ndef _overlap_internal_chunks(original_chunks, axes):\n \"\"\"Get new chunks for array with overlap.\"\"\"\n chunks = []\n for i, bds in enumerate(original_chunks):\n depth = axes.get(i, 0)\n if isinstance(depth, tuple):\n left_depth = depth[0]\n right_depth = depth[1]\n else:\n left_depth = depth\n right_depth = depth\n\n if len(bds) == 1:\n chunks.append(bds)\n else:\n left = [bds[0] + right_depth]\n right = [bds[-1] + left_depth]\n mid = []\n for bd in bds[1:-1]:\n mid.append(bd + left_depth + right_depth)\n chunks.append(left + mid + right)\n return chunks\n\n\ndef overlap_internal(x, axes):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n axes: dict\n The size of the shared boundary per axis\n\n The axes input informs how many cells to overlap between neighboring blocks\n {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis\n \"\"\"\n token = tokenize(x, axes)\n name = \"overlap-\" + token\n\n graph = ArrayOverlapLayer(\n name=x.name,\n axes=axes,\n chunks=x.chunks,\n numblocks=x.numblocks,\n token=token,\n )\n graph = HighLevelGraph.from_collections(name, graph, dependencies=[x])\n chunks = _overlap_internal_chunks(x.chunks, axes)\n\n return Array(graph, name, chunks, meta=x)\n\n\ndef trim_overlap(x, depth, boundary=None):\n \"\"\"Trim sides from each block.\n\n This couples well with the ``map_overlap`` operation which may leave\n excess data on each block.\n\n See also\n --------\n dask.array.overlap.map_overlap\n\n \"\"\"\n\n # parameter to be passed to trim_internal\n axes = coerce_depth(x.ndim, depth)\n return trim_internal(x, axes=axes, boundary=boundary)\n\n\ndef trim_internal(x, axes, boundary=None):\n \"\"\"Trim sides from each block\n\n This couples well with the overlap operation, which may leave excess data on\n each block\n\n See also\n --------\n dask.array.chunk.trim\n dask.array.map_blocks\n \"\"\"\n boundary = coerce_boundary(x.ndim, boundary)\n\n olist = []\n for i, bd in enumerate(x.chunks):\n bdy = boundary.get(i, \"none\")\n overlap = axes.get(i, 0)\n ilist = []\n for j, d in enumerate(bd):\n if bdy != \"none\":\n if isinstance(overlap, tuple):\n d = d - sum(overlap)\n else:\n d = d - overlap * 2\n\n else:\n if isinstance(overlap, tuple):\n d = d - overlap[0] if j != 0 else d\n d = d - overlap[1] if j != len(bd) - 1 else d\n else:\n d = d - overlap if j != 0 else d\n d = d - overlap if j != len(bd) - 1 else d\n\n ilist.append(d)\n olist.append(tuple(ilist))\n chunks = tuple(olist)\n\n return map_blocks(\n partial(_trim, axes=axes, boundary=boundary),\n x,\n chunks=chunks,\n dtype=x.dtype,\n meta=x._meta,\n )\n\n\ndef _trim(x, axes, boundary, block_info):\n \"\"\"Similar to dask.array.chunk.trim but requires one to specificy the\n boundary condition.\n\n ``axes``, and ``boundary`` are assumed to have been coerced.\n\n \"\"\"\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n axes_front = (ax[0] if isinstance(ax, tuple) else ax for ax in axes)\n axes_back = (\n -ax[1]\n if isinstance(ax, tuple) and ax[1]\n else -ax\n if isinstance(ax, Integral) and ax\n else 
None\n for ax in axes\n )\n\n trim_front = (\n 0 if (chunk_location == 0 and boundary.get(i, \"none\") == \"none\") else ax\n for i, (chunk_location, ax) in enumerate(\n zip(block_info[0][\"chunk-location\"], axes_front)\n )\n )\n trim_back = (\n None\n if (chunk_location == chunks - 1 and boundary.get(i, \"none\") == \"none\")\n else ax\n for i, (chunks, chunk_location, ax) in enumerate(\n zip(block_info[0][\"num-chunks\"], block_info[0][\"chunk-location\"], axes_back)\n )\n )\n ind = tuple(slice(front, back) for front, back in zip(trim_front, trim_back))\n return x[ind]\n\n\ndef periodic(x, axis, depth):\n \"\"\"Copy a slice of an array around to its other side\n\n Useful to create periodic boundary conditions for overlap\n \"\"\"\n\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, depth),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-depth, None),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([r, x, l], axis=axis)\n\n\ndef reflect(x, axis, depth):\n \"\"\"Reflect boundaries of array on the same side\n\n This is the converse of ``periodic``\n \"\"\"\n if depth == 1:\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n else:\n left = (\n (slice(None, None, None),) * axis\n + (slice(depth - 1, None, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -depth - 1, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)\n\n\ndef nearest(x, axis, depth):\n \"\"\"Each reflect each boundary value outwards\n\n This mimics what the skimage.filters.gaussian_filter(... 
mode=\"nearest\")\n does.\n \"\"\"\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -2, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n\n l = concatenate([x[left]] * depth, axis=axis)\n r = concatenate([x[right]] * depth, axis=axis)\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)\n\n\ndef constant(x, axis, depth, value):\n \"\"\"Add constant slice to either side of array\"\"\"\n chunks = list(x.chunks)\n chunks[axis] = (depth,)\n\n c = full_like(\n x,\n value,\n shape=tuple(map(sum, chunks)),\n chunks=tuple(chunks),\n dtype=x.dtype,\n )\n\n return concatenate([c, x, c], axis=axis)\n\n\ndef _remove_overlap_boundaries(l, r, axis, depth):\n lchunks = list(l.chunks)\n lchunks[axis] = (depth,)\n rchunks = list(r.chunks)\n rchunks[axis] = (depth,)\n\n l = l.rechunk(tuple(lchunks))\n r = r.rechunk(tuple(rchunks))\n return l, r\n\n\ndef boundaries(x, depth=None, kind=None):\n \"\"\"Add boundary conditions to an array before overlaping\n\n See Also\n --------\n periodic\n constant\n \"\"\"\n if not isinstance(kind, dict):\n kind = {i: kind for i in range(x.ndim)}\n if not isinstance(depth, dict):\n depth = {i: depth for i in range(x.ndim)}\n\n for i in range(x.ndim):\n d = depth.get(i, 0)\n if d == 0:\n continue\n\n this_kind = kind.get(i, \"none\")\n if this_kind == \"none\":\n continue\n elif this_kind == \"periodic\":\n x = periodic(x, i, d)\n elif this_kind == \"reflect\":\n x = reflect(x, i, d)\n elif this_kind == \"nearest\":\n x = nearest(x, i, d)\n elif i in kind:\n x = constant(x, i, d, kind[i])\n\n return x\n\n\ndef ensure_minimum_chunksize(size, chunks):\n \"\"\"Determine new chunks to ensure that every chunk >= size\n\n Parameters\n ----------\n size: int\n The maximum size of any chunk.\n chunks: tuple\n Chunks along one axis, e.g. ``(3, 3, 2)``\n\n Examples\n --------\n >>> ensure_minimum_chunksize(10, (20, 20, 1))\n (20, 11, 10)\n >>> ensure_minimum_chunksize(3, (1, 1, 3))\n (5,)\n\n See Also\n --------\n overlap\n \"\"\"\n if size <= min(chunks):\n return chunks\n\n # add too-small chunks to chunks before them\n output = []\n new = 0\n for c in chunks:\n if c < size:\n if new > size + (size - c):\n output.append(new - (size - c))\n new = size\n else:\n new += c\n if new >= size:\n output.append(new)\n new = 0\n if c >= size:\n new += c\n if new >= size:\n output.append(new)\n elif len(output) >= 1:\n output[-1] += new\n else:\n raise ValueError(\n f\"The overlapping depth {size} is larger than your \" f\"array {sum(chunks)}.\"\n )\n\n return tuple(output)\n\n\ndef overlap(x, depth, boundary):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n depth: dict\n The size of the shared boundary per axis\n boundary: dict\n The boundary condition on each axis. Options are 'reflect', 'periodic',\n 'nearest', 'none', or an array value. 
Such a value will fill the\n boundary with that value.\n\n The depth input informs how many cells to overlap between neighboring\n blocks ``{0: 2, 2: 5}`` means share two cells in 0 axis, 5 cells in 2 axis.\n Axes missing from this input will not be overlapped.\n\n Any axis containing chunks smaller than depth will be rechunked if\n possible.\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.arange(64).reshape((8, 8))\n >>> d = da.from_array(x, chunks=(4, 4))\n >>> d.chunks\n ((4, 4), (4, 4))\n\n >>> g = da.overlap.overlap(d, depth={0: 2, 1: 1},\n ... boundary={0: 100, 1: 'reflect'})\n >>> g.chunks\n ((8, 8), (6, 6))\n\n >>> np.array(g)\n array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],\n [ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],\n [ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])\n \"\"\"\n depth2 = coerce_depth(x.ndim, depth)\n boundary2 = coerce_boundary(x.ndim, boundary)\n\n # rechunk if new chunks are needed to fit depth in every chunk\n depths = [max(d) if isinstance(d, tuple) else d for d in depth2.values()]\n new_chunks = tuple(\n ensure_minimum_chunksize(size, c) for size, c in zip(depths, x.chunks)\n )\n x1 = x.rechunk(new_chunks) # this is a no-op if x.chunks == new_chunks\n\n x2 = boundaries(x1, depth2, boundary2)\n x3 = overlap_internal(x2, depth2)\n trim = {\n k: v * 2 if boundary2.get(k, \"none\") != \"none\" else 0 for k, v in depth2.items()\n }\n x4 = chunk.trim(x3, trim)\n return x4\n\n\ndef add_dummy_padding(x, depth, boundary):\n \"\"\"\n Pads an array which has 'none' as the boundary type.\n Used to simplify trimming arrays which use 'none'.\n\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n >>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE\n array([..., 0, 1, 2, 3, 4, 5, ...])\n \"\"\"\n for k, v in boundary.items():\n d = depth.get(k, 0)\n if v == \"none\" and d > 0:\n empty_shape = list(x.shape)\n empty_shape[k] = d\n\n empty_chunks = list(x.chunks)\n empty_chunks[k] = (d,)\n\n empty = empty_like(\n getattr(x, \"_meta\", x),\n shape=empty_shape,\n chunks=empty_chunks,\n dtype=x.dtype,\n )\n\n out_chunks = list(x.chunks)\n ax_chunks = list(out_chunks[k])\n ax_chunks[0] += d\n ax_chunks[-1] += d\n out_chunks[k] = tuple(ax_chunks)\n\n x = concatenate([empty, x, empty], axis=k)\n x = x.rechunk(out_chunks)\n return x\n\n\ndef map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n \"\"\"Map a function over blocks of arrays with some overlap\n\n We share neighboring zones between blocks of the array, map a\n function, and then trim away the neighboring strips. 
If depth is\n larger than any chunk along a particular axis, then the array is\n rechunked.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in ``map_blocks`` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block.\n If multiple arrays are provided, then the function should expect to\n receive chunks of each array in the same order.\n args : dask arrays\n depth: int, tuple, dict or list\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis.\n If a list then each element of that list must be an int, tuple or dict\n defining depth for the corresponding array in `args`.\n Asymmetric depths may be specified using a dict value of (-/+) tuples.\n Note that asymmetric depths are currently only supported when\n ``boundary`` is 'none'.\n The default value is 0.\n boundary: str, tuple, dict or list\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan.\n If a list then each element must be a str, tuple or dict defining the\n boundary for the corresponding array in `args`.\n The default value is 'reflect'.\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n align_arrays: bool\n Whether or not to align chunks along equally sized dimensions when\n multiple arrays are provided. This allows for larger chunks in some\n arrays to be broken into smaller ones that match chunk sizes in other\n arrays such that they are compatible for block function mapping. If\n this is false, then an error will be thrown if arrays do not already\n have the same number of blocks in each dimension.\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect').compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n The ``da.map_overlap`` function can also accept multiple arrays.\n\n >>> func = lambda x, y: x + y\n >>> x = da.arange(8).reshape(2, 4).rechunk((1, 2))\n >>> y = da.arange(4).rechunk(2)\n >>> da.map_overlap(func, x, y, depth=1, boundary='reflect').compute() # doctest: +NORMALIZE_WHITESPACE\n array([[ 0, 2, 4, 6],\n [ 4, 6, 8, 10]])\n\n When multiple arrays are given, they do not need to have the\n same number of dimensions but they must broadcast together.\n Arrays are aligned block by block (just as in ``da.map_blocks``)\n so the blocks must have a common chunk size. 
This common chunking\n is determined automatically as long as ``align_arrays`` is True.\n\n >>> x = da.arange(8, chunks=4)\n >>> y = da.arange(8, chunks=2)\n >>> r = da.map_overlap(func, x, y, depth=1, boundary='reflect', align_arrays=True)\n >>> len(r.to_delayed())\n 4\n\n >>> da.map_overlap(func, x, y, depth=1, boundary='reflect', align_arrays=False).compute()\n Traceback (most recent call last):\n ...\n ValueError: Shapes do not align {'.0': {2, 4}}\n\n Note also that this function is equivalent to ``map_blocks``\n by default. A non-zero ``depth`` must be defined for any\n overlap to appear in the arrays provided to ``func``.\n\n >>> func = lambda x: x.sum()\n >>> x = da.ones(10, dtype='int')\n >>> block_args = dict(chunks=(), drop_axis=0)\n >>> da.map_blocks(func, x, **block_args).compute()\n 10\n >>> da.map_overlap(func, x, **block_args, boundary='reflect').compute()\n 10\n >>> da.map_overlap(func, x, **block_args, depth=1, boundary='reflect').compute()\n 12\n\n For functions that may not handle 0-d arrays, it's also possible to specify\n ``meta`` with an empty array matching the type of the expected result. In\n the example below, ``func`` will result in an ``IndexError`` when computing\n ``meta``:\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n Similarly, it's possible to specify a non-NumPy array to ``meta``:\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n \"\"\"\n # Look for invocation using deprecated single-array signature\n # map_overlap(x, func, depth, boundary=None, trim=True, **kwargs)\n if isinstance(func, Array) and callable(args[0]):\n warnings.warn(\n \"The use of map_overlap(array, func, **kwargs) is deprecated since dask 2.17.0 \"\n \"and will be an error in a future release. 
To silence this warning, use the syntax \"\n \"map_overlap(func, array0,[ array1, ...,] **kwargs) instead.\",\n FutureWarning,\n )\n sig = [\"func\", \"depth\", \"boundary\", \"trim\"]\n depth = get(sig.index(\"depth\"), args, depth)\n boundary = get(sig.index(\"boundary\"), args, boundary)\n trim = get(sig.index(\"trim\"), args, trim)\n func, args = args[0], [func]\n\n if not callable(func):\n raise TypeError(\n \"First argument must be callable function, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(type(func).__name__)\n )\n if not all(isinstance(x, Array) for x in args):\n raise TypeError(\n \"All variadic arguments must be arrays, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(\n [type(x).__name__ for x in args]\n )\n )\n\n # Coerce depth and boundary arguments to lists of individual\n # specifications for each array argument\n def coerce(xs, arg, fn):\n if not isinstance(arg, list):\n arg = [arg] * len(xs)\n return [fn(x.ndim, a) for x, a in zip(xs, arg)]\n\n depth = coerce(args, depth, coerce_depth)\n boundary = coerce(args, boundary, coerce_boundary)\n\n # Align chunks in each array to a common size\n if align_arrays:\n # Reverse unification order to allow block broadcasting\n inds = [list(reversed(range(x.ndim))) for x in args]\n _, args = unify_chunks(*list(concat(zip(args, inds))), warn=False)\n\n # Escape to map_blocks if depth is zero (a more efficient computation)\n if all([all(depth_val == 0 for depth_val in d.values()) for d in depth]):\n return map_blocks(func, *args, **kwargs)\n\n for i, x in enumerate(args):\n for j in range(x.ndim):\n if isinstance(depth[i][j], tuple) and boundary[i][j] != \"none\":\n raise NotImplementedError(\n \"Asymmetric overlap is currently only implemented \"\n \"for boundary='none', however boundary for dimension \"\n \"{} in array argument {} is {}\".format(j, i, boundary[i][j])\n )\n\n def assert_int_chunksize(xs):\n assert all(type(c) is int for x in xs for cc in x.chunks for c in cc)\n\n assert_int_chunksize(args)\n if not trim and \"chunks\" not in kwargs:\n kwargs[\"chunks\"] = args[0].chunks\n args = [overlap(x, depth=d, boundary=b) for x, d, b in zip(args, depth, boundary)]\n assert_int_chunksize(args)\n x = map_blocks(func, *args, **kwargs)\n assert_int_chunksize([x])\n if trim:\n # Find index of array argument with maximum rank and break ties by choosing first provided\n i = sorted(enumerate(args), key=lambda v: (v[1].ndim, -v[0]))[-1][0]\n # Trim using depth/boundary setting for array of highest rank\n depth = depth[i]\n boundary = boundary[i]\n # remove any dropped axes from depth and boundary variables\n drop_axis = kwargs.pop(\"drop_axis\", None)\n if drop_axis is not None:\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n\n # convert negative drop_axis to equivalent positive value\n ndim_out = max(a.ndim for a in args if isinstance(a, Array))\n drop_axis = [d % ndim_out for d in drop_axis]\n\n kept_axes = tuple(ax for ax in range(args[i].ndim) if ax not in drop_axis)\n # note that keys are relabeled to match values in range(x.ndim)\n depth = {n: depth[ax] for n, ax in enumerate(kept_axes)}\n boundary = {n: boundary[ax] for n, ax in enumerate(kept_axes)}\n return trim_internal(x, depth, boundary)\n else:\n return x\n\n\ndef coerce_depth(ndim, depth):\n default = 0\n if depth is None:\n depth = default\n if isinstance(depth, Integral):\n depth = (depth,) * ndim\n if isinstance(depth, tuple):\n 
depth = dict(zip(range(ndim), depth))\n if isinstance(depth, dict):\n depth = {ax: depth.get(ax, default) for ax in range(ndim)}\n return coerce_depth_type(ndim, depth)\n\n\ndef coerce_depth_type(ndim, depth):\n for i in range(ndim):\n if isinstance(depth[i], tuple):\n depth[i] = tuple(int(d) for d in depth[i])\n else:\n depth[i] = int(depth[i])\n return depth\n\n\ndef coerce_boundary(ndim, boundary):\n default = \"none\"\n if boundary is None:\n boundary = default\n if not isinstance(boundary, (tuple, dict)):\n boundary = (boundary,) * ndim\n if isinstance(boundary, tuple):\n boundary = dict(zip(range(ndim), boundary))\n if isinstance(boundary, dict):\n boundary = {ax: boundary.get(ax, default) for ax in range(ndim)}\n return boundary\n\n\n@derived_from(numpy_compat)\ndef sliding_window_view(x, window_shape, axis=None):\n from numpy.core.numeric import normalize_axis_tuple\n\n window_shape = tuple(window_shape) if np.iterable(window_shape) else (window_shape,)\n\n window_shape_array = np.array(window_shape)\n if np.any(window_shape_array <= 0):\n raise ValueError(\"`window_shape` must contain values > 0\")\n\n if axis is None:\n axis = tuple(range(x.ndim))\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Since axis is `None`, must provide \"\n f\"window_shape for all dimensions of `x`; \"\n f\"got {len(window_shape)} window_shape elements \"\n f\"and `x.ndim` is {x.ndim}.\"\n )\n else:\n axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Must provide matching length window_shape and \"\n f\"axis; got {len(window_shape)} window_shape \"\n f\"elements and {len(axis)} axes elements.\"\n )\n\n depths = [0] * x.ndim\n for ax, window in zip(axis, window_shape):\n depths[ax] += window - 1\n\n # Ensure that each chunk is big enough to leave at least a size-1 chunk\n # after windowing (this is only really necessary for the last chunk).\n safe_chunks = tuple(\n ensure_minimum_chunksize(d + 1, c) for d, c in zip(depths, x.chunks)\n )\n x = x.rechunk(safe_chunks)\n\n # result.shape = x_shape_trimmed + window_shape,\n # where x_shape_trimmed is x.shape with every entry\n # reduced by one less than the corresponding window size.\n # trim chunks to match x_shape_trimmed\n newchunks = tuple(c[:-1] + (c[-1] - d,) for d, c in zip(depths, x.chunks)) + tuple(\n (window,) for window in window_shape\n )\n\n return map_overlap(\n numpy_compat.sliding_window_view,\n x,\n depth=tuple((0, d) for d in depths), # Overlap on +ve side only\n boundary=\"none\",\n meta=x._meta,\n new_axis=range(x.ndim, x.ndim + len(axis)),\n chunks=newchunks,\n trim=False,\n align_arrays=False,\n window_shape=window_shape,\n axis=axis,\n )\n",
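The entry above defines dask's block-overlap machinery (`overlap`, `trim_internal`, `map_overlap`, and the boundary helpers). As a brief, hedged illustration of how that API is typically exercised, here is a minimal usage sketch; it assumes only that `dask` and `numpy` are installed, and the `smooth` helper is a hypothetical example function for this sketch, not something defined in the module above.

```python
# Minimal sketch: a 3-cell moving average via map_overlap.
# `smooth` is a hypothetical helper used purely for illustration.
import numpy as np
import dask.array as da


def smooth(block):
    # Average each cell with its immediate neighbours inside the padded block.
    return (np.roll(block, 1) + block + np.roll(block, -1)) / 3.0


x = da.arange(20, chunks=5)

# depth=1 shares one cell with each neighbouring block before `smooth` runs,
# and boundary="reflect" mirrors the array edges so the outermost cells also
# see three values; the shared cells are trimmed away afterwards (trim=True).
y = da.map_overlap(smooth, x, depth=1, boundary="reflect")

print(y.compute())  # same shape as x, smoothed values
```

The overlap-then-trim round trip in this sketch is the path implemented by `overlap_internal` and `trim_internal` above; when every depth is zero, `map_overlap` falls back to a plain `map_blocks` call, as the source notes.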
"import glob\nimport math\nimport os\nimport sys\nimport warnings\nfrom decimal import Decimal\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as parse_version\n\nimport dask\nimport dask.dataframe as dd\nimport dask.multiprocessing\nfrom dask.blockwise import Blockwise, optimize_blockwise\nfrom dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130\nfrom dask.dataframe.io.parquet.utils import _parse_pandas_metadata\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\nfrom dask.dataframe.utils import assert_eq\nfrom dask.layers import DataFrameIOLayer\nfrom dask.utils import natural_sort_key\nfrom dask.utils_test import hlg_layer\n\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = False\n fastparquet_version = parse_version(\"0\")\nelse:\n fastparquet_version = parse_version(fastparquet.__version__)\n\n\ntry:\n import pyarrow as pa\nexcept ImportError:\n pa = False\n pa_version = parse_version(\"0\")\nelse:\n pa_version = parse_version(pa.__version__)\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = False\n\n\nSKIP_FASTPARQUET = not fastparquet\nFASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason=\"fastparquet not found\")\n\nif sys.platform == \"win32\" and pa and pa_version == parse_version(\"2.0.0\"):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = (\n \"skipping pyarrow 2.0.0 on windows: \"\n \"https://github.com/dask/dask/issues/6093\"\n \"|https://github.com/dask/dask/issues/6754\"\n )\nelse:\n SKIP_PYARROW = not pq\n SKIP_PYARROW_REASON = \"pyarrow not found\"\nPYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)\n\n# \"Legacy\" and \"Dataset\"-specific MARK definitions\nSKIP_PYARROW_LE = SKIP_PYARROW\nSKIP_PYARROW_LE_REASON = \"pyarrow not found\"\nSKIP_PYARROW_DS = SKIP_PYARROW\nSKIP_PYARROW_DS_REASON = \"pyarrow not found\"\nif not SKIP_PYARROW_LE:\n # NOTE: We should use PYARROW_LE_MARK to skip\n # pyarrow-legacy tests once pyarrow officially\n # removes ParquetDataset support in the future.\n PYARROW_LE_MARK = pytest.mark.filterwarnings(\n \"ignore::DeprecationWarning\",\n \"ignore::FutureWarning\",\n )\nelse:\n PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)\nPYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)\n\nANY_ENGINE_MARK = pytest.mark.skipif(\n SKIP_FASTPARQUET and SKIP_PYARROW,\n reason=\"No parquet engine (fastparquet or pyarrow) found\",\n)\n\n\nnrows = 40\nnpartitions = 15\ndf = pd.DataFrame(\n {\n \"x\": [i * 7 % 5 for i in range(nrows)], # Not sorted\n \"y\": [i * 2.5 for i in range(nrows)], # Sorted\n },\n index=pd.Index([10 * i for i in range(nrows)], name=\"myindex\"),\n)\n\nddf = dd.from_pandas(df, npartitions=npartitions)\n\n\n@pytest.fixture(\n params=[\n pytest.param(\"fastparquet\", marks=FASTPARQUET_MARK),\n pytest.param(\"pyarrow-legacy\", marks=PYARROW_LE_MARK),\n pytest.param(\"pyarrow-dataset\", marks=PYARROW_DS_MARK),\n ]\n)\ndef engine(request):\n return request.param\n\n\ndef write_read_engines(**kwargs):\n \"\"\"Product of both engines for write/read:\n\n To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,\n or `mark_engine=reason` to apply to all parameters with that engine.\"\"\"\n backends = {\"pyarrow-dataset\", \"pyarrow-legacy\", \"fastparquet\"}\n\n # Skip if uninstalled\n skip_marks = {\n \"fastparquet\": FASTPARQUET_MARK,\n \"pyarrow-legacy\": PYARROW_LE_MARK,\n \"pyarrow-dataset\": PYARROW_DS_MARK,\n }\n 
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}\n\n # Custom marks\n for kw, val in kwargs.items():\n kind, rest = kw.split(\"_\", 1)\n key = tuple(rest.split(\"_\"))\n if kind not in (\"xfail\", \"skip\") or len(key) > 2 or set(key) - backends:\n raise ValueError(\"unknown keyword %r\" % kw)\n val = getattr(pytest.mark, kind)(reason=val)\n if len(key) == 2:\n marks[key].append(val)\n else:\n for k in marks:\n if key in k:\n marks[k].append(val)\n\n return pytest.mark.parametrize(\n (\"write_engine\", \"read_engine\"),\n [pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],\n )\n\n\npyarrow_fastparquet_msg = \"pyarrow schema and pandas metadata may disagree\"\nwrite_read_engines_xfail = write_read_engines(\n **{\n \"xfail_pyarrow-dataset_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_pyarrow-legacy_fastparquet\": pyarrow_fastparquet_msg,\n }\n)\n\nif (\n fastparquet\n and fastparquet_version < parse_version(\"0.5\")\n and PANDAS_GT_110\n and not PANDAS_GT_121\n):\n # a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned\n # categorical columns when using fastparquet 0.4.x, but this was (accidentally)\n # fixed in fastparquet 0.5.0\n fp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\n fp_pandas_xfail = write_read_engines(\n **{\n \"xfail_pyarrow-dataset_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_pyarrow-legacy_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_fastparquet_fastparquet\": fp_pandas_msg,\n \"xfail_fastparquet_pyarrow-dataset\": fp_pandas_msg,\n \"xfail_fastparquet_pyarrow-legacy\": fp_pandas_msg,\n }\n )\nelse:\n fp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\n fp_pandas_xfail = write_read_engines()\n\n\n@PYARROW_MARK\ndef test_pyarrow_getengine():\n from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine\n from dask.dataframe.io.parquet.core import get_engine\n\n # Check that the default engine for \"pyarrow\"/\"arrow\"\n # is the `pyarrow.dataset`-based engine\n assert get_engine(\"pyarrow\") == ArrowDatasetEngine\n assert get_engine(\"arrow\") == ArrowDatasetEngine\n\n if SKIP_PYARROW_LE:\n with pytest.warns(FutureWarning):\n get_engine(\"pyarrow-legacy\")\n\n\n@write_read_engines()\ndef test_local(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n df.to_parquet(tmp, write_index=False, engine=write_engine)\n\n files = os.listdir(tmp)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(tmp, index=False, engine=read_engine)\n\n assert len(df2.divisions) > 1\n\n out = df2.compute(scheduler=\"sync\").reset_index()\n\n for column in df.columns:\n assert (data[column] == out[column]).all()\n\n\n@pytest.mark.parametrize(\"index\", [False, True])\n@write_read_engines_xfail\ndef test_empty(tmpdir, write_engine, read_engine, index):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})[:0]\n if index:\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(fn, write_index=index, engine=write_engine)\n read_df = dd.read_parquet(fn, engine=read_engine)\n 
assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_simple(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n if write_engine != \"fastparquet\":\n df = pd.DataFrame({\"a\": [b\"a\", b\"b\", b\"b\"], \"b\": [4, 5, 6]})\n else:\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n read_df = dd.read_parquet(fn, index=[\"a\"], engine=read_engine)\n assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_delayed_no_metadata(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(\n fn, engine=write_engine, compute=False, write_metadata_file=False\n ).compute()\n files = os.listdir(fn)\n assert \"_metadata\" not in files\n # Fastparquet doesn't currently handle a directory without \"_metadata\"\n read_df = dd.read_parquet(\n os.path.join(fn, \"*.parquet\"),\n index=[\"a\"],\n engine=read_engine,\n gather_statistics=True,\n )\n assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_read_glob(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"),\n engine=read_engine,\n index=\"myindex\", # Must specify index without _metadata\n gather_statistics=True,\n )\n assert_eq(ddf, ddf2)\n\n\n@write_read_engines()\ndef test_gather_statistics_false(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)\n\n ddf2 = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n index=False,\n gather_statistics=False,\n )\n assert_eq(ddf, ddf2, check_index=False, check_divisions=False)\n\n\n@write_read_engines()\ndef test_read_list(tmpdir, write_engine, read_engine):\n if write_engine == read_engine == \"fastparquet\" and os.name == \"nt\":\n # fastparquet or dask is not normalizing filepaths correctly on\n # windows.\n pytest.skip(\"filepath bug.\")\n\n tmpdir = str(tmpdir)\n ddf.to_parquet(tmpdir, engine=write_engine)\n files = sorted(\n (\n os.path.join(tmpdir, f)\n for f in os.listdir(tmpdir)\n if not f.endswith(\"_metadata\")\n ),\n key=natural_sort_key,\n )\n\n ddf2 = dd.read_parquet(\n files, engine=read_engine, index=\"myindex\", gather_statistics=True\n )\n assert_eq(ddf, ddf2)\n\n\n@write_read_engines()\ndef test_columns_auto_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # XFAIL, auto index selection no longer supported (for simplicity)\n # ### Empty columns ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, auto select index ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[\"x\"], engine=read_engine), ddf[[\"x\"]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[\"x\"], engine=read_engine, gather_statistics=False),\n 
ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_columns_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # With Index\n # ----------\n # ### Empty columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, index=\"myindex\"), ddf[[]]\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn, columns=[], engine=read_engine, index=\"myindex\", gather_statistics=False\n ),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\"], engine=read_engine),\n ddf[[\"x\"]],\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Two columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\", \"y\"], engine=read_engine),\n ddf,\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf.clear_divisions(),\n check_divisions=True,\n )\n\n\ndef test_nonsense_column(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n with pytest.raises((ValueError, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"], engine=engine)\n with pytest.raises((Exception, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"] + list(ddf.columns), engine=engine)\n\n\n@write_read_engines()\ndef test_columns_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = ddf.reset_index()\n\n # No Index\n # --------\n # All columns, none as index\n assert_eq(\n dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),\n ddf2,\n check_index=False,\n check_divisions=True,\n )\n\n # Two columns, none as index\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"x\", \"y\"]],\n check_index=False,\n check_divisions=True,\n )\n\n # One column and one index, all as columns\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"myindex\", \"x\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"myindex\", \"x\"]],\n check_index=False,\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_gather_statistics_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine, write_index=False)\n\n df = dd.read_parquet(fn, engine=read_engine, index=False)\n assert df.index.name is None\n assert not df.known_divisions\n\n\ndef test_columns_index_with_multi_index(tmpdir, engine):\n fn = os.path.join(str(tmpdir), \"test.parquet\")\n index = pd.MultiIndex.from_arrays(\n [np.arange(10), np.arange(10) + 1], names=[\"x0\", \"x1\"]\n )\n df = pd.DataFrame(np.random.randn(10, 2), columns=[\"a\", \"b\"], index=index)\n df2 = df.reset_index(drop=False)\n\n if engine == \"fastparquet\":\n fastparquet.write(fn, df.reset_index(), write_index=False)\n\n else:\n pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)\n\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n assert_eq(ddf, df)\n\n d = 
dd.read_parquet(fn, columns=\"a\", engine=engine, index=index.names)\n assert_eq(d, df[\"a\"])\n\n d = dd.read_parquet(fn, index=[\"a\", \"b\"], columns=[\"x0\", \"x1\"], engine=engine)\n assert_eq(d, df2.set_index([\"a\", \"b\"])[[\"x0\", \"x1\"]])\n\n # Just index\n d = dd.read_parquet(fn, index=False, engine=engine)\n assert_eq(d, df2)\n\n d = dd.read_parquet(fn, columns=[\"b\"], index=[\"a\"], engine=engine)\n assert_eq(d, df2.set_index(\"a\")[[\"b\"]])\n\n d = dd.read_parquet(fn, columns=[\"a\", \"b\"], index=[\"x0\"], engine=engine)\n assert_eq(d, df2.set_index(\"x0\")[[\"a\", \"b\"]])\n\n # Just columns\n d = dd.read_parquet(fn, columns=[\"x0\", \"a\"], index=[\"x1\"], engine=engine)\n assert_eq(d, df2.set_index(\"x1\")[[\"x0\", \"a\"]])\n\n # Both index and columns\n d = dd.read_parquet(fn, index=False, columns=[\"x0\", \"b\"], engine=engine)\n assert_eq(d, df2[[\"x0\", \"b\"]])\n\n for index in [\"x1\", \"b\"]:\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n assert_eq(d, df2.set_index(index)[[\"x0\", \"a\"]])\n\n # Columns and index intersect\n for index in [\"a\", \"x0\"]:\n with pytest.raises(ValueError):\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n\n # Series output\n for ind, col, sol_df in [\n (\"x1\", \"x0\", df2.set_index(\"x1\")),\n (False, \"b\", df2),\n (False, \"x0\", df2[[\"x0\"]]),\n (\"a\", \"x0\", df2.set_index(\"a\")[[\"x0\"]]),\n (\"a\", \"b\", df2.set_index(\"a\")),\n ]:\n d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)\n assert_eq(d, sol_df[col])\n\n\n@write_read_engines()\ndef test_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df, ddf2, check_index=False)\n\n\ndef test_read_series(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, columns=[\"x\"], index=\"myindex\", engine=engine)\n assert_eq(ddf[[\"x\"]], ddf2)\n\n ddf2 = dd.read_parquet(fn, columns=\"x\", index=\"myindex\", engine=engine)\n assert_eq(ddf.x, ddf2)\n\n\ndef test_names(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n\n def read(fn, **kwargs):\n return dd.read_parquet(fn, engine=engine, **kwargs)\n\n assert set(read(fn).dask) == set(read(fn).dask)\n\n assert set(read(fn).dask) != set(read(fn, columns=[\"x\"]).dask)\n\n assert set(read(fn, columns=(\"x\",)).dask) == set(read(fn, columns=[\"x\"]).dask)\n\n\n@write_read_engines()\ndef test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):\n fn = str(tmpdir.join(\"test.parquet\"))\n dfp = df.copy()\n dfp.index.name = \"index\"\n dfp.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n ddf = dd.read_parquet(fn, index=\"index\", engine=read_engine)\n assert_eq(dfp, ddf)\n\n\n@write_read_engines()\ndef test_categorical(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 100}, dtype=\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n ddf2 = dd.read_parquet(tmp, categories=\"x\", engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2 = dd.read_parquet(tmp, categories=[\"x\"], engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", 
\"c\"]\n\n # autocat\n if read_engine == \"fastparquet\":\n ddf2 = dd.read_parquet(tmp, engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2.loc[:1000].compute()\n assert assert_eq(df, ddf2)\n\n # dereference cats\n ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)\n\n ddf2.loc[:1000].compute()\n assert (df.x == ddf2.x.compute()).all()\n\n\ndef test_append(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n ddf2.to_parquet(tmp, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, engine=engine)\n assert_eq(df, ddf3)\n\n\ndef test_append_create(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp_path = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp_path, append=True, engine=engine)\n ddf2.to_parquet(tmp_path, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, ddf3)\n\n\ndef test_append_with_partition(tmpdir, engine):\n tmp = str(tmpdir)\n df0 = pd.DataFrame(\n {\n \"lat\": np.arange(0, 10, dtype=\"int64\"),\n \"lon\": np.arange(10, 20, dtype=\"int64\"),\n \"value\": np.arange(100, 110, dtype=\"int64\"),\n }\n )\n df0.index.name = \"index\"\n df1 = pd.DataFrame(\n {\n \"lat\": np.arange(10, 20, dtype=\"int64\"),\n \"lon\": np.arange(10, 20, dtype=\"int64\"),\n \"value\": np.arange(120, 130, dtype=\"int64\"),\n }\n )\n df1.index.name = \"index\"\n\n # Check that nullable dtypes work\n # (see: https://github.com/dask/dask/issues/8373)\n df0[\"lat\"] = df0[\"lat\"].astype(\"Int64\")\n df1[\"lat\"].iloc[0] = np.nan\n df1[\"lat\"] = df1[\"lat\"].astype(\"Int64\")\n\n dd_df0 = dd.from_pandas(df0, npartitions=1)\n dd_df1 = dd.from_pandas(df1, npartitions=1)\n dd.to_parquet(dd_df0, tmp, partition_on=[\"lon\"], engine=engine)\n dd.to_parquet(\n dd_df1,\n tmp,\n partition_on=[\"lon\"],\n append=True,\n ignore_divisions=True,\n engine=engine,\n )\n\n out = dd.read_parquet(\n tmp, engine=engine, index=\"index\", gather_statistics=True\n ).compute()\n # convert categorical to plain int just to pass assert\n out[\"lon\"] = out.lon.astype(\"int64\")\n # sort required since partitioning breaks index order\n assert_eq(\n out.sort_values(\"value\"), pd.concat([df0, df1])[out.columns], check_index=False\n )\n\n\ndef test_partition_on_cats(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=engine)\n df = 
dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"meta\", [False, True])\n@pytest.mark.parametrize(\"stats\", [False, True])\ndef test_partition_on_cats_pyarrow(tmpdir, stats, meta):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=\"pyarrow\", write_metadata_file=meta)\n df = dd.read_parquet(tmp, engine=\"pyarrow\", gather_statistics=stats)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\ndef test_partition_on_cats_2(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\", \"c\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n\n df = dd.read_parquet(tmp, columns=[\"a\", \"c\"], engine=engine)\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n assert \"b\" not in df.columns\n assert_eq(df, df.compute())\n df = dd.read_parquet(tmp, index=\"c\", engine=engine)\n assert set(df.index.categories) == {\"x\", \"y\", \"z\"}\n assert \"c\" not in df.columns\n # series\n df = dd.read_parquet(tmp, columns=\"b\", engine=engine)\n assert set(df.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\ndef test_append_wo_index(tmpdir, engine):\n \"\"\"Test append with write_index=False.\"\"\"\n tmp = str(tmpdir.join(\"tmp1.parquet\"))\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n assert \"Appended columns\" in str(excinfo.value)\n\n tmp = str(tmpdir.join(\"tmp2.parquet\"))\n ddf1.to_parquet(tmp, write_index=False, engine=engine)\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, index=\"f\", engine=engine)\n assert_eq(df.set_index(\"f\"), ddf3)\n\n\ndef test_append_overlapping_divisions(tmpdir, engine):\n \"\"\"Test raising of error when divisions overlapping.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended divisions\" in str(excinfo.value)\n\n ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)\n\n\ndef test_append_different_columns(tmpdir, 
engine):\n \"\"\"Test raising of error when non equal columns.\"\"\"\n tmp = str(tmpdir)\n df1 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int32)})\n df2 = pd.DataFrame({\"i64\": np.arange(100, dtype=np.int64)})\n df3 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int64)})\n\n ddf1 = dd.from_pandas(df1, chunksize=2)\n ddf2 = dd.from_pandas(df2, chunksize=2)\n ddf3 = dd.from_pandas(df3, chunksize=2)\n\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended columns\" in str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n ddf3.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended dtypes\" in str(excinfo.value)\n\n\ndef test_append_dict_column(tmpdir, engine):\n # See: https://github.com/dask/dask/issues/7492\n\n if engine == \"fastparquet\":\n pytest.xfail(\"Fastparquet engine is missing dict-column support\")\n elif pa_version < parse_version(\"1.0.1\"):\n pytest.skip(\"PyArrow 1.0.1+ required for dict-column support.\")\n\n tmp = str(tmpdir)\n dts = pd.date_range(\"2020-01-01\", \"2021-01-01\")\n df = pd.DataFrame(\n {\"value\": [{\"x\": x} for x in range(len(dts))]},\n index=dts,\n )\n ddf1 = dd.from_pandas(df, npartitions=1)\n\n # Write ddf1 to tmp, and then append it again\n ddf1.to_parquet(tmp, append=True, engine=engine)\n ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)\n\n # Read back all data (ddf1 + ddf1)\n ddf2 = dd.read_parquet(tmp, engine=engine)\n\n # Check computed result\n expect = pd.concat([df, df])\n result = ddf2.compute()\n assert_eq(expect, result)\n\n\n@write_read_engines_xfail\ndef test_ordering(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [10, 20, 30], \"c\": [100, 200, 300]},\n index=pd.Index([-1, -2, -3], name=\"myindex\"),\n columns=[\"c\", \"a\", \"b\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n if read_engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(tmp)\n assert pf.columns == [\"myindex\", \"c\", \"a\", \"b\"]\n\n ddf2 = dd.read_parquet(tmp, index=\"myindex\", engine=read_engine)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\ndef test_read_parquet_custom_columns(tmpdir, engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\"i32\": np.arange(1000, dtype=np.int32), \"f\": np.arange(1000, dtype=np.float64)}\n )\n df = dd.from_pandas(data, chunksize=50)\n df.to_parquet(tmp, engine=engine)\n\n df2 = dd.read_parquet(tmp, columns=[\"i32\", \"f\"], engine=engine)\n assert_eq(df[[\"i32\", \"f\"]], df2, check_index=False)\n\n fns = glob.glob(os.path.join(tmp, \"*.parquet\"))\n df2 = dd.read_parquet(fns, columns=[\"i32\"], engine=engine).compute()\n df2.sort_values(\"i32\", inplace=True)\n assert_eq(df[[\"i32\"]], df2, check_index=False, check_divisions=False)\n\n df3 = dd.read_parquet(tmp, columns=[\"f\", \"i32\"], engine=engine)\n assert_eq(df[[\"f\", \"i32\"]], df3, check_index=False)\n\n\n@pytest.mark.parametrize(\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": 
[\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, UTC]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, CET]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n if \"x\" in df and df.x.dtype == \"M8[ns]\" and \"arrow\" in engine:\n pytest.xfail(reason=\"Parquet pyarrow v1 doesn't support nanosecond precision\")\n if (\n \"x\" in df\n and df.x.dtype == \"M8[ns]\"\n and engine == \"fastparquet\"\n and fastparquet_version <= parse_version(\"0.6.3\")\n ):\n pytest.xfail(reason=\"fastparquet doesn't support nanosecond precision yet\")\n if (\n PANDAS_GT_130\n and read_kwargs.get(\"categories\", None)\n and engine == \"fastparquet\"\n and fastparquet_version <= parse_version(\"0.6.3\")\n ):\n pytest.xfail(\"https://github.com/dask/fastparquet/issues/577\")\n\n tmp = str(tmpdir)\n if df.index.name is None:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n oe = write_kwargs.pop(\"object_encoding\", None)\n if oe and engine == \"fastparquet\":\n dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)\n else:\n dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)\n ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)\n if str(ddf2.dtypes.get(\"x\")) == \"UInt16\" and engine == \"fastparquet\":\n # fastparquet choooses to use masked type to be able to get true repr of\n # 16-bit int\n assert_eq(ddf.astype(\"UInt16\"), ddf2)\n else:\n assert_eq(ddf, ddf2)\n\n\ndef test_categories(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": list(\"caaab\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf.y.astype(\"category\")\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, categories=[\"y\"], engine=engine)\n\n # Shouldn't need to specify categories explicitly\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf3, ddf2)\n\n with pytest.raises(NotImplementedError):\n ddf2.y.cat.categories\n assert set(ddf2.y.compute().cat.categories) == {\"a\", \"b\", \"c\"}\n cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()\n assert cats_set.tolist() == [\"a\", \"c\", \"a\", \"b\"]\n\n if engine == \"fastparquet\":\n assert_eq(ddf.y, ddf2.y, check_names=False)\n with pytest.raises(TypeError):\n # attempt to load as category that 
which is not so encoded\n ddf2 = dd.read_parquet(fn, categories=[\"x\"], engine=engine).compute()\n\n with pytest.raises((ValueError, FutureWarning)):\n # attempt to load as category unknown column\n ddf2 = dd.read_parquet(fn, categories=[\"foo\"], engine=engine)\n\n\ndef test_categories_unnamed_index(tmpdir, engine):\n # Check that we can handle an unnamed categorical index\n # https://github.com/dask/dask/issues/6885\n\n tmpdir = str(tmpdir)\n\n df = pd.DataFrame(\n data={\"A\": [1, 2, 3], \"B\": [\"a\", \"a\", \"b\"]}, index=[\"x\", \"y\", \"y\"]\n )\n ddf = dd.from_pandas(df, npartitions=1)\n ddf = ddf.categorize(columns=[\"B\"])\n\n ddf.to_parquet(tmpdir, engine=engine)\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf.index, ddf2.index, check_divisions=False)\n\n\ndef test_empty_partition(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf[ddf.a <= 5]\n ddf2.to_parquet(fn, engine=engine)\n\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert ddf3.npartitions < 5\n sol = ddf2.compute()\n assert_eq(sol, ddf3, check_names=False, check_index=False)\n\n\ndef test_timestamp_index(tmpdir, engine):\n fn = str(tmpdir)\n df = dd._compat.makeTimeDataFrame()\n df.index.name = \"foo\"\n ddf = dd.from_pandas(df, npartitions=5)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf, ddf2)\n\n\n@FASTPARQUET_MARK\n@PYARROW_MARK\ndef test_to_parquet_default_writes_nulls(tmpdir):\n fn = str(tmpdir.join(\"test.parquet\"))\n\n df = pd.DataFrame({\"c1\": [1.0, np.nan, 2, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.to_parquet(fn)\n table = pq.read_table(fn)\n assert table[1].null_count == 2\n\n\n@PYARROW_LE_MARK\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):\n df = pd.DataFrame(\n {\"partition_column\": [0, 0, 1, 1], \"strings\": [\"a\", \"b\", None, None]}\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n # In order to allow pyarrow to write an inconsistent schema,\n # we need to avoid writing the _metadata file (will fail >0.17.1)\n # and need to avoid schema inference (i.e. 
use `schema=None`)\n ddf.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n partition_on=[\"partition_column\"],\n write_metadata_file=False,\n schema=None,\n )\n\n # Test that schema is not validated by default\n # (shouldn't raise error with legacy dataset)\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n ).compute()\n\n # Test that read fails when validate_schema=True\n # Note: This fails differently for pyarrow.dataset api\n with pytest.raises(ValueError) as e_info:\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n dataset={\"validate_schema\": True},\n ).compute()\n assert e_info.message.contains(\"ValueError: Schema in partition\")\n assert e_info.message.contains(\"was different\")\n\n\n@PYARROW_MARK\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(\n tmpdir,\n):\n # Data types to test: strings, arrays, ints, timezone aware timestamps\n in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]\n out_arrays = [[0, 1, 2], [3, 4], None, None]\n in_strings = [\"a\", \"b\", np.nan, np.nan]\n out_strings = [\"a\", \"b\", None, None]\n tstamp = pd.Timestamp(1513393355, unit=\"s\")\n in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]\n out_tstamps = [\n # Timestamps come out in numpy.datetime64 format\n tstamp.to_datetime64(),\n tstamp.to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n timezone = \"US/Eastern\"\n tz_tstamp = pd.Timestamp(1513393355, unit=\"s\", tz=timezone)\n in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]\n out_tz_tstamps = [\n # Timezones do not make it through a write-read cycle.\n tz_tstamp.tz_convert(None).to_datetime64(),\n tz_tstamp.tz_convert(None).to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n\n df = pd.DataFrame(\n {\n \"partition_column\": [0, 0, 1, 1],\n \"arrays\": in_arrays,\n \"strings\": in_strings,\n \"tstamps\": in_tstamps,\n \"tz_tstamps\": in_tz_tstamps,\n }\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n schema = pa.schema(\n [\n (\"arrays\", pa.list_(pa.int64())),\n (\"strings\", pa.string()),\n (\"tstamps\", pa.timestamp(\"ns\")),\n (\"tz_tstamps\", pa.timestamp(\"ns\", timezone)),\n (\"partition_column\", pa.int64()),\n ]\n )\n ddf.to_parquet(\n str(tmpdir), engine=\"pyarrow\", partition_on=\"partition_column\", schema=schema\n )\n ddf_after_write = (\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\", gather_statistics=False)\n .compute()\n .reset_index(drop=True)\n )\n\n # Check array support\n arrays_after_write = ddf_after_write.arrays.values\n for i in range(len(df)):\n assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])\n\n # Check datetime support\n tstamps_after_write = ddf_after_write.tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tstamps_after_write[i]):\n assert np.isnat(out_tstamps[i])\n else:\n assert tstamps_after_write[i] == out_tstamps[i]\n\n # Check timezone aware datetime support\n tz_tstamps_after_write = ddf_after_write.tz_tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tz_tstamps_after_write[i]):\n assert np.isnat(out_tz_tstamps[i])\n else:\n assert tz_tstamps_after_write[i] == out_tz_tstamps[i]\n\n # Check string support\n assert np.array_equal(ddf_after_write.strings.values, out_strings)\n\n # Check partition column\n assert np.array_equal(ddf_after_write.partition_column, df.partition_column)\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"index\", 
[False, True])\n@pytest.mark.parametrize(\"schema\", [\"infer\", \"complex\"])\ndef test_pyarrow_schema_inference(tmpdir, index, engine, schema):\n if schema == \"complex\":\n schema = {\"index\": pa.string(), \"amount\": pa.int64()}\n\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"index\": [\"1\", \"2\", \"3\", \"2\", \"3\", \"1\", \"4\"],\n \"date\": pd.to_datetime(\n [\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-02\",\n \"2017-01-02\",\n \"2017-01-06\",\n \"2017-01-09\",\n ]\n ),\n \"amount\": [100, 200, 300, 400, 500, 600, 700],\n },\n index=range(7, 14),\n )\n if index:\n df = dd.from_pandas(df, npartitions=2).set_index(\"index\")\n else:\n df = dd.from_pandas(df, npartitions=2)\n\n df.to_parquet(tmpdir, engine=\"pyarrow\", schema=schema)\n df_out = dd.read_parquet(tmpdir, engine=engine)\n df_out.compute()\n\n if index and engine == \"fastparquet\":\n # Fastparquet fails to detect int64 from _metadata\n df_out[\"amount\"] = df_out[\"amount\"].astype(\"int64\")\n\n # Fastparquet not handling divisions for\n # pyarrow-written dataset with string index\n assert_eq(df, df_out, check_divisions=False)\n else:\n assert_eq(df, df_out)\n\n\ndef test_partition_on(tmpdir, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n \"d\": np.arange(0, 100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n # Note #1: Cross-engine functionality is missing\n # Note #2: The index is not preserved in pyarrow when partition_on is used\n out = dd.read_parquet(\n tmpdir, engine=engine, index=False, gather_statistics=False\n ).compute()\n for val in df.a1.unique():\n assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])\n\n # Now specify the columns and allow auto-index detection\n out = dd.read_parquet(tmpdir, engine=engine, columns=[\"d\", \"a2\"]).compute()\n for val in df.a2.unique():\n assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])\n\n\ndef test_partition_on_duplicates(tmpdir, engine):\n # https://github.com/dask/dask/issues/6445\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"data\": np.random.random(size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n\n for _ in range(2):\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n\n out = dd.read_parquet(tmpdir, engine=engine).compute()\n\n assert len(df) == len(out)\n for root, dirs, files in os.walk(tmpdir):\n for file in files:\n assert file in (\n \"part.0.parquet\",\n \"part.1.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"partition_on\", [\"aa\", [\"aa\"]])\ndef test_partition_on_string(tmpdir, partition_on):\n tmpdir = str(tmpdir)\n with dask.config.set(scheduler=\"single-threaded\"):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"aa\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"bb\": np.random.random(size=100),\n \"cc\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(\n tmpdir, partition_on=partition_on, write_index=False, engine=\"pyarrow\"\n )\n out = dd.read_parquet(\n tmpdir, index=False, gather_statistics=False, engine=\"pyarrow\"\n )\n out = out.compute()\n 
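# partition_on shuffles rows into hive-style key=value directories, so the\n    # original ordering is not preserved; compare per-value sets of \"bb\" instead\n    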
for val in df.aa.unique():\n assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])\n\n\n@write_read_engines()\ndef test_filters_categorical(tmpdir, write_engine, read_engine):\n tmpdir = str(tmpdir)\n cats = [\"2018-01-01\", \"2018-01-02\", \"2018-01-03\", \"2018-01-04\"]\n dftest = pd.DataFrame(\n {\n \"dummy\": [1, 1, 1, 1],\n \"DatePart\": pd.Categorical(cats, categories=cats, ordered=True),\n }\n )\n ddftest = dd.from_pandas(dftest, npartitions=4).set_index(\"dummy\")\n ddftest.to_parquet(tmpdir, partition_on=\"DatePart\", engine=write_engine)\n ddftest_read = dd.read_parquet(\n tmpdir,\n index=\"dummy\",\n engine=read_engine,\n filters=[((\"DatePart\", \"<=\", \"2018-01-02\"))],\n )\n assert len(ddftest_read) == 2\n\n\n@write_read_engines()\ndef test_filters(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n assert ddf.npartitions == 5\n\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n a = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \">\", 4)])\n assert a.npartitions == 3\n assert (a.x > 3).all().compute()\n\n b = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\")])\n assert b.npartitions == 1\n assert (b.y == \"c\").all().compute()\n\n c = dd.read_parquet(\n tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\"), (\"x\", \">\", 6)]\n )\n assert c.npartitions <= 1\n assert not len(c)\n assert_eq(c, c)\n\n d = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n filters=[\n # Select two overlapping ranges\n [(\"x\", \">\", 1), (\"x\", \"<\", 6)],\n [(\"x\", \">\", 3), (\"x\", \"<\", 8)],\n ],\n )\n assert d.npartitions == 3\n assert ((d.x > 1) & (d.x < 8)).all().compute()\n\n e = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \"in\", (0, 9))])\n assert e.npartitions == 2\n assert ((e.x < 2) | (e.x > 7)).all().compute()\n\n f = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", \"=\", \"c\")])\n assert f.npartitions == 1\n assert len(f)\n assert (f.y == \"c\").all().compute()\n\n\n@write_read_engines()\ndef test_filters_v0(tmpdir, write_engine, read_engine):\n if write_engine == \"fastparquet\" or read_engine == \"fastparquet\":\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n\n # Recent versions of pyarrow support full row-wise filtering\n # (fastparquet and older pyarrow versions do not)\n pyarrow_row_filtering = read_engine == \"pyarrow-dataset\"\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"at\": [\"ab\", \"aa\", \"ba\", \"da\", \"bb\"]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n # Ok with 1 partition and filters\n ddf.repartition(npartitions=1, force=True).to_parquet(\n fn, write_index=False, engine=write_engine\n )\n ddf2 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n ddf3 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", \"=\", \"aa\")]\n ).compute()\n if pyarrow_row_filtering:\n assert_eq(ddf2, ddf[ddf[\"at\"] == \"aa\"], check_index=False)\n assert_eq(ddf3, ddf[ddf[\"at\"] == \"aa\"], check_index=False)\n else:\n assert_eq(ddf2, ddf)\n assert_eq(ddf3, ddf)\n\n # with >1 partition and no filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine).compute()\n assert_eq(ddf2, ddf)\n\n # with >1 partition and filters using base fastparquet\n if read_engine == \"fastparquet\":\n 
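# sanity-check the same predicates directly through fastparquet's own\n        # ParquetFile.to_pandas API (both the \"==\" and \"=\" spellings) before\n        # exercising them through dd.read_parquet below\n        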
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"==\", \"aa\")])\n df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"=\", \"aa\")])\n assert len(df2) > 0\n assert len(df3) > 0\n\n # with >1 partition and filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(\n fn, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n ddf3 = dd.read_parquet(\n fn, engine=read_engine, filters=[(\"at\", \"=\", \"aa\")]\n ).compute()\n assert len(ddf2) > 0\n assert len(ddf3) > 0\n assert_eq(ddf2, ddf3)\n\n\ndef test_filtering_pyarrow_dataset(tmpdir, engine):\n pytest.importorskip(\"pyarrow\", minversion=\"1.0.0\")\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"aa\": range(100), \"bb\": [\"cat\", \"dog\"] * 50})\n ddf = dd.from_pandas(df, npartitions=10)\n ddf.to_parquet(fn, write_index=False, engine=engine)\n\n # Filtered read\n aa_lim = 40\n bb_val = \"dog\"\n filters = [[(\"aa\", \"<\", aa_lim), (\"bb\", \"==\", bb_val)]]\n ddf2 = dd.read_parquet(fn, index=False, engine=\"pyarrow-dataset\", filters=filters)\n\n # Check that partitions are filetered for \"aa\" filter\n nonempty = 0\n for part in ddf[ddf[\"aa\"] < aa_lim].partitions:\n nonempty += int(len(part.compute()) > 0)\n assert ddf2.npartitions == nonempty\n\n # Check that rows are filtered for \"aa\" and \"bb\" filters\n df = df[df[\"aa\"] < aa_lim]\n df = df[df[\"bb\"] == bb_val]\n assert_eq(df, ddf2.compute(), check_index=False)\n\n\ndef test_fiters_file_list(tmpdir, engine):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf.to_parquet(str(tmpdir), engine=engine)\n fils = str(tmpdir.join(\"*.parquet\"))\n ddf_out = dd.read_parquet(\n fils, gather_statistics=True, engine=engine, filters=[(\"x\", \">\", 3)]\n )\n\n assert ddf_out.npartitions == 3\n assert_eq(df[df[\"x\"] > 3], ddf_out.compute(), check_index=False)\n\n # Check that first parition gets filtered for single-path input\n ddf2 = dd.read_parquet(\n str(tmpdir.join(\"part.0.parquet\")),\n gather_statistics=True,\n engine=engine,\n filters=[(\"x\", \">\", 3)],\n )\n assert len(ddf2) == 0\n\n\ndef test_pyarrow_filter_divisions(tmpdir):\n pytest.importorskip(\"pyarrow\")\n\n # Write simple dataset with an index that will only\n # have a sorted index if certain row-groups are filtered out.\n # In this case, we filter \"a\" <= 3 to get a sorted\n # index. 
Otherwise, \"a\" is NOT monotonically increasing.\n df = pd.DataFrame({\"a\": [0, 1, 10, 12, 2, 3, 8, 9], \"b\": range(8)}).set_index(\"a\")\n df.iloc[:4].to_parquet(\n str(tmpdir.join(\"file.0.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n df.iloc[4:].to_parquet(\n str(tmpdir.join(\"file.1.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n\n # Only works for ArrowDatasetEngine.\n # Legacy code will not apply filters on individual row-groups\n # when `split_row_groups=False`.\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=False,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)\n\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=True,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)\n\n\ndef test_divisions_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n size = 100\n categoricals = []\n for value in [\"a\", \"b\", \"c\", \"d\"]:\n categoricals += [value] * int(size / 4)\n df = pd.DataFrame(\n {\n \"a\": categoricals,\n \"b\": np.random.random(size=size),\n \"c\": np.random.randint(1, 5, size=size),\n }\n )\n d = dd.from_pandas(df, npartitions=4)\n # save it\n d.to_parquet(tmpdir, write_index=True, partition_on=[\"a\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"a\", \"==\", \"b\")])\n # test it\n expected_divisions = (25, 49)\n assert out.divisions == expected_divisions\n\n\ndef test_divisions_are_known_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n df = pd.DataFrame(\n {\n \"unique\": [0, 0, 1, 1, 2, 2, 3, 3],\n \"id\": [\"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\"],\n },\n index=[0, 0, 1, 1, 2, 2, 3, 3],\n )\n d = dd.from_pandas(df, npartitions=2)\n # save it\n d.to_parquet(tmpdir, partition_on=[\"id\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"id\", \"==\", \"id1\")])\n # test it\n assert out.known_divisions\n expected_divisions = (0, 2, 3)\n assert out.divisions == expected_divisions\n\n\n@FASTPARQUET_MARK\n@pytest.mark.xfail(reason=\"No longer accept ParquetFile objects\")\ndef test_read_from_fastparquet_parquetfile(tmpdir):\n fn = str(tmpdir)\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(fn, partition_on=[\"a\"], engine=\"fastparquet\")\n\n pq_f = fastparquet.ParquetFile(fn)\n\n # OK with no filters\n out = dd.read_parquet(pq_f).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n # OK with filters\n out = dd.read_parquet(pq_f, filters=[(\"a\", \"==\", \"B\")]).compute()\n assert set(df.b[df.a == \"B\"]) == set(out.b)\n\n # Engine should not be set to 'pyarrow'\n with pytest.raises(AssertionError):\n out = dd.read_parquet(pq_f, engine=\"pyarrow\")\n\n\n@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_to_parquet_lazy(tmpdir, scheduler, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1.0, 2.0, 3.0, 4.0]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n value = 
ddf.to_parquet(tmpdir, compute=False, engine=engine)\n\n assert hasattr(value, \"dask\")\n value.compute(scheduler=scheduler)\n assert os.path.exists(tmpdir)\n\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf, ddf2)\n\n\n@FASTPARQUET_MARK\ndef test_timestamp96(tmpdir):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [pd.to_datetime(\"now\", utc=True)]})\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, write_index=False, times=\"int96\")\n pf = fastparquet.ParquetFile(fn)\n assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96\n out = dd.read_parquet(fn, index=False).compute()\n assert_eq(out, df)\n\n\n@FASTPARQUET_MARK\ndef test_drill_scheme(tmpdir):\n fn = str(tmpdir)\n N = 5\n df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n files = []\n for d in [\"test_data1\", \"test_data2\"]:\n dn = os.path.join(fn, d)\n if not os.path.exists(dn):\n os.mkdir(dn)\n files.append(os.path.join(dn, \"data1.parq\"))\n\n fastparquet.write(files[0], df1)\n fastparquet.write(files[1], df2)\n\n df = dd.read_parquet(files)\n assert \"dir0\" in df.columns\n out = df.compute()\n assert \"dir0\" in out\n assert (np.unique(out.dir0) == [\"test_data1\", \"test_data2\"]).all()\n\n\ndef test_parquet_select_cats(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"categories\": pd.Series(\n np.random.choice([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], size=100),\n dtype=\"category\",\n ),\n \"ints\": pd.Series(list(range(0, 100)), dtype=\"int\"),\n \"floats\": pd.Series(list(range(0, 100)), dtype=\"float\"),\n }\n )\n\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, engine=engine)\n rddf = dd.read_parquet(fn, columns=[\"ints\"], engine=engine)\n assert list(rddf.columns) == [\"ints\"]\n rddf = dd.read_parquet(fn, engine=engine)\n assert list(rddf.columns) == list(df)\n\n\ndef test_columns_name(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet_version <= parse_version(\"0.3.1\"):\n pytest.skip(\"Fastparquet does not write column_indexes up to 0.3.1\")\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index([\"a\", \"b\"], name=\"idx\"))\n df.columns.name = \"cols\"\n ddf = dd.from_pandas(df, 2)\n\n ddf.to_parquet(tmp_path, engine=engine)\n result = dd.read_parquet(tmp_path, engine=engine, index=[\"idx\"])\n assert_eq(result, df)\n\n\ndef check_compression(engine, filename, compression):\n if engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(filename)\n md = pf.fmd.row_groups[0].columns[0].meta_data\n if compression is None:\n assert md.total_compressed_size == md.total_uncompressed_size\n else:\n assert md.total_compressed_size != md.total_uncompressed_size\n else:\n metadata = pa.parquet.ParquetDataset(filename).metadata\n names = metadata.schema.names\n for i in range(metadata.num_row_groups):\n row_group = metadata.row_group(i)\n for j in range(len(names)):\n column = row_group.column(j)\n if compression is None:\n assert (\n column.total_compressed_size == column.total_uncompressed_size\n )\n else:\n compress_expect = compression\n if compression == \"default\":\n compress_expect = \"snappy\"\n assert compress_expect.lower() == column.compression.lower()\n assert (\n column.total_compressed_size != column.total_uncompressed_size\n )\n\n\n@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_compression(tmpdir, compression, engine):\n fn 
= str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine)\n out = dd.read_parquet(fn, engine=engine)\n assert_eq(out, ddf)\n check_compression(engine, fn, compression)\n\n\n@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=[\"x\"])\n check_compression(engine, fn, compression)\n\n\n@pytest.fixture(\n params=[\n # fastparquet 0.1.3\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.7.1\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.8.0\n {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n },\n # TODO: fastparquet update\n ]\n)\ndef pandas_metadata(request):\n return request.param\n\n\ndef test_parse_pandas_metadata(pandas_metadata):\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(\n pandas_metadata\n )\n assert index_names == [\"idx\"]\n assert column_names == [\"A\"]\n assert column_index_names == [None]\n\n # for new pyarrow\n if pandas_metadata[\"index_columns\"] == [\"__index_level_0__\"]:\n assert mapping == {\"__index_level_0__\": \"idx\", \"A\": \"A\"}\n else:\n assert mapping == {\"idx\": \"idx\", \"A\": \"A\"}\n\n assert isinstance(mapping, dict)\n\n\ndef test_parse_pandas_metadata_null_index():\n # pyarrow 0.7.1 None for index\n e_index_names = [None]\n e_column_names = [\"x\"]\n e_mapping = {\"__index_level_0__\": None, \"x\": \"x\"}\n e_column_index_names = [None]\n\n md = {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"__index_level_0__\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = 
_parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n # pyarrow 0.8.0 None for index\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"x\",\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": None,\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n\n@PYARROW_MARK\ndef test_read_no_metadata(tmpdir, engine):\n # use pyarrow.parquet to create a parquet file without\n # pandas metadata\n tmp = str(tmpdir) + \"table.parq\"\n\n table = pa.Table.from_arrays(\n [pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=[\"A\", \"B\"]\n )\n pq.write_table(table, tmp)\n result = dd.read_parquet(tmp, engine=engine)\n expected = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n assert_eq(result, expected)\n\n\ndef test_parse_pandas_metadata_duplicate_index_columns():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]\n\n\ndef test_parse_pandas_metadata_column_with_index_name():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]\n\n\ndef test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n path2 = os.path.join(fn, \"partitioned\")\n 
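# both sets of engine kwargs below request snappy compression, so skip when\n    # the python-snappy bindings are not installed\n    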
pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n engine_kwargs = {\n \"pyarrow-dataset\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n engine_kwargs[\"pyarrow-legacy\"] = engine_kwargs[\"pyarrow-dataset\"]\n\n ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])\n out = dd.read_parquet(path1, engine=engine)\n assert_eq(out, ddf, check_index=(engine != \"fastparquet\"))\n\n # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets\n with dask.config.set(scheduler=\"sync\"):\n ddf.to_parquet(\n path2, engine=engine, partition_on=[\"a\"], **engine_kwargs[engine]\n )\n out = dd.read_parquet(path2, engine=engine).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n\ndef test_writing_parquet_with_unknown_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n\n with pytest.raises(TypeError):\n ddf.to_parquet(fn, engine=engine, unknown_key=\"unknown_value\")\n\n\n@ANY_ENGINE_MARK\ndef test_to_parquet_with_get(tmpdir):\n from dask.multiprocessing import get as mp_get\n\n tmpdir = str(tmpdir)\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(tmpdir, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n\n result = dd.read_parquet(os.path.join(tmpdir, \"*\"))\n assert_eq(result, df, check_index=False)\n\n\ndef test_select_partitioned_column(tmpdir, engine):\n pytest.importorskip(\"snappy\")\n\n fn = str(tmpdir)\n size = 20\n d = {\n \"signal1\": np.random.normal(0, 0.3, size=size).cumsum() + 50,\n \"fake_categorical1\": np.random.choice([\"A\", \"B\", \"C\"], size=size),\n \"fake_categorical2\": np.random.choice([\"D\", \"E\", \"F\"], size=size),\n }\n df = dd.from_pandas(pd.DataFrame(d), 2)\n df.to_parquet(\n fn,\n compression=\"snappy\",\n write_index=False,\n engine=engine,\n partition_on=[\"fake_categorical1\", \"fake_categorical2\"],\n )\n\n df_partitioned = dd.read_parquet(fn, engine=engine)\n df_partitioned[df_partitioned.fake_categorical1 == \"A\"].compute()\n\n\ndef test_with_tz(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet_version < parse_version(\"0.3.0\"):\n pytest.skip(\"fastparquet<0.3.0 did not support this\")\n\n with warnings.catch_warnings():\n if engine == \"fastparquet\":\n # fastparquet-442\n warnings.simplefilter(\"ignore\", FutureWarning) # pandas 0.25\n fn = str(tmpdir)\n df = pd.DataFrame([[0]], columns=[\"a\"], dtype=\"datetime64[ns, UTC]\")\n df = dd.from_pandas(df, 1)\n df.to_parquet(fn, engine=engine)\n df2 = dd.read_parquet(fn, engine=engine)\n assert_eq(df, df2, check_divisions=False, check_index=False)\n\n\n@PYARROW_MARK\ndef test_arrow_partitioning(tmpdir):\n # Issue #3518\n path = str(tmpdir)\n data = {\n \"p\": np.repeat(np.arange(3), 2).astype(np.int8),\n \"b\": np.repeat(-1, 6).astype(np.int16),\n \"c\": np.repeat(-2, 6).astype(np.float32),\n \"d\": np.repeat(-3, 6).astype(np.float64),\n }\n pdf = pd.DataFrame(data)\n ddf = dd.from_pandas(pdf, npartitions=2)\n ddf.to_parquet(path, engine=\"pyarrow\", 
write_index=False, partition_on=\"p\")\n\n ddf = dd.read_parquet(path, index=False, engine=\"pyarrow\")\n\n ddf.astype({\"b\": np.float32}).compute()\n\n\ndef test_informative_error_messages():\n with pytest.raises(ValueError) as info:\n dd.read_parquet(\"foo\", engine=\"foo\")\n\n assert \"foo\" in str(info.value)\n assert \"arrow\" in str(info.value)\n assert \"fastparquet\" in str(info.value)\n\n\ndef test_append_cat_fp(tmpdir, engine):\n path = str(tmpdir)\n # https://github.com/dask/dask/issues/4120\n df = pd.DataFrame({\"x\": [\"a\", \"a\", \"b\", \"a\", \"b\"]})\n df[\"x\"] = df[\"x\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=1)\n\n dd.to_parquet(ddf, path, engine=engine)\n dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)\n\n d = dd.read_parquet(path, engine=engine).compute()\n assert d[\"x\"].tolist() == [\"a\", \"a\", \"b\", \"a\", \"b\"] * 2\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\n \"df\",\n [\n pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]}),\n pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}),\n pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}),\n pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])})),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])})),\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us\n pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"),\n # pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"),\n pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]),\n pd.DataFrame(\n {\"x\": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name=\"foo\")\n ),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]),\n pd.DataFrame({\"0\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [3, 2, None]}),\n pd.DataFrame({\"-\": [3.0, 2.0, None]}),\n pd.DataFrame({\".\": [3.0, 2.0, None]}),\n pd.DataFrame({\" \": [3.0, 2.0, None]}),\n ],\n)\ndef test_roundtrip_arrow(tmpdir, df):\n # Index will be given a name when preserved as index\n tmp_path = str(tmpdir)\n if not df.index.name:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp_path, engine=\"pyarrow\", write_index=True)\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", gather_statistics=True)\n assert_eq(ddf, ddf2)\n\n\ndef test_datasets_timeseries(tmpdir, engine):\n tmp_path = str(tmpdir)\n df = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-10\", freq=\"1d\"\n ).persist()\n df.to_parquet(tmp_path, engine=engine)\n\n df2 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, df2)\n\n\ndef test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n path = pathlib.Path(str(tmpdir))\n ddf.to_parquet(path, engine=engine)\n ddf2 = dd.read_parquet(path, engine=engine)\n assert_eq(ddf, ddf2)\n\n\n@PYARROW_LE_MARK\ndef test_pyarrow_metadata_nthreads(tmpdir):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n 
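# the nested \"dataset\" options below map onto pyarrow.parquet.ParquetDataset\n    # keyword arguments for the legacy engine; metadata_nthreads controls how many\n    # threads are used to collect file metadata\n    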
ddf.to_parquet(tmp_path, engine=\"pyarrow\")\n ops = {\"dataset\": {\"metadata_nthreads\": 2}}\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow-legacy\", **ops)\n assert_eq(ddf, ddf2)\n\n\n@FASTPARQUET_MARK\ndef test_categories_large(tmpdir, engine):\n # Issue #5112\n fn = str(tmpdir.join(\"parquet_int16.parq\"))\n numbers = np.random.randint(0, 800000, size=1000000)\n df = pd.DataFrame(numbers.T, columns=[\"name\"])\n df.name = df.name.astype(\"category\")\n\n df.to_parquet(fn, engine=\"fastparquet\", compression=\"uncompressed\")\n ddf = dd.read_parquet(fn, engine=engine, categories={\"name\": 80000})\n\n assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))\n\n\n@write_read_engines()\ndef test_read_glob_no_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"), engine=read_engine, gather_statistics=False\n )\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@write_read_engines()\ndef test_read_glob_yes_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n paths = glob.glob(os.path.join(tmp_path, \"*.parquet\"))\n paths.append(os.path.join(tmp_path, \"_metadata\"))\n ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@pytest.mark.parametrize(\"statistics\", [True, False, None])\n@pytest.mark.parametrize(\"remove_common\", [True, False])\n@write_read_engines()\ndef test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n if remove_common and os.path.exists(os.path.join(tmp_path, \"_common_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_common_metadata\"))\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)\n assert_eq(ddf, ddf2, check_divisions=False)\n assert ddf.divisions == tuple(range(0, 420, 30))\n if statistics is False or statistics is None and read_engine.startswith(\"pyarrow\"):\n assert ddf2.divisions == (None,) * 14\n else:\n assert ddf2.divisions == tuple(range(0, 420, 30))\n\n\n@write_read_engines()\ndef test_statistics_nometa(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine, write_metadata_file=False)\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=True)\n assert_eq(ddf, ddf2)\n assert ddf.divisions == tuple(range(0, 420, 30))\n assert ddf2.divisions == tuple(range(0, 420, 30))\n\n\n@pytest.mark.parametrize(\"schema\", [\"infer\", None])\ndef test_timeseries_nulls_in_schema(tmpdir, engine, schema):\n # GH#5608: relative path failing _metadata/_common_metadata detection.\n tmp_path = str(tmpdir.mkdir(\"files\"))\n tmp_path = os.path.join(tmp_path, \"../\", \"files\")\n\n ddf2 = (\n dask.datasets.timeseries(start=\"2000-01-01\", end=\"2000-01-03\", freq=\"1h\")\n .reset_index()\n .map_partitions(lambda x: x.loc[:5])\n )\n ddf2 = ddf2.set_index(\"x\").reset_index().persist()\n ddf2.name = ddf2.name.where(ddf2.timestamp == \"2000-01-01\", None)\n\n # Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write\n dataset = {\"validate_schema\": False} if engine == 
\"pyarrow-legacy\" else {}\n ddf2.to_parquet(tmp_path, engine=engine, write_metadata_file=False, schema=schema)\n ddf_read = dd.read_parquet(tmp_path, engine=engine, dataset=dataset)\n\n assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)\n\n\n@PYARROW_LE_MARK\n@pytest.mark.parametrize(\"numerical\", [True, False])\n@pytest.mark.parametrize(\n \"timestamp\", [\"2000-01-01\", \"2000-01-02\", \"2000-01-03\", \"2000-01-04\"]\n)\ndef test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):\n tmp_path = str(tmpdir)\n ddf2 = dd.from_pandas(\n pd.DataFrame(\n {\n \"timestamp\": [\n pd.Timestamp(\"2000-01-01\"),\n pd.Timestamp(\"2000-01-02\"),\n pd.Timestamp(\"2000-01-03\"),\n pd.Timestamp(\"2000-01-04\"),\n ],\n \"id\": np.arange(4, dtype=\"float64\"),\n \"name\": [\"cat\", \"dog\", \"bird\", \"cow\"],\n }\n ),\n npartitions=2,\n ).persist()\n if numerical:\n ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)\n ddf2.id = ddf2.id.astype(\"float64\")\n else:\n ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)\n\n # There should be no schema error if you specify a schema on write\n schema = pa.schema(\n [(\"timestamp\", pa.timestamp(\"ns\")), (\"id\", pa.float64()), (\"name\", pa.string())]\n )\n ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine=\"pyarrow\")\n assert_eq(\n dd.read_parquet(\n tmp_path,\n dataset={\"validate_schema\": True},\n index=False,\n engine=\"pyarrow-legacy\",\n ),\n ddf2,\n check_divisions=False,\n check_index=False,\n )\n\n\n@PYARROW_LE_MARK\ndef test_read_inconsistent_schema_pyarrow(tmpdir):\n # Note: This is a proxy test for a cudf-related issue fix\n # (see cudf#5062 github issue). The cause of that issue is\n # schema inconsistencies that do not actually correspond to\n # different types, but whether or not the file/column contains\n # null values.\n\n df1 = pd.DataFrame({\"id\": [0, 1], \"val\": [10, 20]})\n df2 = pd.DataFrame({\"id\": [2, 3], \"val\": [30, 40]})\n\n desired_type = \"int64\"\n other_type = \"int32\"\n df1.val = df1.val.astype(desired_type)\n df2.val = df2.val.astype(other_type)\n\n df_expect = pd.concat([df1, df2], ignore_index=True)\n df_expect[\"val\"] = df_expect.val.astype(desired_type)\n\n df1.to_parquet(os.path.join(tmpdir, \"0.parquet\"), engine=\"pyarrow\")\n df2.to_parquet(os.path.join(tmpdir, \"1.parquet\"), engine=\"pyarrow\")\n\n # Read Directory\n check = dd.read_parquet(\n str(tmpdir), dataset={\"validate_schema\": False}, engine=\"pyarrow-legacy\"\n )\n assert_eq(check.compute(), df_expect, check_index=False)\n\n # Read List\n check = dd.read_parquet(\n os.path.join(tmpdir, \"*.parquet\"),\n dataset={\"validate_schema\": False},\n engine=\"pyarrow-legacy\",\n )\n assert_eq(check.compute(), df_expect, check_index=False)\n\n\ndef test_graph_size_pyarrow(tmpdir, engine):\n import pickle\n\n fn = str(tmpdir)\n\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-02\", freq=\"60S\", partition_freq=\"1H\"\n )\n\n ddf1.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n\n assert len(pickle.dumps(ddf2.__dask_graph__())) < 25000\n\n\n@pytest.mark.parametrize(\"preserve_index\", [True, False])\n@pytest.mark.parametrize(\"index\", [None, np.random.permutation(2000)])\ndef test_getitem_optimization(tmpdir, engine, preserve_index, index):\n tmp_path_rd = str(tmpdir.mkdir(\"read\"))\n tmp_path_wt = str(tmpdir.mkdir(\"write\"))\n df = pd.DataFrame(\n {\"A\": [1, 2] * 1000, \"B\": [3, 4] * 1000, \"C\": [5, 6] * 1000}, index=index\n )\n 
df.index.name = \"my_index\"\n ddf = dd.from_pandas(df, 2, sort=False)\n\n ddf.to_parquet(tmp_path_rd, engine=engine, write_index=preserve_index)\n ddf = dd.read_parquet(tmp_path_rd, engine=engine)[\"B\"]\n\n # Write ddf back to disk to check that the round trip\n # preserves the getitem optimization\n out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)\n dsk = optimize_dataframe_getitem(out.dask, keys=[out.key])\n\n subgraph_rd = hlg_layer(dsk, \"read-parquet\")\n assert isinstance(subgraph_rd, DataFrameIOLayer)\n assert subgraph_rd.columns == [\"B\"]\n assert next(iter(subgraph_rd.dsk.values()))[0].columns == [\"B\"]\n\n subgraph_wt = hlg_layer(dsk, \"to-parquet\")\n assert isinstance(subgraph_wt, Blockwise)\n\n assert_eq(ddf.compute(optimize_graph=False), ddf.compute())\n\n\ndef test_getitem_optimization_empty(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n df2 = dd.read_parquet(fn, columns=[], engine=engine)\n dsk = optimize_dataframe_getitem(df2.dask, keys=[df2._name])\n\n subgraph = next(iter(dsk.layers.values()))\n assert isinstance(subgraph, DataFrameIOLayer)\n assert subgraph.columns == []\n\n\ndef test_getitem_optimization_multi(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n a = dd.read_parquet(fn, engine=engine)[\"B\"]\n b = dd.read_parquet(fn, engine=engine)[[\"C\"]]\n c = dd.read_parquet(fn, engine=engine)[[\"C\", \"A\"]]\n\n a1, a2, a3 = dask.compute(a, b, c)\n b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)\n\n assert_eq(a1, b1)\n assert_eq(a2, b2)\n assert_eq(a3, b3)\n\n\ndef test_layer_creation_info(tmpdir, engine):\n df = pd.DataFrame({\"a\": range(10), \"b\": [\"cat\", \"dog\"] * 5})\n dd.from_pandas(df, npartitions=1).to_parquet(\n tmpdir, engine=engine, partition_on=[\"b\"]\n )\n\n # Apply filters directly in dd.read_parquet\n filters = [(\"b\", \"==\", \"cat\")]\n ddf1 = dd.read_parquet(tmpdir, engine=engine, filters=filters)\n assert \"dog\" not in ddf1[\"b\"].compute()\n\n # Results will not match if we use dd.read_parquet\n # without filters\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n with pytest.raises(AssertionError):\n assert_eq(ddf1, ddf2)\n\n # However, we can use `creation_info` to regenerate\n # the same collection with `filters` defined\n info = ddf2.dask.layers[ddf2._name].creation_info\n kwargs = info.get(\"kwargs\", {})\n kwargs[\"filters\"] = filters\n ddf3 = info[\"func\"](*info.get(\"args\", []), **kwargs)\n assert_eq(ddf1, ddf3)\n\n\n@ANY_ENGINE_MARK\ndef test_blockwise_parquet_annotations(tmpdir):\n df = pd.DataFrame({\"a\": np.arange(40, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=2)\n expect.to_parquet(str(tmpdir))\n\n with dask.annotate(foo=\"bar\"):\n ddf = dd.read_parquet(str(tmpdir))\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n layer = next(iter(layers.values()))\n assert isinstance(layer, DataFrameIOLayer)\n assert layer.annotations == {\"foo\": \"bar\"}\n\n\n@ANY_ENGINE_MARK\ndef test_optimize_blockwise_parquet(tmpdir):\n size = 40\n npartitions = 2\n tmp = str(tmpdir)\n df = pd.DataFrame({\"a\": np.arange(size, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=npartitions)\n 
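# round-trip through parquet so the collection is backed by a single Blockwise\n    # IO layer that the elementwise arithmetic below can be fused with\n    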
expect.to_parquet(tmp)\n ddf = dd.read_parquet(tmp)\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n assert isinstance(list(layers.values())[0], Blockwise)\n\n # Check single-layer result\n assert_eq(ddf, expect)\n\n # Increment by 1\n ddf += 1\n expect += 1\n\n # Increment by 10\n ddf += 10\n expect += 10\n\n # `ddf` should now have THREE Blockwise layers\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 3\n assert all(isinstance(layer, Blockwise) for layer in layers.values())\n\n # Check that `optimize_blockwise` fuses all three\n # `Blockwise` layers together into a singe `Blockwise` layer\n keys = [(ddf._name, i) for i in range(npartitions)]\n graph = optimize_blockwise(ddf.__dask_graph__(), keys)\n layers = graph.layers\n name = list(layers.keys())[0]\n assert len(layers) == 1\n assert isinstance(layers[name], Blockwise)\n\n # Check final result\n assert_eq(ddf, expect)\n\n\n@PYARROW_MARK\ndef test_split_row_groups(tmpdir, engine):\n \"\"\"Test split_row_groups read_parquet kwarg\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=100\n )\n\n ddf3 = dd.read_parquet(tmp, engine=engine, split_row_groups=True, chunksize=1)\n assert ddf3.npartitions == 4\n\n ddf3 = dd.read_parquet(\n tmp, engine=engine, gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 2\n\n dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf3 = dd.read_parquet(\n tmp,\n engine=engine,\n gather_statistics=True,\n split_row_groups=True,\n chunksize=1,\n )\n assert ddf3.npartitions == 12\n\n ddf3 = dd.read_parquet(\n tmp, engine=engine, gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 4\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [1, 12])\n@pytest.mark.parametrize(\"gather_statistics\", [True, False])\ndef test_split_row_groups_int(tmpdir, split_row_groups, gather_statistics, engine):\n tmp = str(tmpdir)\n row_group_size = 10\n npartitions = 4\n half_size = 400\n df = pd.DataFrame(\n {\n \"i32\": np.arange(2 * half_size, dtype=np.int32),\n \"f\": np.arange(2 * half_size, dtype=np.float64),\n }\n )\n half = len(df) // 2\n\n dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=row_group_size\n )\n dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=row_group_size\n )\n\n ddf2 = dd.read_parquet(\n tmp,\n engine=engine,\n split_row_groups=split_row_groups,\n gather_statistics=gather_statistics,\n )\n expected_rg_cout = int(half_size / row_group_size)\n assert ddf2.npartitions == 2 * math.ceil(expected_rg_cout / split_row_groups)\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [8, 25])\ndef test_split_row_groups_int_aggregate_files(tmpdir, engine, split_row_groups):\n # Use pyarrow to write a multi-file dataset with\n # multiple row-groups per file\n row_group_size = 10\n size = 800\n df = pd.DataFrame(\n {\n \"i32\": np.arange(size, dtype=np.int32),\n \"f\": np.arange(size, dtype=np.float64),\n }\n )\n dd.from_pandas(df, npartitions=4).to_parquet(\n str(tmpdir), engine=\"pyarrow\", row_group_size=row_group_size, 
write_index=False\n )\n\n # Read back with both `split_row_groups>1` and\n # `aggregate_files=True`\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n split_row_groups=split_row_groups,\n aggregate_files=True,\n )\n\n # Check that we are aggregating files as expected\n npartitions_expected = math.ceil((size / row_group_size) / split_row_groups)\n assert ddf2.npartitions == npartitions_expected\n assert len(ddf2) == size\n assert_eq(df, ddf2, check_index=False)\n\n\n@PYARROW_MARK\ndef test_split_row_groups_filter(tmpdir, engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n search_val = 600\n filters = [(\"f\", \"==\", search_val)]\n\n dd.from_pandas(df, npartitions=4).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf2 = dd.read_parquet(tmp, engine=engine)\n ddf3 = dd.read_parquet(\n tmp,\n engine=engine,\n gather_statistics=True,\n split_row_groups=True,\n filters=filters,\n )\n\n assert (ddf3[\"i32\"] == search_val).any().compute()\n assert_eq(\n ddf2[ddf2[\"i32\"] == search_val].compute(),\n ddf3[ddf3[\"i32\"] == search_val].compute(),\n )\n\n\n@ANY_ENGINE_MARK\ndef test_optimize_getitem_and_nonblockwise(tmpdir):\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2[[\"a\", \"b\"]].rolling(3).max().compute()\n\n\n@ANY_ENGINE_MARK\ndef test_optimize_and_not(tmpdir):\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2a = df2[\"a\"].groupby(df2[\"c\"]).first().to_delayed()\n df2b = df2[\"b\"].groupby(df2[\"c\"]).first().to_delayed()\n df2c = df2[[\"a\", \"b\"]].rolling(2).max().to_delayed()\n df2d = df2.rolling(2).max().to_delayed()\n (result,) = dask.compute(df2a + df2b + df2c + df2d)\n\n expected = [\n dask.compute(df2a)[0][0],\n dask.compute(df2b)[0][0],\n dask.compute(df2c)[0][0],\n dask.compute(df2d)[0][0],\n ]\n for a, b in zip(result, expected):\n assert_eq(a, b)\n\n\n@write_read_engines()\ndef test_chunksize_empty(tmpdir, write_engine, read_engine):\n df = pd.DataFrame({\"a\": pd.Series(dtype=\"int\"), \"b\": pd.Series(dtype=\"float\")})\n ddf1 = dd.from_pandas(df, npartitions=1)\n ddf1.to_parquet(tmpdir, engine=write_engine)\n ddf2 = dd.read_parquet(tmpdir, engine=read_engine, chunksize=\"1MiB\")\n assert_eq(ddf1, ddf2, check_index=False)\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"metadata\", [True, False])\n@pytest.mark.parametrize(\"partition_on\", [None, \"a\"])\n@pytest.mark.parametrize(\"chunksize\", [4096, \"1MiB\"])\n@write_read_engines()\ndef test_chunksize_files(\n tmpdir, chunksize, partition_on, write_engine, read_engine, metadata\n):\n\n if partition_on and read_engine == \"fastparquet\" and not metadata:\n pytest.skip(\"Fastparquet requires _metadata for partitioned data.\")\n\n df_size = 100\n df1 = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n }\n )\n ddf1 = dd.from_pandas(df1, npartitions=9)\n\n ddf1.to_parquet(\n str(tmpdir),\n engine=write_engine,\n partition_on=partition_on,\n 
write_metadata_file=metadata,\n write_index=False,\n )\n\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=read_engine,\n chunksize=chunksize,\n aggregate_files=partition_on if partition_on else True,\n )\n\n # Check that files where aggregated as expected\n if chunksize == 4096:\n assert ddf2.npartitions < ddf1.npartitions\n elif chunksize == \"1MiB\":\n if partition_on:\n assert ddf2.npartitions == 3\n else:\n assert ddf2.npartitions == 1\n\n # Check that the final data is correct\n if partition_on:\n df2 = ddf2.compute().sort_values([\"b\", \"c\"])\n df1 = df1.sort_values([\"b\", \"c\"])\n assert_eq(df1[[\"b\", \"c\"]], df2[[\"b\", \"c\"]], check_index=False)\n else:\n assert_eq(ddf1, ddf2, check_divisions=False, check_index=False)\n\n\n@write_read_engines()\n@pytest.mark.parametrize(\"aggregate_files\", [\"a\", \"b\"])\ndef test_chunksize_aggregate_files(tmpdir, write_engine, read_engine, aggregate_files):\n\n chunksize = \"1MiB\"\n partition_on = [\"a\", \"b\"]\n df_size = 100\n df1 = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.choice([\"small\", \"large\"], size=df_size),\n \"c\": np.random.random(size=df_size),\n \"d\": np.random.randint(1, 100, size=df_size),\n }\n )\n ddf1 = dd.from_pandas(df1, npartitions=9)\n\n ddf1.to_parquet(\n str(tmpdir),\n engine=write_engine,\n partition_on=partition_on,\n write_index=False,\n )\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=read_engine,\n chunksize=chunksize,\n aggregate_files=aggregate_files,\n )\n\n # Check that files where aggregated as expected\n if aggregate_files == \"a\":\n assert ddf2.npartitions == 3\n elif aggregate_files == \"b\":\n assert ddf2.npartitions == 6\n\n # Check that the final data is correct\n df2 = ddf2.compute().sort_values([\"c\", \"d\"])\n df1 = df1.sort_values([\"c\", \"d\"])\n assert_eq(df1[[\"c\", \"d\"]], df2[[\"c\", \"d\"]], check_index=False)\n\n\n@pytest.mark.parametrize(\"metadata\", [True, False])\n@pytest.mark.parametrize(\"chunksize\", [None, 1024, 4096, \"1MiB\"])\ndef test_chunksize(tmpdir, chunksize, engine, metadata):\n nparts = 2\n df_size = 100\n row_group_size = 5\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n \"index\": np.arange(0, df_size),\n }\n ).set_index(\"index\")\n\n ddf1 = dd.from_pandas(df, npartitions=nparts)\n ddf1.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n row_group_size=row_group_size,\n write_metadata_file=metadata,\n )\n\n if metadata:\n path = str(tmpdir)\n else:\n dirname = str(tmpdir)\n files = os.listdir(dirname)\n assert \"_metadata\" not in files\n path = os.path.join(dirname, \"*.parquet\")\n\n ddf2 = dd.read_parquet(\n path,\n engine=engine,\n chunksize=chunksize,\n split_row_groups=True,\n gather_statistics=True,\n index=\"index\",\n aggregate_files=True,\n )\n\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n num_row_groups = df_size // row_group_size\n if not chunksize:\n assert ddf2.npartitions == num_row_groups\n else:\n # Check that we are really aggregating\n assert ddf2.npartitions < num_row_groups\n if chunksize == \"1MiB\":\n # Largest chunksize will result in\n # a single output partition\n assert ddf2.npartitions == 1\n\n\n@write_read_engines()\ndef test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):\n path = str(tmpdir.join(\"test.parquet\"))\n pdf = df.copy()\n pdf.index.name = \"index\"\n pdf.to_parquet(\n path, 
engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n ddf_read = dd.read_parquet(\n path,\n engine=read_engine,\n chunksize=\"10 kiB\",\n gather_statistics=True,\n split_row_groups=True,\n index=\"index\",\n )\n\n assert_eq(pdf, ddf_read)\n\n\n@FASTPARQUET_MARK\ndef test_read_pandas_fastparquet_partitioned(tmpdir, engine):\n pdf = pd.DataFrame(\n [{\"str\": str(i), \"int\": i, \"group\": \"ABC\"[i % 3]} for i in range(6)]\n )\n path = str(tmpdir)\n pdf.to_parquet(path, partition_cols=[\"group\"], engine=\"fastparquet\")\n ddf_read = dd.read_parquet(path, engine=engine)\n\n assert len(ddf_read[\"group\"].compute()) == 6\n assert len(ddf_read.compute().group) == 6\n\n\ndef test_read_parquet_getitem_skip_when_getting_read_parquet(tmpdir, engine):\n # https://github.com/dask/dask/issues/5893\n pdf = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6], \"B\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]})\n path = os.path.join(str(tmpdir), \"data.parquet\")\n pd_engine = \"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\"\n pdf.to_parquet(path, engine=pd_engine)\n\n ddf = dd.read_parquet(path, engine=engine)\n a, b = dask.optimize(ddf[\"A\"], ddf)\n\n # Make sure we are still allowing the getitem optimization\n ddf = ddf[\"A\"]\n dsk = optimize_dataframe_getitem(ddf.dask, keys=[(ddf._name, 0)])\n read = [key for key in dsk.layers if key.startswith(\"read-parquet\")][0]\n subgraph = dsk.layers[read]\n assert isinstance(subgraph, DataFrameIOLayer)\n assert subgraph.columns == [\"A\"]\n\n\n@pytest.mark.parametrize(\"gather_statistics\", [None, True])\n@write_read_engines()\ndef test_filter_nonpartition_columns(\n tmpdir, write_engine, read_engine, gather_statistics\n):\n tmpdir = str(tmpdir)\n df_write = pd.DataFrame(\n {\n \"id\": [1, 2, 3, 4] * 4,\n \"time\": np.arange(16),\n \"random\": np.random.choice([\"cat\", \"dog\"], size=16),\n }\n )\n ddf_write = dd.from_pandas(df_write, npartitions=4)\n ddf_write.to_parquet(\n tmpdir, write_index=False, partition_on=[\"id\"], engine=write_engine\n )\n ddf_read = dd.read_parquet(\n tmpdir,\n index=False,\n engine=read_engine,\n gather_statistics=gather_statistics,\n filters=[((\"time\", \"<\", 5))],\n )\n df_read = ddf_read.compute()\n assert len(df_read) == len(df_read[df_read[\"time\"] < 5])\n assert df_read[\"time\"].max() < 5\n\n\n@PYARROW_MARK\ndef test_pandas_metadata_nullable_pyarrow(tmpdir):\n tmpdir = str(tmpdir)\n\n ddf1 = dd.from_pandas(\n pd.DataFrame(\n {\n \"A\": pd.array([1, None, 2], dtype=\"Int64\"),\n \"B\": pd.array([\"dog\", \"cat\", None], dtype=\"str\"),\n }\n ),\n npartitions=1,\n )\n ddf1.to_parquet(tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert_eq(ddf1, ddf2, check_index=False)\n\n\n@PYARROW_MARK\ndef test_pandas_timestamp_overflow_pyarrow(tmpdir):\n info = np.iinfo(np.dtype(\"int64\"))\n arr_numeric = np.linspace(\n start=info.min + 2, stop=info.max, num=1024, dtype=\"int64\"\n )\n arr_dates = arr_numeric.astype(\"datetime64[ms]\")\n\n table = pa.Table.from_arrays([pa.array(arr_dates)], names=[\"ts\"])\n pa.parquet.write_table(\n table, f\"{tmpdir}/file.parquet\", use_deprecated_int96_timestamps=False\n )\n\n # This will raise by default due to overflow\n with pytest.raises(pa.lib.ArrowInvalid) as e:\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\").compute()\n assert \"out of bounds\" in str(e.value)\n\n if pa_version >= parse_version(\"5.0.0\"):\n from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine as ArrowEngine\n else:\n from 
dask.dataframe.io.parquet.arrow import ArrowEngine\n\n class ArrowEngineWithTimestampClamp(ArrowEngine):\n @classmethod\n def clamp_arrow_datetimes(cls, arrow_table: pa.Table) -> pa.Table:\n \"\"\"Constrain datetimes to be valid for pandas\n\n Since pandas works in ns precision and arrow / parquet defaults to ms\n precision we need to clamp our datetimes to something reasonable\"\"\"\n\n new_columns = []\n for i, col in enumerate(arrow_table.columns):\n if pa.types.is_timestamp(col.type) and (\n col.type.unit in (\"s\", \"ms\", \"us\")\n ):\n multiplier = {\"s\": 1_0000_000_000, \"ms\": 1_000_000, \"us\": 1_000}[\n col.type.unit\n ]\n\n original_type = col.type\n\n series: pd.Series = col.cast(pa.int64()).to_pandas()\n info = np.iinfo(np.dtype(\"int64\"))\n # constrain data to be within valid ranges\n series.clip(\n lower=info.min // multiplier + 1,\n upper=info.max // multiplier,\n inplace=True,\n )\n new_array = pa.array(series, pa.int64())\n new_array = new_array.cast(original_type)\n new_columns.append(new_array)\n else:\n new_columns.append(col)\n\n return pa.Table.from_arrays(new_columns, names=arrow_table.column_names)\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n fixed_arrow_table = cls.clamp_arrow_datetimes(arrow_table)\n return super()._arrow_table_to_pandas(\n fixed_arrow_table, categories, **kwargs\n )\n\n # this should not fail, but instead produce timestamps that are in the valid range\n dd.read_parquet(str(tmpdir), engine=ArrowEngineWithTimestampClamp).compute()\n\n\n@pytest.mark.parametrize(\n \"write_cols\",\n [[\"part\", \"col\"], [\"part\", \"kind\", \"col\"]],\n)\ndef test_partitioned_column_overlap(tmpdir, engine, write_cols):\n\n tmpdir.mkdir(\"part=a\")\n tmpdir.mkdir(\"part=b\")\n path0 = str(tmpdir.mkdir(\"part=a/kind=x\"))\n path1 = str(tmpdir.mkdir(\"part=b/kind=x\"))\n path0 = os.path.join(path0, \"data.parquet\")\n path1 = os.path.join(path1, \"data.parquet\")\n\n _df1 = pd.DataFrame({\"part\": \"a\", \"kind\": \"x\", \"col\": range(5)})\n _df2 = pd.DataFrame({\"part\": \"b\", \"kind\": \"x\", \"col\": range(5)})\n df1 = _df1[write_cols]\n df2 = _df2[write_cols]\n df1.to_parquet(path0, index=False)\n df2.to_parquet(path1, index=False)\n\n if engine == \"fastparquet\":\n path = [path0, path1]\n else:\n path = str(tmpdir)\n\n if write_cols == [\"part\", \"kind\", \"col\"]:\n result = dd.read_parquet(path, engine=engine)\n expect = pd.concat([_df1, _df2], ignore_index=True)\n assert_eq(result, expect, check_index=False)\n else:\n # For now, partial overlap between partition columns and\n # real columns is not allowed\n with pytest.raises(ValueError):\n dd.read_parquet(path, engine=engine)\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\n \"write_cols\",\n [[\"col\"], [\"part\", \"col\"]],\n)\ndef test_partitioned_no_pandas_metadata(tmpdir, engine, write_cols):\n # See: https://github.com/dask/dask/issues/8087\n\n # Manually construct directory-partitioned dataset\n path1 = tmpdir.mkdir(\"part=a\")\n path2 = tmpdir.mkdir(\"part=b\")\n path1 = os.path.join(path1, \"data.parquet\")\n path2 = os.path.join(path2, \"data.parquet\")\n\n # Write partitions without parquet metadata.\n # Note that we always use pyarrow to do this\n # (regardless of the `engine`)\n _df1 = pd.DataFrame({\"part\": \"a\", \"col\": range(5)})\n _df2 = pd.DataFrame({\"part\": \"b\", \"col\": range(5)})\n t1 = pa.Table.from_pandas(\n _df1[write_cols],\n preserve_index=False,\n ).replace_schema_metadata(metadata={})\n 
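# replace_schema_metadata(metadata={}) strips the \"pandas\" metadata block, so\n    # the reader has to reconstruct dtypes and the partition column from the raw\n    # arrow schema and the key=value directory names alone\n    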
pq.write_table(t1, path1)\n t2 = pa.Table.from_pandas(\n _df2[write_cols],\n preserve_index=False,\n ).replace_schema_metadata(metadata={})\n pq.write_table(t2, path2)\n\n # Check results\n expect = pd.concat([_df1, _df2], ignore_index=True)\n result = dd.read_parquet(str(tmpdir), engine=engine)\n result[\"part\"] = result[\"part\"].astype(\"object\")\n assert_eq(result[list(expect.columns)], expect, check_index=False)\n\n\n@fp_pandas_xfail\ndef test_partitioned_preserve_index(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n size = 1_000\n npartitions = 4\n b = np.arange(npartitions).repeat(size // npartitions)\n data = pd.DataFrame(\n {\n \"myindex\": np.arange(size),\n \"A\": np.random.random(size=size),\n \"B\": pd.Categorical(b),\n }\n ).set_index(\"myindex\")\n data.index.name = None\n df1 = dd.from_pandas(data, npartitions=npartitions)\n df1.to_parquet(tmp, partition_on=\"B\", engine=write_engine)\n\n expect = data[data[\"B\"] == 1]\n got = dd.read_parquet(tmp, engine=read_engine, filters=[(\"B\", \"==\", 1)])\n assert_eq(expect, got)\n\n\ndef test_from_pandas_preserve_none_index(tmpdir, engine):\n if engine.startswith(\"pyarrow\"):\n pytest.importorskip(\"pyarrow\", minversion=\"0.15.0\")\n\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"a\": [1, 2], \"b\": [4, 5], \"c\": [6, 7]}).set_index(\"c\")\n df.index.name = None\n df.to_parquet(\n fn,\n engine=\"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\",\n index=True,\n )\n\n expect = pd.read_parquet(fn)\n got = dd.read_parquet(fn, engine=engine)\n assert_eq(expect, got)\n\n\ndef test_multi_partition_none_index_false(tmpdir, engine):\n if engine.startswith(\"pyarrow\"):\n pytest.importorskip(\"pyarrow\", minversion=\"0.15.0\")\n write_engine = \"pyarrow\"\n else:\n assert engine == \"fastparquet\"\n write_engine = \"fastparquet\"\n\n # Write dataset without dask.to_parquet\n ddf1 = ddf.reset_index(drop=True)\n for i, part in enumerate(ddf1.partitions):\n path = tmpdir.join(f\"test.{i}.parquet\")\n part.compute().to_parquet(str(path), engine=write_engine)\n\n # Read back with index=False\n ddf2 = dd.read_parquet(str(tmpdir), index=False, engine=engine)\n assert_eq(ddf1, ddf2)\n\n\n@write_read_engines()\ndef test_from_pandas_preserve_none_rangeindex(tmpdir, write_engine, read_engine):\n # See GitHub Issue#6348\n fn = str(tmpdir.join(\"test.parquet\"))\n df0 = pd.DataFrame({\"t\": [1, 2, 3]}, index=pd.RangeIndex(start=1, stop=4))\n df0.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n df1 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df0, df1.compute())\n\n\ndef test_illegal_column_name(tmpdir, engine):\n # Make sure user is prevented from preserving a \"None\" index\n # name if there is already a column using the special `null_name`\n null_name = \"__null_dask_index__\"\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"x\": [1, 2], null_name: [4, 5]}).set_index(\"x\")\n df.index.name = None\n ddf = dd.from_pandas(df, npartitions=2)\n\n # If we don't want to preserve the None index name, the\n # write should work, but the user should be warned\n with pytest.warns(UserWarning, match=null_name):\n ddf.to_parquet(fn, engine=engine, write_index=False)\n\n # If we do want to preserve the None index name, should\n # get a ValueError for having an illegal column name\n with pytest.raises(ValueError) as e:\n ddf.to_parquet(fn, engine=engine)\n assert null_name in str(e.value)\n\n\ndef test_divisions_with_null_partition(tmpdir, 
engine):\n df = pd.DataFrame({\"a\": [1, 2, None, None], \"b\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(str(tmpdir), engine=engine, write_index=False)\n\n ddf_read = dd.read_parquet(str(tmpdir), engine=engine, index=\"a\")\n assert ddf_read.divisions == (None, None, None)\n\n\n@PYARROW_MARK\ndef test_pyarrow_dataset_simple(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine)\n read_df = dd.read_parquet(fn, engine=\"pyarrow-dataset\")\n read_df.compute()\n assert_eq(ddf, read_df)\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"test_filter\", [True, False])\ndef test_pyarrow_dataset_partitioned(tmpdir, engine, test_filter):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine, partition_on=\"b\")\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")] if test_filter else None,\n )\n\n if test_filter:\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df.compute())\n else:\n assert_eq(ddf, read_df)\n\n\n@PYARROW_MARK\ndef test_pyarrow_dataset_read_from_paths(tmpdir):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=\"pyarrow\", partition_on=\"b\")\n\n with pytest.warns(FutureWarning):\n read_df_1 = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")],\n read_from_paths=False,\n )\n\n read_df_2 = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")],\n )\n\n assert_eq(read_df_1, read_df_2)\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df_2.compute())\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [True, False])\ndef test_pyarrow_dataset_filter_partitioned(tmpdir, split_row_groups):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a\": [4, 5, 6],\n \"b\": [\"a\", \"b\", \"b\"],\n \"c\": [\"A\", \"B\", \"B\"],\n }\n )\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=\"pyarrow\", partition_on=[\"b\", \"c\"])\n\n # Filter on a a non-partition column\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow-dataset\",\n split_row_groups=split_row_groups,\n filters=[(\"a\", \"==\", 5)],\n )\n assert_eq(\n read_df.compute()[[\"a\"]],\n df[df[\"a\"] == 5][[\"a\"]],\n check_index=False,\n )\n\n\n@PYARROW_MARK\ndef test_parquet_pyarrow_write_empty_metadata(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int\", \"int\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df = dd.from_delayed([df_a, df_b, df_c])\n\n try:\n df.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n except AttributeError:\n pytest.fail(\"Unexpected AttributeError\")\n\n # Check that metadata files where written\n files = os.listdir(tmpdir)\n assert \"_metadata\" in files\n assert 
\"_common_metadata\" in files\n\n # Check that the schema includes pandas_metadata\n schema_common = pq.ParquetFile(\n os.path.join(tmpdir, \"_common_metadata\")\n ).schema.to_arrow_schema()\n pandas_metadata = schema_common.pandas_metadata\n assert pandas_metadata\n assert pandas_metadata.get(\"index_columns\", False)\n\n\n@PYARROW_MARK\ndef test_parquet_pyarrow_write_empty_metadata_append(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [2, 0, 2, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df1 = dd.from_delayed([df_a, df_b])\n df1.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int64\", \"int64\")\n )\n df_d = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [3, 3, 4, 4], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df2 = dd.from_delayed([df_c, df_d])\n df2.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=True,\n ignore_divisions=True,\n )\n\n\n@PYARROW_MARK\n@pytest.mark.parametrize(\"partition_on\", [None, \"a\"])\n@write_read_engines()\ndef test_create_metadata_file(tmpdir, write_engine, read_engine, partition_on):\n tmpdir = str(tmpdir)\n\n # Write ddf without a _metadata file\n df1 = pd.DataFrame({\"b\": range(100), \"a\": [\"A\", \"B\", \"C\", \"D\"] * 25})\n df1.index.name = \"myindex\"\n ddf1 = dd.from_pandas(df1, npartitions=10)\n ddf1.to_parquet(\n tmpdir,\n write_metadata_file=False,\n partition_on=partition_on,\n engine=write_engine,\n )\n\n # Add global _metadata file\n if partition_on:\n fns = glob.glob(os.path.join(tmpdir, partition_on + \"=*/*.parquet\"))\n else:\n fns = glob.glob(os.path.join(tmpdir, \"*.parquet\"))\n dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n )\n\n # Check that we can now read the ddf\n # with the _metadata file present\n ddf2 = dd.read_parquet(\n tmpdir,\n gather_statistics=True,\n split_row_groups=False,\n engine=read_engine,\n index=\"myindex\", # python-3.6 CI\n )\n if partition_on:\n ddf1 = df1.sort_values(\"b\")\n ddf2 = ddf2.compute().sort_values(\"b\")\n ddf2.a = ddf2.a.astype(\"object\")\n assert_eq(ddf1, ddf2)\n\n # Check if we can avoid writing an actual file\n fmd = dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n out_dir=False, # Avoid writing file\n )\n\n # Check that the in-memory metadata is the same as\n # the metadata in the file.\n fmd_file = pq.ParquetFile(os.path.join(tmpdir, \"_metadata\")).metadata\n assert fmd.num_rows == fmd_file.num_rows\n assert fmd.num_columns == fmd_file.num_columns\n assert fmd.num_row_groups == fmd_file.num_row_groups\n\n\ndef test_read_write_overwrite_is_true(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n\n # Create a Dask DataFrame if size (100, 10) with 5 partitions and write to local\n ddf = dd.from_pandas(\n pd.DataFrame(\n np.random.randint(low=0, high=100, size=(100, 10)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n ),\n npartitions=5,\n )\n ddf = ddf.reset_index(drop=True)\n dd.to_parquet(ddf, tmpdir, engine=engine, overwrite=True)\n\n # Keep the contents of the DataFrame constatn but change the # of partitions\n ddf2 = 
ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n dd.to_parquet(ddf2, tmpdir, engine=engine, overwrite=True)\n\n # Assert the # of files written are identical to the number of\n # Dask DataFrame partitions (we exclude _metadata and _common_metadata)\n files = os.listdir(tmpdir)\n files = [f for f in files if f not in [\"_common_metadata\", \"_metadata\"]]\n assert len(files) == ddf2.npartitions\n\n\ndef test_read_write_partition_on_overwrite_is_true(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n from pathlib import Path\n\n # Create a Dask DataFrame with 5 partitions and write to local, partitioning on the column A and column B\n df = pd.DataFrame(\n np.vstack(\n (\n np.full((50, 3), 0),\n np.full((50, 3), 1),\n np.full((20, 3), 2),\n )\n )\n )\n df.columns = [\"A\", \"B\", \"C\"]\n ddf = dd.from_pandas(df, npartitions=5)\n dd.to_parquet(ddf, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n\n # Get the total number of files and directories from the original write\n files_ = Path(tmpdir).rglob(\"*\")\n files = [f.as_posix() for f in files_]\n # Keep the contents of the DataFrame constant but change the # of partitions\n ddf2 = ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n # Get the total number of files and directories from the original write\n dd.to_parquet(ddf2, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n files2_ = Path(tmpdir).rglob(\"*\")\n files2 = [f.as_posix() for f in files2_]\n # After reducing the # of partitions and overwriting, we expect\n # there to be fewer total files than were originally written\n assert len(files2) < len(files)\n\n\ndef test_to_parquet_overwrite_raises(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n # Check that overwrite=True will raise an error if the\n # specified path is the current working directory\n df = pd.DataFrame({\"a\": range(12)})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, \"./\", engine=engine, overwrite=True)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, tmpdir, engine=engine, append=True, overwrite=True)\n\n\ndef test_dir_filter(tmpdir, engine):\n # github #6898\n df = pd.DataFrame.from_dict(\n {\n \"A\": {\n 0: 351.0,\n 1: 355.0,\n 2: 358.0,\n 3: 266.0,\n 4: 266.0,\n 5: 268.0,\n 6: np.nan,\n },\n \"B\": {\n 0: 2063.0,\n 1: 2051.0,\n 2: 1749.0,\n 3: 4281.0,\n 4: 3526.0,\n 5: 3462.0,\n 6: np.nan,\n },\n \"year\": {0: 2019, 1: 2019, 2: 2020, 3: 2020, 4: 2020, 5: 2020, 6: 2020},\n }\n )\n ddf = dask.dataframe.from_pandas(df, npartitions=1)\n ddf.to_parquet(tmpdir, partition_on=\"year\", engine=engine)\n dd.read_parquet(tmpdir, filters=[(\"year\", \"==\", 2020)], engine=engine)\n assert all\n\n\n@PYARROW_MARK\ndef test_roundtrip_decimal_dtype(tmpdir):\n # https://github.com/dask/dask/issues/6948\n tmpdir = str(tmpdir)\n\n data = [\n {\n \"ts\": pd.to_datetime(\"2021-01-01\", utc=\"Europe/Berlin\"),\n \"col1\": Decimal(\"123.00\"),\n }\n for i in range(23)\n ]\n ddf1 = dd.from_pandas(pd.DataFrame(data), npartitions=1)\n\n ddf1.to_parquet(path=tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert ddf1[\"col1\"].dtype == ddf2[\"col1\"].dtype\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n\ndef test_roundtrip_rename_columns(tmpdir, 
engine):\n # https://github.com/dask/dask/issues/7017\n\n path = os.path.join(str(tmpdir), \"test.parquet\")\n df1 = pd.DataFrame(columns=[\"a\", \"b\", \"c\"], data=np.random.uniform(size=(10, 3)))\n df1.to_parquet(path)\n\n # read it with dask and rename columns\n ddf2 = dd.read_parquet(path, engine=engine)\n ddf2.columns = [\"d\", \"e\", \"f\"]\n df1.columns = [\"d\", \"e\", \"f\"]\n\n assert_eq(df1, ddf2.compute())\n\n\ndef test_custom_metadata(tmpdir, engine):\n # Write a parquet dataset with custom metadata\n\n # Define custom metadata\n custom_metadata = {b\"my_key\": b\"my_data\"}\n\n # Write parquet dataset\n path = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n dd.from_pandas(df, npartitions=2).to_parquet(\n path,\n engine=engine,\n custom_metadata=custom_metadata,\n )\n\n # Check that data is correct\n assert_eq(df, dd.read_parquet(path, engine=engine))\n\n # Require pyarrow.parquet to check key/value metadata\n if pq:\n # Read footer metadata and _metadata.\n # Check that it contains keys/values from `custom_metadata`\n files = glob.glob(os.path.join(path, \"*.parquet\"))\n files += [os.path.join(path, \"_metadata\")]\n for fn in files:\n _md = pq.ParquetFile(fn).metadata.metadata\n for k, v in custom_metadata.items():\n assert _md[k] == custom_metadata[k]\n\n # Make sure we raise an error if the custom metadata\n # includes a b\"pandas\" key\n custom_metadata = {b\"pandas\": b\"my_new_pandas_md\"}\n with pytest.raises(ValueError) as e:\n dd.from_pandas(df, npartitions=2).to_parquet(\n path,\n engine=engine,\n custom_metadata=custom_metadata,\n )\n assert \"User-defined key/value\" in str(e.value)\n\n\n@pytest.mark.parametrize(\"gather_statistics\", [True, False, None])\ndef test_ignore_metadata_file(tmpdir, engine, gather_statistics):\n tmpdir = str(tmpdir)\n dataset_with_bad_metadata = os.path.join(tmpdir, \"data1\")\n dataset_without_metadata = os.path.join(tmpdir, \"data2\")\n\n # Write two identical datasets without any _metadata file\n df1 = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf1.to_parquet(\n path=dataset_with_bad_metadata, engine=engine, write_metadata_file=False\n )\n ddf1.to_parquet(\n path=dataset_without_metadata, engine=engine, write_metadata_file=False\n )\n\n # Copy \"bad\" metadata into `dataset_with_bad_metadata`\n assert \"_metadata\" not in os.listdir(dataset_with_bad_metadata)\n with open(os.path.join(dataset_with_bad_metadata, \"_metadata\"), \"w\") as f:\n f.write(\"INVALID METADATA\")\n assert \"_metadata\" in os.listdir(dataset_with_bad_metadata)\n assert \"_metadata\" not in os.listdir(dataset_without_metadata)\n\n # Read back the datasets with `ignore_metadata_file=True`, and\n # test that the results are the same\n if engine != \"pyarrow-legacy\":\n ddf2a = dd.read_parquet(\n dataset_with_bad_metadata,\n engine=engine,\n ignore_metadata_file=True,\n gather_statistics=gather_statistics,\n )\n ddf2b = dd.read_parquet(\n dataset_without_metadata,\n engine=engine,\n ignore_metadata_file=True,\n gather_statistics=gather_statistics,\n )\n assert_eq(ddf2a, ddf2b)\n else:\n # Check that \"pyarrow-legacy\" raises a ValueError\n with pytest.raises(ValueError):\n dd.read_parquet(\n dataset_with_bad_metadata,\n engine=engine,\n ignore_metadata_file=True,\n )\n\n\n@pytest.mark.parametrize(\"write_metadata_file\", [True, False])\n@pytest.mark.parametrize(\"metadata_task_size\", [2, 0])\ndef test_metadata_task_size(tmpdir, engine, write_metadata_file, 
metadata_task_size):\n\n # Write simple dataset\n tmpdir = str(tmpdir)\n df1 = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf1 = dd.from_pandas(df1, npartitions=10)\n ddf1.to_parquet(\n path=str(tmpdir), engine=engine, write_metadata_file=write_metadata_file\n )\n\n # Read back\n if engine != \"pyarrow-legacy\" or not metadata_task_size:\n ddf2a = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n )\n ddf2b = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n metadata_task_size=metadata_task_size,\n )\n assert_eq(ddf2a, ddf2b)\n\n with dask.config.set(\n {\"dataframe.parquet.metadata-task-size-local\": metadata_task_size}\n ):\n ddf2c = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n )\n assert_eq(ddf2b, ddf2c)\n\n else:\n # Check that other engines raise a ValueError\n with pytest.raises(ValueError):\n dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n metadata_task_size=metadata_task_size,\n )\n\n\ndef test_extra_file(tmpdir, engine):\n # Check that read_parquet can handle spark output\n # See: https://github.com/dask/dask/issues/8087\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(tmpdir, engine=engine)\n open(os.path.join(tmpdir, \"_SUCCESS\"), \"w\").close()\n open(os.path.join(tmpdir, \"part.0.parquet.crc\"), \"w\").close()\n os.remove(os.path.join(tmpdir, \"_metadata\"))\n out = dd.read_parquet(tmpdir, engine=engine)\n assert_eq(out, df)\n\n if engine != \"pyarrow-legacy\":\n # For \"fastparquet\" and \"pyarrow-dataset\", we can pass the\n # expected file extension, or avoid checking file extensions\n # by passing False. 
Check here that this works:\n\n # Should Work\n out = dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": \".parquet\"}\n )\n assert_eq(out, df)\n\n # Should Fail (for not capturing the _SUCCESS and crc files)\n with pytest.raises((OSError, pa.lib.ArrowInvalid)):\n dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": False}\n ).compute()\n\n # Should Fail (for filtering out all files)\n # (Related to: https://github.com/dask/dask/issues/8349)\n with pytest.raises(ValueError):\n dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": \".foo\"}\n ).compute()\n\n\ndef test_unsupported_extension_file(tmpdir, engine):\n # File extension shouldn't matter when we are only\n # reading a single file.\n # (See: https://github.com/dask/dask/issues/8349)\n fn = os.path.join(str(tmpdir), \"multi.foo\")\n df0 = pd.DataFrame({\"a\": range(10)})\n df0.to_parquet(fn, engine=engine.split(\"-\")[0])\n assert_eq(df0, dd.read_parquet(fn, engine=engine, index=False))\n\n\ndef test_unsupported_extension_dir(tmpdir, engine):\n # File extensions shouldn't matter when we have\n # a _metadata file\n # (Related to: https://github.com/dask/dask/issues/8349)\n path = str(tmpdir)\n ddf0 = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), 1)\n ddf0.to_parquet(path, engine=engine, name_function=lambda i: f\"part.{i}.foo\")\n assert_eq(ddf0, dd.read_parquet(path, engine=engine))\n\n\ndef test_custom_filename(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n df.to_parquet(fn, name_function=lambda x: f\"hi-{x}.parquet\", engine=engine)\n\n files = os.listdir(fn)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"hi-0.parquet\" in files\n assert \"hi-1.parquet\" in files\n assert_eq(df, dd.read_parquet(fn, engine=engine))\n\n\ndef test_custom_filename_works_with_pyarrow_when_append_is_true(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n df.to_parquet(fn, name_function=lambda x: f\"hi-{x * 2}.parquet\", engine=engine)\n\n pdf = pd.DataFrame(\n {\"num1\": [33], \"num2\": [44]},\n )\n df = dd.from_pandas(pdf, npartitions=1)\n if engine == \"fastparquet\":\n pytest.xfail(\n \"fastparquet errors our with IndexError when ``name_function`` is customized \"\n \"and append is set to True. We didn't do a detailed investigation for expediency. 
\"\n \"See this comment for the conversation: https://github.com/dask/dask/pull/7682#issuecomment-845243623\"\n )\n df.to_parquet(\n fn,\n name_function=lambda x: f\"hi-{x * 2}.parquet\",\n engine=engine,\n append=True,\n ignore_divisions=True,\n )\n files = os.listdir(fn)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"hi-0.parquet\" in files\n assert \"hi-2.parquet\" in files\n assert \"hi-4.parquet\" in files\n expected_pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4, 33], \"num2\": [7, 8, 9, 10, 44]},\n )\n actual = dd.read_parquet(fn, engine=engine, index=False)\n assert_eq(actual, expected_pdf, check_index=False)\n\n\ndef test_throws_error_if_custom_filename_is_invalid(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n with pytest.raises(\n ValueError, match=\"``name_function`` must be a callable with one argument.\"\n ):\n df.to_parquet(fn, name_function=\"whatever.parquet\", engine=engine)\n\n with pytest.raises(\n ValueError, match=\"``name_function`` must produce unique filenames.\"\n ):\n df.to_parquet(fn, name_function=lambda x: \"whatever.parquet\", engine=engine)\n\n\ndef test_custom_filename_with_partition(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\n \"first_name\": [\"frank\", \"li\", \"marcela\", \"luis\"],\n \"country\": [\"canada\", \"china\", \"venezuela\", \"venezuela\"],\n },\n )\n df = dd.from_pandas(pdf, npartitions=4)\n df.to_parquet(\n fn,\n partition_on=[\"country\"],\n name_function=lambda x: f\"{x}-cool.parquet\",\n write_index=False,\n )\n\n for _, dirs, files in os.walk(fn):\n for dir in dirs:\n assert dir in (\n \"country=canada\",\n \"country=china\",\n \"country=venezuela\",\n )\n for file in files:\n assert file in (\n \"0-cool.parquet\",\n \"1-cool.parquet\",\n \"2-cool.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )\n actual = dd.read_parquet(fn, engine=engine, index=False)\n assert_eq(\n pdf, actual, check_index=False, check_dtype=False, check_categorical=False\n )\n"
] |
[
[
"pandas.to_datetime",
"numpy.datetime64",
"numpy.array",
"pandas.Index",
"pandas.DatetimeIndex",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.date_range",
"pandas.timedelta_range",
"pandas.Timestamp",
"pandas.Categorical",
"pandas.api.types.CategoricalDtype",
"numpy.arange",
"numpy.random.randint",
"pandas.Series",
"numpy.random.random",
"numpy.unique"
],
[
"numpy.any",
"numpy.array",
"numpy.iterable",
"numpy.core.numeric.normalize_axis_tuple"
],
[
"numpy.array_equal",
"numpy.random.choice",
"numpy.random.rand",
"pandas.Timestamp",
"pandas.concat",
"numpy.random.random",
"numpy.dtype",
"numpy.full",
"numpy.random.normal",
"pandas.DataFrame",
"numpy.random.randint",
"numpy.arange",
"pandas.to_datetime",
"numpy.isnat",
"pandas.array",
"numpy.random.randn",
"numpy.datetime64",
"pandas.Index",
"pandas.DataFrame.from_dict",
"pandas.date_range",
"numpy.random.permutation",
"pandas.RangeIndex",
"pandas.Categorical",
"numpy.random.uniform",
"pandas.Series",
"numpy.repeat",
"numpy.linspace",
"pandas.read_parquet",
"numpy.unique"
]
] |
Snarp/nostalgebraist-autoresponder
|
[
"c22e033d5d091361f7f2d5d8cb609bd95077de6e"
] |
[
"src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Eleuther AI and HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch GPT Neo model. \"\"\"\n\"\"\"Modified from the original at transformers library @ commit b24ead87e1be6bce17e4ec5c953b6d028e4b3af7 -nost\"\"\"\n\nimport os\nfrom typing import Tuple\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom transformers.activations import ACT2FN\nfrom transformers.file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPast,\n BaseModelOutputWithPastAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n CausalLMOutputWithPast,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\nfrom transformers.models.gpt_neo.configuration_gpt_neo import GPTNeoConfig\n\nfrom transformer_utils.partial_forward import AfterStoppingPointException\n\n\nclass LazyLinearAPICompatible(nn.LazyLinear):\n def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n super().__init__(out_features=out_features, bias=bias)\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GPTNeoConfig\"\n_TOKENIZER_FOR_DOC = \"GPT2Tokenizer\"\n\nGPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"EleutherAI/gpt-neo-1.3B\",\n # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo\n]\n\n_CHECKPOINT_FOR_DOC = \"EleutherAI/gpt-neo-1.3B\"\n\n\ndef load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model\"\"\"\n try:\n import re\n\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(gpt_neo_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n if \"global_step\" not in name and \"adam\" not in name:\n array = tf.train.load_variable(tf_path, name)\n array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()\n name = name.replace(\"attn/q\", \"attn/attention/q_proj/w\")\n name = name.replace(\"attn/k\", \"attn/attention/k_proj/w\")\n name = name.replace(\"attn/v\", \"attn/attention/v_proj/w\")\n name = name.replace(\"attn/o\", \"attn/attention/out_proj/w\")\n name = name.replace(\"norm_1\", \"ln_1\")\n name = name.replace(\"norm_2\", \"ln_2\")\n name = name.replace(\"attn/compute_output_bias/o_b\", \"attn/attention/out_proj/b\")\n name = name.replace(\"conv1d_main/c_fc/kernel\", \"c_fc/w\")\n name = name.replace(\"conv1d_main/c_fc/bias\", \"c_fc/b\")\n name = name.replace(\"conv1d_main/c_proj/kernel\", \"c_proj/w\")\n name = name.replace(\"conv1d_main/c_proj/bias\", \"c_proj/b\")\n\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name[5:] # skip \"gpt2/\"\n name = name.split(\"/\")\n pointer = model.transformer\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if name[-1] == \"w\" and name[-2] in [\"out_proj\", \"k_proj\", \"q_proj\", \"v_proj\", \"c_proj\", \"c_fc\"]:\n array = array.transpose()\n\n if name == [\"wte\"]:\n # if vocab is padded, then trim off the padding embeddings\n array = array[: config.vocab_size]\n\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n\n # init the final linear layer using word embeddings\n embs = model.transformer.wte.weight\n lin = LazyLinearAPICompatible(embs.size()[1], embs.size()[0], bias=False)\n lin.weight = embs\n model.set_output_embeddings(lin)\n return model\n\n\nclass GPTNeoAttentionMixin:\n \"\"\"\n A few attention related utilities for attention modules in GPT Neo, to be used as a mixin.\n \"\"\"\n def _split_heads(self, tensor, num_heads, attn_head_size):\n \"\"\"\n Splits hidden_size dim into attn_head_size and num_heads\n \"\"\"\n new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n tensor = tensor.view(*new_shape)\n if len(tensor.shape) == 5:\n return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)\n elif len(tensor.shape) == 4:\n return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}\")\n\n def _merge_heads(self, tensor, 
num_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden_size\n \"\"\"\n if len(tensor.shape) == 5:\n tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()\n elif len(tensor.shape) == 4:\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}\")\n new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def _attn(self, query, key, value, causal_mask, masked_bias, attn_dropout, attention_mask=None, head_mask=None):\n # Keep the attention weights computation in fp32 to avoid overflow issues\n query = query.to(torch.float32)\n key = key.to(torch.float32)\n\n # print((\"query\", query.shape, \"key\", key.transpose(-1, -2).shape))\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n attn_weights = torch.where(causal_mask, attn_weights, masked_bias.to(attn_weights.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.Softmax(dim=-1)(attn_weights)\n attn_weights = attn_weights.to(value.dtype)\n attn_weights = attn_dropout(attn_weights)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n\nclass GPTNeoSelfAttention(nn.Module, GPTNeoAttentionMixin):\n def __init__(self, attention_type, config):\n super().__init__()\n\n self.window_size = None\n max_positions = config.max_position_embeddings\n bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(\n 1, 1, max_positions, max_positions\n ).bool()\n\n if attention_type == \"local\":\n self.register_buffer(\n \"bias\",\n bias ^ torch.tril(bias, -config.window_size),\n )\n else:\n self.register_buffer(\n \"bias\",\n bias,\n )\n\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9))\n\n self.attn_dropout = nn.Dropout(config.attention_dropout)\n self.resid_dropout = nn.Dropout(config.resid_dropout)\n\n self.embed_dim = config.hidden_size\n self.num_heads = config.num_heads\n self.head_dim = self.embed_dim // self.num_heads\n if self.head_dim * self.num_heads != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).\"\n )\n\n self.k_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)\n self.v_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)\n self.q_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)\n self.out_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=True)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n layer_past=None,\n head_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n\n query = self.q_proj(hidden_states)\n key = self.k_proj(hidden_states)\n value = self.v_proj(hidden_states)\n\n query = self._split_heads(query, self.num_heads, self.head_dim)\n key = self._split_heads(key, self.num_heads, self.head_dim)\n value = self._split_heads(value, self.num_heads, self.head_dim)\n\n if layer_past is not None:\n past_key = layer_past[0]\n past_value = layer_past[1]\n key = torch.cat((past_key, key), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = (key, value)\n else:\n present = None\n\n query_length, key_length = query.size(-2), key.size(-2)\n 
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n attn_output, attn_weights = self._attn(\n query, key, value, causal_mask, self.masked_bias, self.attn_dropout, attention_mask, head_mask\n )\n\n attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)\n attn_output = self.out_proj(attn_output)\n attn_output = self.resid_dropout(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs # a, present, (attentions)\n\n\nclass GPTNeoAttention(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.layer_id = layer_id\n self.attention_layers = config.attention_layers\n self.attention_type = self.attention_layers[layer_id]\n\n if self.attention_type in [\"global\", \"local\"]:\n self.attention = GPTNeoSelfAttention(self.attention_type, config)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: \"\n f\"{config.attention_layers}. Select attn layer types from ['global', 'local'] only.\"\n )\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n outputs = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n return outputs\n\n\nclass GPTNeoMLP(nn.Module):\n def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size\n super().__init__()\n embed_dim = config.hidden_size\n self.c_fc = LazyLinearAPICompatible(embed_dim, intermediate_size)\n self.c_proj = LazyLinearAPICompatible(intermediate_size, embed_dim)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.c_fc(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.c_proj(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states\n\n\nclass GPTNeoBlock(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n hidden_size = config.hidden_size\n inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size\n self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.attn = GPTNeoAttention(config, layer_id)\n self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.mlp = GPTNeoMLP(inner_dim, config)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n residual = hidden_states\n hidden_states = self.ln_1(hidden_states)\n attn_outputs = self.attn(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n # residual connection\n hidden_states = attn_output + residual\n\n residual = hidden_states\n hidden_states = self.ln_2(hidden_states)\n feed_forward_hidden_states = self.mlp(hidden_states)\n # residual connection\n hidden_states = residual + feed_forward_hidden_states\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n\n return outputs # hidden_states, present, (attentions, 
cross_attentions)\n\n\nclass GPTNeoPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPTNeoConfig\n load_tf_weights = load_tf_weights_in_gpt_neo\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (LazyLinearAPICompatible,)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nGPT_NEO_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.GPTNeoConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nGPT_NEO_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):\n :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else\n ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be\n passed as ``input_ids``.\n\n Indices can be obtained using :class:`~transformers.GPTNeoTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.num_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which\n have their past given to this model should not be passed as ``input_ids`` as they have already been\n computed.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? 
<../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n\n If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see\n :obj:`past_key_values`).\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.\",\n GPT_NEO_START_DOCSTRING,\n)\nclass GPTNeoModel(GPTNeoPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embed_dim = config.hidden_size\n self.wte = nn.Embedding(config.vocab_size, self.embed_dim)\n self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)\n self.drop = nn.Dropout(config.embed_dropout)\n self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)])\n self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n # print(\"!!!past_key_values is None!!!\")\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n past_length = past_key_values[0][0].size(-2)\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n global_attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, 
from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n global_attention_mask = global_attention_mask[:, None, None, :]\n\n # Since global_attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n global_attention_mask = global_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n global_attention_mask = (1.0 - global_attention_mask) * -10000.0\n else:\n global_attention_mask = None\n\n # Local causal attention mask\n batch_size, seq_length = input_shape\n full_seq_length = seq_length + past_length\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x num_heads x N x N\n # head_mask has shape n_layer x batch x num_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.num_layers)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n hidden_states = inputs_embeds + position_embeds\n\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n hidden_states = self.drop(hidden_states)\n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n attn_type = self.config.attention_layers[i]\n attn_mask = global_attention_mask\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n None,\n attn_mask,\n head_mask[i],\n )\n else:\n try:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attn_mask,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n except AfterStoppingPointException as e:\n raise e\n except Exception as e:\n print(\"failed with:\")\n print(f\"\\t block {i}\")\n print(f\"\\t input_ids.shape {input_ids.shape}\")\n print(f\"\\t hidden_states.shape {hidden_states.shape}\")\n print(f\"\\t past shapes {layer_past[0].shape if layer_past else layer_past}\")\n raise e\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n GPT_NEO_START_DOCSTRING,\n)\nclass GPTNeoForCausalLM(GPTNeoPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n _keys_to_ignore_on_save = [r\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPTNeoModel(config)\n self.lm_head = LazyLinearAPICompatible(config.hidden_size, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n 
output_type=CausalLMOutputWithCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Compute loss in fp32 to match with mesh-tf version\n # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179\n lm_logits = lm_logits.to(torch.float32)\n\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n lm_logits = lm_logits.to(hidden_states.dtype)\n loss = loss.to(hidden_states.dtype)\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.Softmax",
"torch.arange",
"tensorflow.train.load_variable",
"torch.nn.CrossEntropyLoss",
"torch.tril",
"torch.from_numpy",
"tensorflow.train.list_variables",
"torch.ones",
"torch.tensor",
"torch.matmul",
"torch.nn.Embedding"
]
] |
076923/cv2-utils
|
[
"16919450a690870a80c127b41209e338867d3212"
] |
[
"cv2u/core/cluster.py"
] |
[
"import cv2\nimport numpy as np\nfrom sklearn.cluster import DBSCAN as skDBSCAN\n\n\ndef DBSCAN(src, eps, min_samples):\n arr = cv2.cvtColor(src, cv2.COLOR_BGR2LAB).reshape(-1, src.shape[2])\n clustering = skDBSCAN(eps=eps, min_samples=min_samples).fit(arr)\n labels = clustering.labels_ + 1\n maps = labels.reshape(src.shape[:2])\n return maps, labels\n \n \ndef drawDBSCAN(src, maps, labels):\n colors = []\n for lb in set(labels):\n mask = np.where(maps == lb, 255, 0).astype(np.uint8)\n color = list(map(int, list(cv2.mean(src, mask)[:src.shape[2]])))\n colors.append(np.array(color, dtype=np.uint8))\n\n colors = np.asarray(colors) \n dst = colors[labels].astype(np.uint8).reshape(src.shape)\n return dst\n "
] |
[
[
"numpy.where",
"numpy.array",
"sklearn.cluster.DBSCAN",
"numpy.asarray"
]
] |
sinemetu1/confidence
|
[
"a5c46ce72f4de217708358fb12417c032e08c40e"
] |
[
"spotify_confidence/examples.py"
] |
[
"# Copyright 2017-2020 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport numpy as np\nfrom itertools import product\n\n\ndef example_data_binomial():\n \"\"\"\n Returns an output dataframe with categorical\n features (country and test variation), and orginal features (date),\n as well as number of successes and total observations for each combination\n \"\"\"\n countries = ['ca', 'us']\n dates = pd.date_range('2018-01-01', '2018-02-01')\n variation_names = ['test', 'control', 'test2']\n\n # test ca, test us, control ca, control us, test2 ca, test2 us\n success_rates = [.3, .32, .24, .22, .25, .42]\n n_observations = [50, 80, 30, 50, 40, 50]\n\n return_df = pd.DataFrame()\n\n for i, (country, variation) in enumerate(\n product(countries, variation_names)):\n df = pd.DataFrame({'date': dates})\n df['country'] = country\n df['variation_name'] = variation\n df['total'] = np.random.poisson(n_observations[i], size=len(dates))\n df['success'] = df['total'].apply(\n lambda x: np.random.binomial(x, success_rates[i]))\n return_df = pd.concat([return_df, df], axis=0)\n\n return return_df\n\n\ndef example_data_gaussian():\n df = pd.DataFrame({\n 'variation_name': [\n 'test',\n 'control',\n 'test2',\n 'test',\n 'control',\n 'test2',\n 'test',\n 'control',\n 'test2',\n 'test',\n 'control',\n 'test2',\n 'test',\n 'control',\n 'test2',\n ],\n 'nr_of_items': [\n 500,\n 8,\n 100,\n 510,\n 8,\n 100,\n 520,\n 9,\n 104,\n 530,\n 7,\n 100,\n 530,\n 8,\n 103,\n ],\n 'nr_of_items_sumsq': [\n 2500,\n 12,\n 150,\n 2510,\n 13,\n 140,\n 2520,\n 14,\n 154,\n 2530,\n 15,\n 160,\n 2530,\n 16,\n 103,\n ],\n 'users': [\n 1010,\n 22,\n 150,\n 1000,\n 20,\n 153,\n 1030,\n 23,\n 154,\n 1000,\n 20,\n 150,\n 1040,\n 21,\n 155,\n ],\n 'days_since_reg': [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5],\n })\n\n return df\n"
] |
[
[
"pandas.DataFrame",
"pandas.date_range",
"numpy.random.binomial",
"pandas.concat"
]
] |
lluciesmith/DeepHalos
|
[
"ef97438ffde2872047aff078308b0a45e4107522"
] |
[
"paper_plots/plot_likelihood.py"
] |
[
"import numpy as np\nfrom plots import plots_for_predictions as pp\nimport sys\nsys.path.append('/Users/lls/Documents/mlhalos_code/')\nfrom mlhalos import distinct_colours as dc\nimport matplotlib.pyplot as plt\nfrom pickle import load\n\nc = dc.get_distinct(6)\n\npath = '/Users/lls/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'\np1 = np.load(path + \"seed_20/all_predicted_sim_6_epoch_09.npy\")\nt1 = np.load(path + \"seed_20/all_true_sim_6_epoch_09.npy\")\ng = np.load(path + \"seed_20/gamma.npy\")[9]\n\npath2 = '/Users/lls/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4/'\nscaler_training_set = load(open(path2 + 'scaler_output.pkl', 'rb'))\nslices = [-0.85, -0.6, 0, 0.5, 0.75, 0.95]\n\nf, a = pp.plot_likelihood_distribution(p1, t1, g, scaler_training_set, bins=None, fig=None, axes=None, color=c[4],\n title=None, legend=True, slices=slices)"
] |
[
[
"numpy.load"
]
] |
slowy07/tensor2robot
|
[
"c32179493e2d22bda44f72206d097c59f173dda0",
"c32179493e2d22bda44f72206d097c59f173dda0"
] |
[
"research/vrgripper/mse_decoder.py",
"models/regression_model.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Tensor2Robot Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Abstract decoder and MSE decoder.\n\"\"\"\n\nimport gin\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import slim\n\n\n@gin.configurable\nclass MSEDecoder(object):\n \"\"\"Default MSE decoder.\"\"\"\n\n def __call__(self, params, output_size):\n self._predictions = slim.fully_connected(\n params, output_size, activation_fn=None, scope='pose')\n return self._predictions\n\n def loss(self, labels):\n return tf.losses.mean_squared_error(labels=labels.action,\n predictions=self._predictions)\n",
"# coding=utf-8\n# Copyright 2021 The Tensor2Robot Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as python3\n\"\"\"TFModel abstract subclasses.\"\"\"\n\nimport abc\nfrom typing import Optional, Text\nimport warnings\n\nfrom absl import flags\nimport gin\nimport six\nfrom tensor2robot.models import abstract_model\nfrom tensor2robot.utils import tensorspec_utils\nimport tensorflow.compat.v1 as tf\n\nFLAGS = flags.FLAGS\nTRAIN = tf.estimator.ModeKeys.TRAIN\nEVAL = tf.estimator.ModeKeys.EVAL\nPREDICT = tf.estimator.ModeKeys.PREDICT\n\nRunConfigType = abstract_model.RunConfigType\nParamsType = abstract_model.ParamsType\nDictOrSpec = abstract_model.DictOrSpec\nModelTrainOutputType = abstract_model.ModelTrainOutputType\nExportOutputType = abstract_model.ExportOutputType\nwarnings.simplefilter('always', DeprecationWarning)\n\n\n@gin.configurable\n@six.add_metaclass(abc.ABCMeta)\nclass RegressionModel(abstract_model.AbstractT2RModel):\n \"\"\"Continuous-valued output using mean-squared error on target values.\"\"\"\n\n def __init__(self, action_size=2, **kwargs):\n warnings.warn(\n 'RegressionModel is deprecated. Subclass AbstractT2RModel instead',\n DeprecationWarning, stacklevel=2)\n\n super(RegressionModel, self).__init__(**kwargs)\n self._action_size = action_size\n\n @abc.abstractmethod\n def a_func(self,\n features,\n scope,\n mode,\n config = None,\n params = None,\n reuse=tf.AUTO_REUSE):\n \"\"\"A(state) regression function.\n\n This function can return a stochastic or a deterministic tensor.\n\n We only need to define the a_func and loss_fn to have a proper model.\n For more specialization please overwrite inference_network_fn, model_*_fn.\n\n Args:\n features: This is the first item returned from the input_fn and parsed by\n tensorspec_utils.validate_and_pack. A spec_structure which fulfills the\n requirements of the self.get_feature_specification.\n scope: String specifying variable scope.\n mode: (ModeKeys) Specifies if this is training, evaluation or prediction.\n config: Optional configuration object. Will receive what is passed to\n Estimator in config parameter, or the default config. Allows updating\n things in your model_fn based on configuration such as num_ps_replicas,\n or model_dir.\n params: An optional dict of hyper parameters that will be passed into\n input_fn and model_fn. Keys are names of parameters, values are basic\n python types. There are reserved keys for TPUEstimator, including\n 'batch_size'.\n reuse: Whether or not to reuse variables under variable scope 'scope'.\n\n Returns:\n outputs: A {key: Tensor} mapping. 
The key 'inference_output' is required.\n \"\"\"\n\n def loss_fn(self,\n labels,\n inference_outputs,\n mode,\n params=None):\n \"\"\"Convenience function for regression models.\n\n We only need to define the a_func and loss_fn to have a proper model.\n For more specialization please overwrite inference_network_fn, model_*_fn.\n\n Args:\n labels: This is the second item returned from the input_fn and parsed by\n self._extract_and_validate_inputs. A dictionary which fulfills the\n requirements of the self.get_labels_spefication.\n inference_outputs: A dict containing the output tensors of\n model_inference_fn.\n mode: (ModeKeys) Specifies if this is training, evaluation or prediction.\n params: An optional dict of hyper parameters that will be passed into\n input_fn and model_fn. Keys are names of parameters, values are basic\n python types. There are reserved keys for TPUEstimator, including\n 'batch_size'.\n\n Returns:\n A scalar loss tensor.\n \"\"\"\n del mode, params\n return tf.losses.mean_squared_error(\n labels=labels.target, predictions=inference_outputs['inference_output'])\n\n def inference_network_fn(self,\n features,\n labels,\n mode,\n config = None,\n params = None):\n \"\"\"See base class.\"\"\"\n del labels\n outputs = self.a_func(\n features=features,\n mode=mode,\n scope='a_func',\n config=config,\n params=params,\n reuse=tf.AUTO_REUSE)\n\n if not isinstance(outputs, dict):\n raise ValueError('The output of a_func is expected to be a dict.')\n\n if 'inference_output' not in outputs:\n raise ValueError('For regression models inference_output is a required '\n 'key in outputs but is not in {}.'.format(\n list(outputs.keys())))\n if self.use_summaries(params):\n tf.summary.histogram('inference_output', outputs['inference_output'])\n return outputs\n\n def model_train_fn(self,\n features,\n labels,\n inference_outputs,\n mode,\n config = None,\n params = None):\n \"\"\"See base class.\"\"\"\n del features, config\n loss = self.loss_fn(labels, inference_outputs, mode=mode, params=params)\n return loss\n\n def create_export_outputs_fn(self,\n features,\n inference_outputs,\n mode,\n config = None,\n params = None):\n \"\"\"See base class.\"\"\"\n del features, mode, config, params\n return {'inference_output': inference_outputs['inference_output']}\n"
] |
[
[
"tensorflow.contrib.slim.fully_connected",
"tensorflow.compat.v1.losses.mean_squared_error"
],
[
"tensorflow.compat.v1.summary.histogram",
"tensorflow.compat.v1.losses.mean_squared_error"
]
] |