repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
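Each row below pairs one repository with parallel lists: one commit SHA, file path, source string, and per-file API list, aligned index by index. The following is a minimal sketch of how such a record might be consumed; the record literal is abridged from the first row, and the dict-of-lists layout (e.g. as obtained from a JSONL export) is an assumption, not the dataset's documented API.

```python
# Hypothetical record layout, abridged from the first row of the table above.
record = {
    "repo_name": "MJ10/BioSeq-GFN-AL",
    "hexsha": ["d389aeb729ac29578ad825da5b828ff968a1d555"],
    "file_path": ["lib/dataset/base.py"],
    "code": ["import numpy as np\n# ... full file contents, abridged here"],
    "apis": [["numpy.random.RandomState"]],  # fully qualified APIs called in that file
}

# The parallel lists line up: file_path[i] <-> code[i] <-> apis[i] <-> hexsha[i].
for sha, path, source, file_apis in zip(
    record["hexsha"], record["file_path"], record["code"], record["apis"]
):
    print(f"{record['repo_name']} @ {sha[:7]}  {path}: {len(file_apis)} API(s) -> {file_apis}")
```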
MJ10/BioSeq-GFN-AL
|
[
"d389aeb729ac29578ad825da5b828ff968a1d555"
] |
[
"lib/dataset/base.py"
] |
[
"import numpy as np\n\nclass Dataset:\n def __init__(self, args, oracle):\n self.oracle = oracle\n self.args = args\n self.rng = np.random.RandomState(142857)\n\n def sample(self, num_samples, ratio=0.5):\n raise NotImplementedError()\n \n def validation_set(self, ratio=None):\n raise NotImplementedError()\n\n def add(self, batch):\n raise NotImplementedError()\n \n def top_k(self, k):\n raise NotImplementedError()"
] |
[
[
"numpy.random.RandomState"
]
] |
schroedk/sensAI
|
[
"a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7",
"a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7"
] |
[
"src/sensai/util/pandas.py",
"src/sensai/evaluation/eval_util.py"
] |
[
"import logging\nfrom copy import copy\n\nimport numpy as np\nimport pandas as pd\n\nlog = logging.getLogger(__name__)\n\n\nclass DataFrameColumnChangeTracker:\n \"\"\"\n A simple class for keeping track of changes in columns between an initial data frame and some other data frame\n (usually the result of some transformations performed on the initial one).\n\n Example:\n\n >>> from sensai.util.pandas import DataFrameColumnChangeTracker\n >>> import pandas as pd\n\n >>> df = pd.DataFrame({\"bar\": [1, 2]})\n >>> columnChangeTracker = DataFrameColumnChangeTracker(df)\n >>> df[\"foo\"] = [4, 5]\n >>> columnChangeTracker.trackChange(df)\n >>> columnChangeTracker.getRemovedColumns()\n set()\n >>> columnChangeTracker.getAddedColumns()\n {'foo'}\n \"\"\"\n def __init__(self, initialDF: pd.DataFrame):\n self.initialColumns = copy(initialDF.columns)\n self.finalColumns = None\n\n def trackChange(self, changedDF: pd.DataFrame):\n self.finalColumns = copy(changedDF.columns)\n\n def getRemovedColumns(self):\n self.assertChangeWasTracked()\n return set(self.initialColumns).difference(self.finalColumns)\n\n def getAddedColumns(self):\n \"\"\"\n Returns the columns in the last entry of the history that were not present the first one\n \"\"\"\n self.assertChangeWasTracked()\n return set(self.finalColumns).difference(self.initialColumns)\n\n def columnChangeString(self):\n \"\"\"\n Returns a string representation of the change\n \"\"\"\n self.assertChangeWasTracked()\n if list(self.initialColumns) == list(self.finalColumns):\n return \"none\"\n removedCols, addedCols = self.getRemovedColumns(), self.getAddedColumns()\n if removedCols == addedCols == set():\n return f\"reordered {list(self.finalColumns)}\"\n\n return f\"added={list(addedCols)}, removed={list(removedCols)}\"\n\n def assertChangeWasTracked(self):\n if self.finalColumns is None:\n raise Exception(f\"No change was tracked yet. \"\n f\"Did you forget to call trackChange on the resulting data frame?\")\n\n\ndef extractArray(df: pd.DataFrame):\n \"\"\"\n Extracts array from data frame. It is expected that each row corresponds to a data point and\n each column corresponds to a \"channel\". Moreover, all entries are expected to be arrays of the same shape\n (or scalars or sequences of the same length). We will refer to that shape as tensorShape.\n\n The output will be of shape `(N_rows, N_columns, *tensorShape)`. Thus, `N_rows` can be interpreted as dataset length\n (or batch size, if a single batch is passed) and N_columns can be interpreted as number of channels.\n Empty dimensions will be stripped, thus if the data frame has only one column, the array will have shape\n `(N_rows, *tensorShape)`.\n E.g. an image with three channels could equally be passed as data frame of the type\n\n\n +------------------+------------------+------------------+\n | R | G | B |\n +==================+==================+==================+\n | channel | channel | channel |\n +------------------+------------------+------------------+\n | channel | channel | channel |\n +------------------+------------------+------------------+\n | ... | ... | ... |\n +------------------+------------------+------------------+\n\n or as data frame of type\n\n +------------------+\n | image |\n +==================+\n | RGB-array |\n +------------------+\n | RGB-array |\n +------------------+\n | ... 
|\n +------------------+\n\n In both cases the returned array will have shape `(N_images, 3, width, height)`\n\n :param df: data frame where each entry is an array of shape tensorShape\n :return: array of shape `(N_rows, N_columns, *tensorShape)` with stripped empty dimensions\n \"\"\"\n log.debug(f\"Stacking tensors of shape {np.array(df.iloc[0, 0]).shape}\")\n try:\n return np.stack(df.apply(np.stack, axis=1)).squeeze()\n except ValueError:\n raise ValueError(f\"No array can be extracted from frame of length {len(df)} with columns {list(df.columns)}. \"\n f\"Make sure that all entries have the same shape\")\n",
"\"\"\"\nThis module contains methods and classes that facilitate evaluation of different types of models. The suggested\nworkflow for evaluation is to use these higher-level functionalities instead of instantiating\nthe evaluation classes directly.\n\"\"\"\n# TODO: provide a notebook (and possibly an rst file) that illustrates standard evaluation scenarios and at the same\n# time serves as an integration test\n\nimport logging\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, Dict, Any, Union, Generic, TypeVar, Optional, Sequence, Callable\n\nimport matplotlib.figure\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfrom .crossval import VectorModelCrossValidationData, VectorRegressionModelCrossValidationData, \\\n VectorClassificationModelCrossValidationData, \\\n VectorClassificationModelCrossValidator, VectorRegressionModelCrossValidator, VectorModelCrossValidator, VectorModelCrossValidatorParams\nfrom .eval_stats.eval_stats_base import EvalStats, EvalStatsCollection\nfrom .eval_stats.eval_stats_classification import ClassificationEvalStats\nfrom .eval_stats.eval_stats_regression import RegressionEvalStats\nfrom .evaluator import VectorModelEvaluator, VectorModelEvaluationData, VectorRegressionModelEvaluator, \\\n VectorRegressionModelEvaluationData, VectorClassificationModelEvaluator, VectorClassificationModelEvaluationData, \\\n VectorRegressionModelEvaluatorParams, VectorClassificationModelEvaluatorParams, VectorModelEvaluatorParams\nfrom ..data import InputOutputData\nfrom ..util.io import ResultWriter\nfrom ..util.string import prettyStringRepr\nfrom ..vector_model import VectorClassificationModel, VectorRegressionModel, VectorModel\n\nlog = logging.getLogger(__name__)\n\nTModel = TypeVar(\"TModel\", bound=VectorModel)\nTEvalStats = TypeVar(\"TEvalStats\", bound=EvalStats)\nTEvalStatsCollection = TypeVar(\"TEvalStatsCollection\", bound=EvalStatsCollection)\nTEvaluator = TypeVar(\"TEvaluator\", bound=VectorModelEvaluator)\nTCrossValidator = TypeVar(\"TCrossValidator\", bound=VectorModelCrossValidator)\nTEvalData = TypeVar(\"TEvalData\", bound=VectorModelEvaluationData)\nTCrossValData = TypeVar(\"TCrossValData\", bound=VectorModelCrossValidationData)\n\n\ndef _isRegression(model: Optional[VectorModel], isRegression: Optional[bool]) -> bool:\n if model is None and isRegression is None or (model is not None and isRegression is not None):\n raise ValueError(\"One of the two parameters have to be passed: model or isRegression\")\n\n if isRegression is None:\n model: VectorModel\n return model.isRegressionModel()\n return isRegression\n\n\ndef createVectorModelEvaluator(data: InputOutputData, model: VectorModel = None,\n isRegression: bool = None, params: Union[VectorModelEvaluatorParams, Dict[str, Any]] = None, **kwargs) \\\n -> Union[VectorRegressionModelEvaluator, VectorClassificationModelEvaluator]:\n if params is not None and len(kwargs) > 0:\n raise ValueError(\"Provide either params or keyword arguments\")\n if params is None:\n params = kwargs\n regression = _isRegression(model, isRegression)\n if regression:\n params = VectorRegressionModelEvaluatorParams.fromDictOrInstance(params)\n else:\n params = VectorClassificationModelEvaluatorParams.fromDictOrInstance(params)\n cons = VectorRegressionModelEvaluator if regression else VectorClassificationModelEvaluator\n return cons(data, params=params)\n\n\ndef createVectorModelCrossValidator(data: InputOutputData, model: VectorModel = None,\n isRegression: bool = None,\n params: 
Union[VectorModelCrossValidatorParams, Dict[str, Any]] = None,\n **kwArgsOldParams) -> Union[VectorClassificationModelCrossValidator, VectorRegressionModelCrossValidator]:\n if params is not None:\n params = VectorModelCrossValidatorParams.fromDictOrInstance(params)\n params = VectorModelCrossValidatorParams.fromEitherDictOrInstance(kwArgsOldParams, params)\n cons = VectorRegressionModelCrossValidator if _isRegression(model, isRegression) else VectorClassificationModelCrossValidator\n return cons(data, params=params)\n\n\ndef createEvaluationUtil(data: InputOutputData, model: VectorModel = None, isRegression: bool = None,\n evaluatorParams: Optional[Dict[str, Any]] = None,\n crossValidatorParams: Optional[Dict[str, Any]] = None) \\\n -> Union[\"ClassificationEvaluationUtil\", \"RegressionEvaluationUtil\"]:\n cons = RegressionEvaluationUtil if _isRegression(model, isRegression) else ClassificationEvaluationUtil\n return cons(data, evaluatorParams=evaluatorParams, crossValidatorParams=crossValidatorParams)\n\n\ndef evalModelViaEvaluator(model: TModel, inputOutputData: InputOutputData, testFraction=0.2,\n plotTargetDistribution=False, computeProbabilities=True, normalizePlots=True, randomSeed=60) -> TEvalData:\n \"\"\"\n Evaluates the given model via a simple evaluation mechanism that uses a single split\n\n :param model: the model to evaluate\n :param inputOutputData: data on which to evaluate\n :param testFraction: the fraction of the data to test on\n :param plotTargetDistribution: whether to plot the target values distribution in the entire dataset\n :param computeProbabilities: only relevant if the model is a classifier\n :param normalizePlots: whether to normalize plotted distributions such that the sum/integrate to 1\n :param randomSeed:\n\n :return: the evaluation data\n \"\"\"\n if plotTargetDistribution:\n title = \"Distribution of target values in entire dataset\"\n fig = plt.figure(title)\n\n outputDistributionSeries = inputOutputData.outputs.iloc[:, 0]\n log.info(f\"Description of target column in training set: \\n{outputDistributionSeries.describe()}\")\n if not model.isRegressionModel():\n outputDistributionSeries = outputDistributionSeries.value_counts(normalize=normalizePlots)\n ax = sns.barplot(outputDistributionSeries.index, outputDistributionSeries.values)\n ax.set_ylabel(\"%\")\n else:\n ax = sns.distplot(outputDistributionSeries)\n ax.set_ylabel(\"Probability density\")\n ax.set_title(title)\n ax.set_xlabel(\"target value\")\n fig.show()\n\n if model.isRegressionModel():\n evaluatorParams = dict(testFraction=testFraction, randomSeed=randomSeed)\n else:\n evaluatorParams = dict(testFraction=testFraction, computeProbabilities=computeProbabilities, randomSeed=randomSeed)\n ev = createEvaluationUtil(inputOutputData, model=model, evaluatorParams=evaluatorParams)\n return ev.performSimpleEvaluation(model, showPlots=True, logResults=True)\n\n\nclass EvaluationUtil(ABC, Generic[TModel, TEvaluator, TEvalData, TCrossValidator, TCrossValData, TEvalStats]):\n \"\"\"\n Utility class for the evaluation of models based on a dataset\n \"\"\"\n def __init__(self, inputOutputData: InputOutputData,\n evaluatorParams: Optional[Union[VectorRegressionModelEvaluatorParams, VectorClassificationModelEvaluatorParams, Dict[str, Any]]] = None,\n crossValidatorParams: Optional[Union[VectorModelCrossValidatorParams, Dict[str, Any]]] = None):\n \"\"\"\n :param inputOutputData: the data set to use for evaluation\n :param evaluatorParams: parameters with which to instantiate evaluators\n :param 
crossValidatorParams: parameters with which to instantiate cross-validators\n \"\"\"\n if evaluatorParams is None:\n evaluatorParams = dict(testFraction=0.2)\n if crossValidatorParams is None:\n crossValidatorParams = VectorModelCrossValidatorParams(folds=5)\n self.evaluatorParams = evaluatorParams\n self.crossValidatorParams = crossValidatorParams\n self.inputOutputData = inputOutputData\n\n class ResultCollector:\n def __init__(self, showPlots: bool = True, resultWriter: Optional[ResultWriter] = None):\n self.showPlots = showPlots\n self.resultWriter = resultWriter\n\n def addFigure(self, name, fig: matplotlib.figure.Figure):\n if self.resultWriter is not None:\n self.resultWriter.writeFigure(name, fig, closeFigure=not self.showPlots)\n\n def child(self, addedFilenamePrefix):\n resultWriter = self.resultWriter\n if resultWriter:\n resultWriter = resultWriter.childWithAddedPrefix(addedFilenamePrefix)\n return self.__class__(showPlots=self.showPlots, resultWriter=resultWriter)\n\n def createEvaluator(self, model: TModel = None, isRegression: bool = None) -> TEvaluator:\n \"\"\"\n Creates an evaluator holding the current input-output data\n\n :param model: the model for which to create an evaluator (just for reading off regression or classification,\n the resulting evaluator will work on other models as well)\n :param isRegression: whether to create a regression model evaluator. Either this or model have to be specified\n :return: an evaluator\n \"\"\"\n return createVectorModelEvaluator(self.inputOutputData, model=model, isRegression=isRegression, params=self.evaluatorParams)\n\n def createCrossValidator(self, model: TModel = None, isRegression: bool = None) -> TCrossValidator:\n \"\"\"\n Creates a cross-validator holding the current input-output data\n\n :param model: the model for which to create a cross-validator (just for reading off regression or classification,\n the resulting evaluator will work on other models as well)\n :param isRegression: whether to create a regression model cross-validator. 
Either this or model have to be specified\n :return: an evaluator\n \"\"\"\n return createVectorModelCrossValidator(self.inputOutputData, model=model, isRegression=isRegression, params=self.crossValidatorParams)\n\n def performSimpleEvaluation(self, model: TModel, createPlots=True, showPlots=False, logResults=True, resultWriter: ResultWriter = None,\n additionalEvaluationOnTrainingData=False, fitModel=True, writeEvalStats=False) -> TEvalData:\n if showPlots and not createPlots:\n raise ValueError(\"showPlots=True requires createPlots=True\")\n resultWriter = self._resultWriterForModel(resultWriter, model)\n evaluator = self.createEvaluator(model)\n log.info(f\"Evaluating {model} via {evaluator}\")\n if fitModel:\n evaluator.fitModel(model)\n\n def gatherResults(evalResultData: VectorModelEvaluationData, resultWriter, subtitlePrefix=\"\"):\n strEvalResults = \"\"\n for predictedVarName in evalResultData.predictedVarNames:\n evalStats = evalResultData.getEvalStats(predictedVarName)\n strEvalResult = str(evalStats)\n if logResults:\n log.info(f\"{subtitlePrefix}Evaluation results for {predictedVarName}: {strEvalResult}\")\n strEvalResults += predictedVarName + \": \" + strEvalResult + \"\\n\"\n if writeEvalStats and resultWriter is not None:\n resultWriter.writePickle(f\"eval-stats-{predictedVarName}\", evalStats)\n strEvalResults += f\"\\n\\n{prettyStringRepr(model)}\"\n if resultWriter is not None:\n resultWriter.writeTextFile(\"evaluator-results\", strEvalResults)\n if createPlots:\n self.createPlots(evalResultData, showPlots=showPlots, resultWriter=resultWriter, subtitlePrefix=subtitlePrefix)\n\n evalResultData = evaluator.evalModel(model)\n gatherResults(evalResultData, resultWriter)\n if additionalEvaluationOnTrainingData:\n evalResultDataTrain = evaluator.evalModel(model, onTrainingData=True)\n additionalResultWriter = resultWriter.childWithAddedPrefix(\"onTrain-\") if resultWriter is not None else None\n gatherResults(evalResultDataTrain, additionalResultWriter, subtitlePrefix=\"[onTrain] \")\n\n return evalResultData\n\n @staticmethod\n def _resultWriterForModel(resultWriter: Optional[ResultWriter], model: TModel) -> Optional[ResultWriter]:\n if resultWriter is None:\n return None\n return resultWriter.childWithAddedPrefix(model.getName() + \"-\")\n\n def performCrossValidation(self, model: TModel, showPlots=False, logResults=True, resultWriter: Optional[ResultWriter] = None) -> TCrossValData:\n \"\"\"\n Evaluates the given model via cross-validation\n\n :param model: the model to evaluate\n :param showPlots: whether to show plots that visualise evaluation results (combining all folds)\n :param logResults: whether to log evaluation results\n :param resultWriter: a writer with which to store text files and plots. 
The evaluated model's name is added to each filename\n automatically\n :return: cross-validation result data\n \"\"\"\n resultWriter = self._resultWriterForModel(resultWriter, model)\n crossValidator = self.createCrossValidator(model)\n crossValidationData = crossValidator.evalModel(model)\n aggStatsByVar = {varName: crossValidationData.getEvalStatsCollection(predictedVarName=varName).aggStats()\n for varName in crossValidationData.predictedVarNames}\n strEvalResults = str(pd.DataFrame.from_dict(aggStatsByVar, orient=\"index\"))\n if logResults:\n log.info(f\"Cross-validation results:\\n{strEvalResults}\")\n if resultWriter is not None:\n resultWriter.writeTextFile(\"crossval-results\", strEvalResults)\n self.createPlots(crossValidationData, showPlots=showPlots, resultWriter=resultWriter)\n return crossValidationData\n\n def compareModels(self, models: Sequence[TModel], resultWriter: Optional[ResultWriter] = None, useCrossValidation=False,\n fitModels=True, writeIndividualResults=True, sortColumn: Optional[str] = None, sortAscending: bool = True) -> pd.DataFrame:\n \"\"\"\n Compares several models via simple evaluation or cross-validation\n\n :param models: the models to compare\n :param resultWriter: a writer with which to store results of the comparison\n :param useCrossValidation: whether to use cross-validation in order to evaluate models; if False, use a simple evaluation\n on test data (single split)\n :param fitModels: whether to fit models before evaluating them; this can only be False if useCrossValidation=False\n :param writeIndividualResults: whether to write results files on each individual model (in addition to the comparison\n summary)\n :param sortColumn: column/metric name by which to sort\n :param sortAscending: whether to sort in ascending order\n :return: a data frame containing evaluation metrics on all models\n \"\"\"\n statsList = []\n for model in models:\n if useCrossValidation:\n if not fitModels:\n raise ValueError(\"Cross-validation necessitates that models be retrained; got fitModels=False\")\n crossValidationResult = self.performCrossValidation(model, resultWriter=resultWriter if writeIndividualResults else None)\n statsDict = crossValidationResult.getEvalStatsCollection().aggStats()\n else:\n evalStats: EvalStats = self.performSimpleEvaluation(model, resultWriter=resultWriter if writeIndividualResults else None,\n fitModel=fitModels).getEvalStats()\n statsDict = evalStats.getAll()\n statsDict[\"modelName\"] = model.getName()\n statsList.append(statsDict)\n resultsDF = pd.DataFrame(statsList).set_index(\"modelName\")\n if sortColumn is not None:\n if sortColumn not in resultsDF.columns:\n log.warning(f\"Requested sort column '{sortColumn}' not in list of columns {list(resultsDF.columns)}\")\n else:\n resultsDF.sort_values(sortColumn, ascending=sortAscending, inplace=True)\n strResults = f\"Model comparison results:\\n{resultsDF.to_string()}\"\n log.info(strResults)\n if resultWriter is not None:\n suffix = \"crossval\" if useCrossValidation else \"simple-eval\"\n strResults += \"\\n\\n\" + \"\\n\\n\".join([f\"{model.getName()} = {str(model)}\" for model in models])\n resultWriter.writeTextFile(f\"model-comparison-results-{suffix}\", strResults)\n return resultsDF\n\n def compareModelsCrossValidation(self, models: Sequence[TModel], resultWriter: Optional[ResultWriter] = None) -> pd.DataFrame:\n \"\"\"\n Compares several models via cross-validation\n\n :param models: the models to compare\n :param resultWriter: a writer with which to store results of the 
comparison\n :return: a data frame containing evaluation metrics on all models\n \"\"\"\n return self.compareModels(models, resultWriter=resultWriter, useCrossValidation=True)\n\n def createPlots(self, data: Union[TEvalData, TCrossValData], showPlots=True, resultWriter: Optional[ResultWriter] = None, subtitlePrefix: str = \"\"):\n \"\"\"\n Creates default plots that visualise the results in the given evaluation data\n\n :param data: the evaluation data for which to create the default plots\n :param showPlots: whether to show plots\n :param resultWriter: if not None, plots will be written using this writer\n :param subtitlePrefix: a prefix to add to the subtitle (which itself is the model name)\n \"\"\"\n if not showPlots and resultWriter is None:\n return\n resultCollector = self.ResultCollector(showPlots=showPlots, resultWriter=resultWriter)\n self._createPlots(data, resultCollector, subtitle=subtitlePrefix + data.modelName)\n\n def _createPlots(self, data: Union[TEvalData, TCrossValData], resultCollector: ResultCollector, subtitle=None):\n\n def createPlots(predVarName, rc, subt):\n if isinstance(data, VectorModelCrossValidationData):\n evalStats = data.getEvalStatsCollection(predictedVarName=predVarName).getGlobalStats()\n elif isinstance(data, VectorModelEvaluationData):\n evalStats = data.getEvalStats(predictedVarName=predVarName)\n else:\n raise ValueError(f\"Unexpected argument: data={data}\")\n return self._createEvalStatsPlots(evalStats, rc, subtitle=subt)\n\n predictedVarNames = data.predictedVarNames\n if len(predictedVarNames) == 1:\n createPlots(predictedVarNames[0], resultCollector, subtitle)\n else:\n for predictedVarName in predictedVarNames:\n createPlots(predictedVarName, resultCollector.child(predictedVarName+\"-\"), f\"{predictedVarName}, {subtitle}\")\n\n @abstractmethod\n def _createEvalStatsPlots(self, evalStats: TEvalStats, resultCollector: ResultCollector, subtitle=None):\n \"\"\"\n :param evalStats: the evaluation results for which to create plots\n :param resultCollector: the collector to which all plots are to be passed\n :param subtitle: the subtitle to use for generated plots (if any)\n \"\"\"\n pass\n\n\nclass RegressionEvaluationUtil(EvaluationUtil[VectorRegressionModel, VectorRegressionModelEvaluator, VectorRegressionModelEvaluationData, VectorRegressionModelCrossValidator, VectorRegressionModelCrossValidationData, RegressionEvalStats]):\n def _createEvalStatsPlots(self, evalStats: RegressionEvalStats, resultCollector: EvaluationUtil.ResultCollector, subtitle=None):\n resultCollector.addFigure(\"error-dist\", evalStats.plotErrorDistribution(titleAdd=subtitle))\n resultCollector.addFigure(\"heatmap-gt-pred\", evalStats.plotHeatmapGroundTruthPredictions(titleAdd=subtitle))\n resultCollector.addFigure(\"scatter-gt-pred\", evalStats.plotScatterGroundTruthPredictions(titleAdd=subtitle))\n\n\nclass ClassificationEvaluationUtil(EvaluationUtil[VectorClassificationModel, VectorClassificationModelEvaluator, VectorClassificationModelEvaluationData, VectorClassificationModelCrossValidator, VectorClassificationModelCrossValidationData, ClassificationEvalStats]):\n def _createEvalStatsPlots(self, evalStats: ClassificationEvalStats, resultCollector: EvaluationUtil.ResultCollector, subtitle=None):\n resultCollector.addFigure(\"confusion-matrix\", evalStats.plotConfusionMatrix(titleAdd=subtitle))\n\n\nclass MultiDataEvaluationUtil:\n def __init__(self, inputOutputDataDict: Dict[str, InputOutputData], keyName: str = \"dataset\"):\n \"\"\"\n :param inputOutputDataDict: a 
dictionary mapping from names to the data sets with which to evaluate models\n :param keyName: a name for the key value used in inputOutputDataDict\n \"\"\"\n self.inputOutputDataDict = inputOutputDataDict\n self.keyName = keyName\n\n def compareModelsCrossValidation(self, modelFactories: Sequence[Callable[[], VectorModel]],\n resultWriter: Optional[ResultWriter] = None, writePerDatasetResults=True,\n crossValidatorParams: Optional[Dict[str, Any]] = None, columnNameForModelRanking: str = None, rankMax=True) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n :param modelFactories: a sequence of factory functions for the creation of models to evaluate\n :param resultWriter: a writer with which to store results\n :param writePerDatasetResults: whether to use resultWriter (if not None) in order to generate detailed results for each\n dataset in a subdirectory named according to the name of the dataset\n :param crossValidatorParams: parameters to use for the instantiation of cross-validators\n :param columnNameForModelRanking: column name to use for ranking models\n :param rankMax: if true, use max for ranking, else min\n :return: a pair of data frames (allDF, meanDF) where allDF contains all the individual cross-validation results\n for every dataset and meanDF contains one row for each model with results averaged across datasets\n \"\"\"\n allResults = pd.DataFrame()\n for key, inputOutputData in self.inputOutputDataDict.items():\n log.info(f\"Evaluating models for {key}\")\n models = [f() for f in modelFactories]\n modelsAreRegression = [model.isRegressionModel() for model in models]\n if all(modelsAreRegression):\n isRegression = True\n elif not any(modelsAreRegression):\n isRegression = False\n else:\n raise ValueError(\"The models have to be either all regression models or all classification, not a mixture\")\n ev = createEvaluationUtil(inputOutputData, isRegression=isRegression, crossValidatorParams=crossValidatorParams)\n childResultWriter = resultWriter.childForSubdirectory(key) if writePerDatasetResults else None\n df = ev.compareModelsCrossValidation(models, resultWriter=childResultWriter)\n df[self.keyName] = key\n df[\"modelName\"] = df.index\n if columnNameForModelRanking is not None:\n if columnNameForModelRanking not in df.columns:\n raise ValueError(f\"Rank metric {columnNameForModelRanking} not contained in columns {df.columns}\")\n df[\"bestModel\"] = 0\n if rankMax:\n df[\"bestModel\"].loc[df[columnNameForModelRanking].idxmax()] = 1\n else:\n df[\"bestModel\"].loc[df[columnNameForModelRanking].idxmin()] = 1\n df = df.reset_index(drop=True)\n allResults = pd.concat((allResults, df))\n strAllResults = f\"All results:\\n{allResults.to_string()}\"\n log.info(strAllResults)\n meanResults = allResults.groupby(\"modelName\").mean()\n strMeanResults = f\"Mean results:\\n{meanResults.to_string()}\"\n log.info(strMeanResults)\n if resultWriter is not None:\n resultWriter.writeTextFile(\"model-comparison-results\", strMeanResults + \"\\n\\n\" + strAllResults)\n return allResults, meanResults\n"
] |
[
[
"numpy.array"
],
[
"pandas.DataFrame.from_dict",
"pandas.concat",
"pandas.DataFrame",
"matplotlib.pyplot.figure"
]
] |
VITA-Group/Sandwich-Batch-Normalization
|
[
"25e7df6e64a67cebd7e70b911f874cfc1bd19df0"
] |
[
"Adv/functions.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Date : 2/16/21\n# @Author : Xinyu Gong (xinyu.gong@utexas.edu)\n# @Link : None\n# @Version : 0.0\n\nimport torch\n\nfrom attack_algo import PGD\n\n\ndef train(args, train_loader, model, criterion, optimizer, epoch):\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n for i, (input, target) in enumerate(train_loader):\n\n if epoch == 0:\n warmup_lr(i, optimizer)\n\n input = input.cuda()\n target = target.cuda()\n\n # adv samples\n input_adv = PGD(\n input,\n criterion,\n y=target,\n eps=(args.train_eps / 255),\n model=model,\n steps=args.train_steps,\n gamma=(args.train_gamma / 255),\n randinit=args.train_randinit,\n flag=1,\n )\n\n input_adv = input_adv.cuda()\n\n # compute output\n inputsall_clean = {\"x\": input, \"flag\": 0}\n inputsall_adv = {\"x\": input_adv, \"flag\": 1}\n output_clean = model(**inputsall_clean)\n output_adv = model(**inputsall_adv)\n\n loss = (criterion(output_clean, target) + criterion(output_adv, target)) / 2\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n output = output_adv.float()\n loss = loss.float()\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target)[0]\n\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n\n if i % args.print_freq == 0:\n print(\n \"Epoch: [{0}][{1}/{2}]\\t\"\n \"Loss {loss.val:.4f} ({loss.avg:.4f})\\t\"\n \"Accuracy {top1.val:.3f} ({top1.avg:.3f})\\t\".format(\n epoch, i, len(train_loader), loss=losses, top1=top1\n )\n )\n\n print(\"train_accuracy {top1.avg:.3f}\".format(top1=top1))\n\n return top1.avg, losses.avg\n\n\n@torch.no_grad()\ndef validate(args, val_loader, model, criterion, flag):\n \"\"\"\n Run evaluation\n \"\"\"\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n for i, (input, target) in enumerate(val_loader):\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n inputsall = {\"x\": input, \"flag\": flag}\n output = model(**inputsall)\n loss = criterion(output, target)\n\n output = output.float()\n loss = loss.float()\n\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target)[0]\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n\n if i % args.print_freq == 0:\n print(\n \"Test: [{0}/{1}]\\t\"\n \"Loss {loss.val:.4f} ({loss.avg:.4f})\\t\"\n \"Accuracy {top1.val:.3f} ({top1.avg:.3f})\".format(\n i, len(val_loader), loss=losses, top1=top1\n )\n )\n\n print(\"valid_accuracy {top1.avg:.3f}\".format(top1=top1))\n\n return top1.avg, losses.avg\n\n\ndef validate_adv(args, val_loader, model, criterion, flag):\n \"\"\"\n Run evaluation\n \"\"\"\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n for i, (input, target) in enumerate(val_loader):\n input = input.cuda()\n target = target.cuda()\n\n # adv samples\n input_adv = PGD(\n input,\n criterion,\n y=target,\n eps=(args.test_eps / 255),\n model=model,\n steps=args.test_steps,\n gamma=(args.test_gamma / 255),\n randinit=args.test_randinit,\n flag=flag,\n )\n\n input_adv = input_adv.cuda()\n # compute output\n with torch.no_grad():\n inputsall = {\"x\": input_adv, \"flag\": flag}\n output = model(**inputsall)\n loss = criterion(output, target)\n\n output = output.float()\n loss = loss.float()\n\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target)[0]\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n\n if i 
% args.print_freq == 0:\n print(\n \"Test: [{0}/{1}]\\t\"\n \"Loss {loss.val:.4f} ({loss.avg:.4f})\\t\"\n \"Accuracy {top1.val:.3f} ({top1.avg:.3f})\".format(\n i, len(val_loader), loss=losses, top1=top1\n )\n )\n\n print(\"ATA {top1.avg:.3f}\".format(top1=top1))\n\n return top1.avg, losses.avg\n\n\ndef save_checkpoint(state, is_best, filename=\"weight.pt\"):\n \"\"\"\n Save the training model\n \"\"\"\n torch.save(state, filename)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef warmup_lr(step, optimizer):\n lr = 0.01 + step * (0.1 - 0.01) / 200\n lr = min(lr, 0.1)\n for p in optimizer.param_groups:\n p[\"lr\"] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n"
] |
[
[
"torch.no_grad",
"torch.save"
]
] |
JasonD1997/Machine-Learning-Programs
|
[
"81e54ccaf2ca4bd7e2d79ed0e538f13cb355c0bf",
"81e54ccaf2ca4bd7e2d79ed0e538f13cb355c0bf"
] |
[
"p8.py",
"p9.py"
] |
[
"from sklearn.cluster import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndata=pd.read_csv(\"data8.csv\")\ndf1=pd.DataFrame(data)\nprint(df1)\nf1 = df1['Distance_Feature'].values\nf2 = df1['Speeding_Feature'].values\nX=np.matrix(list(zip(f1,f2)))\nplt.plot()\nplt.xlim([0, 100])\nplt.ylim([0, 50])\nplt.title('Dataset')\nplt.ylabel('speeding_feature')\nplt.xlabel('Distance_Feature')\nplt.scatter(f1,f2)\nplt.show()\nplt.plot()\ncolors = ['b', 'g', 'r']\nmarkers = ['o', 'v', 's']\nkmeans_model = KMeans(n_clusters=3).fit(X)\nplt.plot()\nfor i, l in enumerate(kmeans_model.labels_):\n\tplt.plot(f1[i], f2[i], color=colors[l], marker=markers[l],ls='None')\n\tplt.xlim([0, 100])\n\tplt.ylim([0, 50])\nplt.show()\n",
"from sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom sklearn import datasets\n\niris=datasets.load_iris()\niris_data=iris.data\niris_labels=iris.target\nprint(iris_data)\nprint(iris_labels)\nx_train,x_test,y_train,y_test=train_test_split(iris_data,iris_labels,test_size=0.30)\nclassifier=KNeighborsClassifier(n_neighbors=5)\nclassifier.fit(x_train,y_train)\ny_pred=classifier.predict(x_test)\nprint('confusion matrix is as follows')\nprint(confusion_matrix(y_test,y_pred))\nprint('Accuracy metrics')\nprint(classification_report(y_test,y_pred))\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.ylim",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"sklearn.datasets.load_iris",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.classification_report"
]
] |
mvtea/sgl
|
[
"b6307e6fe9db536b392f181ca113128c7f0f1821"
] |
[
"not_my_code/bolton/lensdemo_funcs.py"
] |
[
"#\n# lensdemo_funcs.py\n#\n# Function module for strong lensing demos\n#\n# Intended for use with lensdemo_script.py\n#\n# Copyright 2009 by Adam S. Bolton\n# Creative Commons Attribution-Noncommercial-ShareAlike 3.0 license applies:\n# http://creativecommons.org/licenses/by-nc-sa/3.0/\n# All redistributions, modified or otherwise, must include this\n# original copyright notice, licensing statement, and disclaimer.\n# DISCLAIMER: ABSOLUTELY NO WARRANTY EXPRESS OR IMPLIED.\n# AUTHOR ASSUMES NO LIABILITY IN CONNECTION WITH THIS COMPUTER CODE.\n#\n\nimport numpy as N\n\ndef xy_rotate(x, y, xcen, ycen, phi):\n \"\"\"\n NAME: xy_rotate\n\n PURPOSE: Transform input (x, y) coordiantes into the frame of a new\n (x, y) coordinate system that has its origin at the point\n (xcen, ycen) in the old system, and whose x-axis is rotated\n c.c.w. by phi degrees with respect to the original x axis.\n\n USAGE: (xnew,ynew) = xy_rotate(x, y, xcen, ycen, phi)\n\n ARGUMENTS:\n x, y: numpy ndarrays with (hopefully) matching sizes\n giving coordinates in the old system\n xcen: old-system x coordinate of the new origin\n ycen: old-system y coordinate of the new origin\n phi: angle c.c.w. in degrees from old x to new x axis\n\n RETURNS: 2-item tuple containing new x and y coordinate arrays\n\n WRITTEN: Adam S. Bolton, U. of Utah, 2009\n \"\"\"\n phirad = N.deg2rad(phi)\n xnew = (x - xcen) * N.cos(phirad) + (y - ycen) * N.sin(phirad)\n ynew = (y - ycen) * N.cos(phirad) - (x - xcen) * N.sin(phirad)\n return (xnew,ynew)\n\ndef gauss_2d(x, y, par):\n \"\"\"\n NAME: gauss_2d\n\n PURPOSE: Implement 2D Gaussian function\n\n USAGE: z = gauss_2d(x, y, par)\n\n ARGUMENTS:\n x, y: vecors or images of coordinates;\n should be matching numpy ndarrays\n par: vector of parameters, defined as follows:\n par[0]: amplitude\n par[1]: intermediate-axis sigma\n par[2]: x-center\n par[3]: y-center\n par[4]: axis ratio\n par[5]: c.c.w. major-axis rotation w.r.t. x-axis\n\n RETURNS: 2D Gaussian evaluated at x-y coords\n\n NOTE: amplitude = 1 is not normalized, but rather has max = 1\n\n WRITTEN: Adam S. Bolton, U. of Utah, 2009\n \"\"\"\n (xnew,ynew) = xy_rotate(x, y, par[2], par[3], par[5])\n r_ell_sq = ((xnew**2)*par[4] + (ynew**2)/par[4]) / N.abs(par[1])**2\n return par[0] * N.exp(-0.5*r_ell_sq)\n\ndef sie_grad(x, y, par):\n \"\"\"\n NAME: sie_grad\n\n PURPOSE: compute the deflection of an SIE potential\n\n USAGE: (xg, yg) = sie_grad(x, y, par)\n\n ARGUMENTS:\n x, y: vectors or images of coordinates;\n should be matching numpy ndarrays\n par: vector of parameters with 1 to 5 elements, defined as follows:\n par[0]: lens strength, or 'Einstein radius'\n par[1]: (optional) x-center (default = 0.0)\n par[2]: (optional) y-center (default = 0.0)\n par[3]: (optional) axis ratio (default=1.0)\n par[4]: (optional) major axis Position Angle\n in degrees c.c.w. of x axis. (default = 0.0)\n\n RETURNS: tuple (xg, yg) of gradients at the positions (x, y)\n\n NOTES: This routine implements an 'intermediate-axis' convention.\n Analytic forms for the SIE potential can be found in:\n Kassiola & Kovner 1993, ApJ, 417, 450\n Kormann et al. 1994, A&A, 284, 285\n Keeton & Kochanek 1998, ApJ, 495, 157\n The parameter-order convention in this routine differs from that\n of a previous IDL routine of the same name by ASB.\n\n WRITTEN: Adam S. Bolton, U of Utah, 2009\n \"\"\"\n # Set parameters:\n b = N.abs(par[0]) # can't be negative!!!\n xzero = 0. if (len(par) < 2) else par[1]\n yzero = 0. if (len(par) < 3) else par[2]\n q = 1. 
if (len(par) < 4) else N.abs(par[3])\n phiq = 0. if (len(par) < 5) else par[4]\n eps = 0.001 # for sqrt(1/q - q) < eps, a limit expression is used.\n # Handle q > 1 gracefully:\n if (q > 1.):\n q = 1.0 / q\n phiq = phiq + 90.0\n # Go into shifted coordinats of the potential:\n phirad = N.deg2rad(phiq)\n xsie = (x-xzero) * N.cos(phirad) + (y-yzero) * N.sin(phirad)\n ysie = (y-yzero) * N.cos(phirad) - (x-xzero) * N.sin(phirad)\n # Compute potential gradient in the transformed system:\n r_ell = N.sqrt(q * xsie**2 + ysie**2 / q)\n qfact = N.sqrt(1./q - q)\n # (r_ell == 0) terms prevent divide-by-zero problems\n if (qfact >= eps):\n xtg = (b/qfact) * N.arctan(qfact * xsie / (r_ell + (r_ell == 0)))\n ytg = (b/qfact) * N.arctanh(qfact * ysie / (r_ell + (r_ell == 0)))\n else:\n xtg = b * xsie / (r_ell + (r_ell == 0))\n ytg = b * ysie / (r_ell + (r_ell == 0))\n # Transform back to un-rotated system:\n xg = xtg * N.cos(phirad) - ytg * N.sin(phirad)\n yg = ytg * N.cos(phirad) + xtg * N.sin(phirad)\n # Return value:\n return (xg, yg)\n"
] |
[
[
"numpy.arctanh",
"numpy.abs",
"numpy.sqrt",
"numpy.arctan",
"numpy.cos",
"numpy.sin",
"numpy.deg2rad",
"numpy.exp"
]
] |
Royzon/dl-utils
|
[
"be95707da9cb812a5f25a11f1469055e0b7abcd2"
] |
[
"dlutils/losses/focal.py"
] |
[
"from functools import reduce\n\nimport torch\nfrom rising.transforms.functional.channel import one_hot_batch\nfrom torch.nn import functional as F\n\nfrom dlutils.utils.tensor_ops import reduce\n\n__all__ = [\n 'focal_loss',\n 'focal_loss_with_logits',\n 'binary_focal_loss',\n 'binary_focal_loss_logits',\n 'FocalLoss',\n 'FocalLossWithLogits',\n 'BinaryFocalLoss',\n 'BinaryFocalLossWithLogits'\n]\n\n\ndef _general_focal_loss(p: torch.Tensor, t: torch.Tensor, gamma: float,\n loss_val: torch.Tensor, alpha_weight: float = 1.,\n reduction: str = 'elementwise_mean'):\n \"\"\"\n Helper Function Handling the general focal part and the reduction\n\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n loss_val : torch.Tensor\n the value coming from the previous loss function\n alpha_weight : float\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n # compute focal weights\n # if not isinstance(alpha_weight, torch.Tensor):\n # alpha_weight = torch.tensor([1.], device=p.device)\n\n focal_weight = 1 - torch.where(torch.eq(t, 1.), p, 1 - p)\n focal_weight.pow_(gamma)\n focal_weight.to(p.device)\n\n # adjust shape if necessary\n if len(loss_val.shape) < len(focal_weight.shape):\n loss_val = loss_val.unsqueeze(1)\n\n # compute loss\n focal_loss = focal_weight * alpha_weight * loss_val\n\n return reduce(focal_loss, reduction)\n\n\ndef _bfocal_loss(p: torch.Tensor, t: torch.Tensor, loss_val: torch.Tensor,\n gamma: float, alpha: float, reduction: str):\n \"\"\"\n Helper function for binary focal loss\n\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n loss_val : torch.Tensor\n the value coming from the previous loss function\n alpha : float\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n if alpha is not None:\n # create weights for alpha\n alpha_weight = torch.ones(t.shape, device=p.device) * alpha\n alpha_weight = torch.where(torch.eq(t, 1.),\n alpha_weight, 1 - alpha_weight)\n else:\n alpha_weight = 1.\n\n # create weights for focal loss\n\n return _general_focal_loss(p=p, t=t, gamma=gamma, loss_val=loss_val,\n alpha_weight=alpha_weight, reduction=reduction)\n\n\ndef binary_focal_loss(p: torch.Tensor, t: torch.Tensor, gamma: float = 2.,\n alpha: float = None,\n reduction: str = 'elementwise_mean'):\n \"\"\"\n Binary focal loss without (!) 
logits\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n alpha : float\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n loss_val = F.binary_cross_entropy(p, t, reduction='none')\n return _bfocal_loss(p=p, t=t, loss_val=loss_val, gamma=gamma, alpha=alpha,\n reduction=reduction)\n\n\ndef binary_focal_loss_logits(p: torch.Tensor, t: torch.Tensor,\n gamma: float = 2., alpha: float = None,\n reduction: str = 'elementwise_mean'):\n \"\"\"\n Binary focal loss with logits\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n alpha : float\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n loss_val = F.binary_cross_entropy_with_logits(\n p, t, reduction='none')\n\n p = torch.sigmoid(p)\n return _bfocal_loss(p=p, t=t, loss_val=loss_val, gamma=gamma, alpha=alpha,\n reduction=reduction)\n\n\ndef _focal_loss(p: torch.Tensor, t: torch.Tensor, gamma: float,\n loss_val: torch.Tensor, reduction: str):\n \"\"\"\n Focal loss helper function\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n loss_val : torch.Tensor\n value coming from the previous (weighted) loss function\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n n_classes = p.size(1)\n target_onehot = one_hot_batch(t.unsqueeze(1), num_classes=n_classes)\n return _general_focal_loss(p=p, t=target_onehot, gamma=gamma,\n loss_val=loss_val, reduction=reduction,\n alpha_weight=1.)\n\n\ndef focal_loss(p: torch.Tensor, t: torch.Tensor, gamma: float = 2.,\n alpha: torch.Tensor = None,\n reduction: str = 'elementwise_mean'):\n \"\"\"\n Focal loss without (!) logits\n\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n alpha : torch.Tensor\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n loss_val = F.nll_loss(p, t, weight=alpha, reduction='none')\n p = p.log()\n return _focal_loss(p=p, t=t, gamma=gamma, reduction=reduction,\n loss_val=loss_val)\n\n\ndef focal_loss_with_logits(p: torch.Tensor, t: torch.Tensor, gamma: float = 2.,\n alpha: torch.Tensor = None,\n reduction: str = 'elementwise_mean'):\n \"\"\"\n focal loss with logits\n Parameters\n ----------\n p: torch.Tensor\n the prediction tensor\n t : torch.Tensor\n the target tensor\n gamma : float\n focusing parameter\n alpha : torch.Tensor\n class weight\n reduction : str\n reduction parameter\n\n Returns\n -------\n torch.Tensor\n loss value\n\n Raises\n ------\n ValueError\n invalid reduction parameter\n\n \"\"\"\n loss_val = F.cross_entropy(p, t, weight=alpha, reduction='none')\n p = F.softmax(p, dim=1)\n return _focal_loss(p=p, t=t, gamma=gamma, reduction=reduction,\n loss_val=loss_val)\n\n\nclass BinaryFocalLoss(torch.nn.Module):\n \"\"\"\n Focal loss for binary case without(!) 
logit\n \"\"\"\n\n def __init__(self, alpha=None, gamma=2, reduction='elementwise_mean'):\n \"\"\"\n Implements Focal Loss for binary classification case\n Parameters\n ----------\n alpha : float\n alpha has to be in range [0,1], assigns class weight\n gamma : float\n focusing parameter\n reduction : str\n Specifies the reduction to apply to the output: ‘none’ |\n ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied,\n ‘elementwise_mean’: the sum of the output will be divided by the\n number of elements in the output, ‘sum’: the output will be summed\n (further information about parameters above can be found in pytorch\n documentation)\n \"\"\"\n super().__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.reduction = reduction\n\n def forward(self, p, t):\n \"\"\"\n Compute Focal Loss with logits\n Parameters\n ----------\n p : torch.Tensor\n prediction\n t : torch.Tensor\n target\n Returns\n -------\n torch.Tensor\n result\n \"\"\"\n return binary_focal_loss(p=p, t=t, gamma=self.gamma, alpha=self.alpha,\n reduction=self.reduction)\n\n\nclass BinaryFocalLossWithLogits(torch.nn.Module):\n \"\"\"\n Focal loss for binary case WITH logits\n \"\"\"\n\n def __init__(self, alpha=None, gamma=2, reduction='elementwise_mean'):\n \"\"\"\n Implements Focal Loss for binary class case\n Parameters\n ----------\n alpha : float\n alpha has to be in range [0,1], assigns class weight\n gamma : float\n focusing parameter\n reduction : str\n Specifies the reduction to apply to the output: ‘none’ |\n ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied,\n ‘elementwise_mean’: the sum of the output will be divided by the\n number of elements in the output, ‘sum’: the output will be summed\n (further information about parameters above can be found in pytorch\n documentation)\n Returns\n -------\n torch.Tensor\n loss value\n \"\"\"\n super().__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.reduction = reduction\n\n def forward(self, p, t):\n \"\"\"\n Compute Focal Loss with logits\n Parameters\n ----------\n p : torch.Tensor\n prediction\n t : torch.Tensor\n target\n Returns\n -------\n torch.Tensor\n result\n \"\"\"\n\n return binary_focal_loss_logits(p=p, t=t, alpha=self.alpha,\n gamma=self.gamma,\n reduction=self.reduction)\n\n\nclass FocalLoss(torch.nn.Module):\n def __init__(self, alpha=None, gamma=2, reduction=\"elementwise_mean\"):\n \"\"\"\n Implements Focal Loss for non-binary class case\n Parameters\n ----------\n alpha : torch.Tensor\n alpha has to be in range [0,1], assigns class weight\n gamma : float\n focusing parameter\n reduction : str\n Specifies the reduction to apply to the output: ‘none’ |\n ‘elementwise_mean’ | ‘sum’. 
‘none’: no reduction will be applied,\n ‘elementwise_mean’: the sum of the output will be divided by the\n number of elements in the output, ‘sum’: the output will be summed\n (further information about parameters above can be found in pytorch\n documentation)\n Returns\n -------\n torch.Tensor\n loss value\n \"\"\"\n super().__init__()\n self.gamma = gamma\n if isinstance(alpha, torch.Tensor):\n self.register_buffer('alpha', alpha)\n else:\n self.alpha = alpha\n self.reduction = reduction\n\n def forward(self, p, t):\n \"\"\"\n Compute Focal Loss without logits\n Parameters\n ----------\n p : torch.Tensor\n prediction\n t : torch.Tensor\n target\n Returns\n -------\n torch.Tensor\n result\n \"\"\"\n return focal_loss(p=p, t=t, gamma=self.gamma, alpha=self.alpha,\n reduction=self.reduction)\n\n\nclass FocalLossWithLogits(torch.nn.Module):\n def __init__(self, alpha=None, gamma=2, reduction=\"elementwise_mean\"):\n \"\"\"\n Implements Focal Loss with logits for non-binary class case\n Parameters\n ----------\n alpha : torch.Tensor\n alpha has to be in range [0,1], assigns class weight\n gamma : float\n focusing parameter\n reduction : str\n Specifies the reduction to apply to the output: ‘none’ |\n ‘elementwise_mean’ | ‘sum’. ‘none’: no reduction will be applied,\n ‘elementwise_mean’: the sum of the output will be divided by the\n number of elements in the output, ‘sum’: the output will be summed\n (further information about parameters above can be found in pytorch\n documentation)\n Returns\n -------\n torch.Tensor\n loss value\n \"\"\"\n super().__init__()\n self.gamma = gamma\n if isinstance(alpha, torch.Tensor):\n self.register_buffer('alpha', alpha)\n else:\n self.alpha = alpha\n self.reduction = reduction\n\n def forward(self, p, t):\n \"\"\"\n Compute Focal Loss with logits\n Parameters\n ----------\n p : torch.Tensor\n prediction\n t : torch.Tensor\n target\n Returns\n -------\n torch.Tensor\n result\n \"\"\"\n return focal_loss_with_logits(p=p, t=t, gamma=self.gamma,\n alpha=self.alpha,\n reduction=self.reduction)\n"
] |
[
[
"torch.sigmoid",
"torch.nn.functional.softmax",
"torch.ones",
"torch.nn.functional.nll_loss",
"torch.eq",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.binary_cross_entropy"
]
] |
raybellwaves/climpred
|
[
"4ce5e3d30dbaa98fb974b54d82a5403c424a79db"
] |
[
"climpred/tests/test_uninitialized.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom climpred.comparisons import HINDCAST_COMPARISONS\nfrom climpred.constants import VALID_ALIGNMENTS\nfrom climpred.metrics import DETERMINISTIC_HINDCAST_METRICS\nfrom climpred.reference import compute_uninitialized\n\n# uacc breaks\nDETERMINISTIC_HINDCAST_METRICS = DETERMINISTIC_HINDCAST_METRICS.copy()\nDETERMINISTIC_HINDCAST_METRICS.remove(\"uacc\")\n\n\n@pytest.mark.parametrize(\"metric\", DETERMINISTIC_HINDCAST_METRICS)\n@pytest.mark.parametrize(\"comparison\", HINDCAST_COMPARISONS)\ndef test_compute_uninitialized(\n hind_ds_initialized_1d,\n reconstruction_ds_1d,\n hist_ds_uninitialized_1d,\n metric,\n comparison,\n):\n \"\"\"\n Checks that compute uninitialized works without breaking.\n \"\"\"\n category_edges = np.array([0.0, 0.5, 1.0])\n if metric == \"contingency\":\n metric_kwargs = {\n \"forecast_category_edges\": category_edges,\n \"observation_category_edges\": category_edges,\n \"score\": \"accuracy\",\n }\n else:\n metric_kwargs = {}\n res = (\n compute_uninitialized(\n hind_ds_initialized_1d,\n hist_ds_uninitialized_1d,\n reconstruction_ds_1d,\n metric=metric,\n comparison=comparison,\n **metric_kwargs\n )\n .isnull()\n .any()\n )\n for var in res.data_vars:\n assert not res[var]\n\n\n@pytest.mark.parametrize(\"alignment\", VALID_ALIGNMENTS)\ndef test_compute_uninitialized_alignment(\n hind_ds_initialized_1d, reconstruction_ds_1d, hist_ds_uninitialized_1d, alignment\n):\n \"\"\"Tests that compute_uninitialized works for various alignments.\"\"\"\n res = (\n compute_uninitialized(\n hind_ds_initialized_1d,\n hist_ds_uninitialized_1d,\n reconstruction_ds_1d,\n metric=\"pr\",\n comparison=\"e2o\",\n alignment=alignment,\n )\n .isnull()\n .any()\n )\n for var in res.data_vars:\n assert not res[var]\n\n\ndef test_compute_uninitialized_same_verifs(\n hind_da_initialized_1d, reconstruction_da_1d, hist_da_uninitialized_1d\n):\n \"\"\"Tests that uninitialized skill is same at all leads for `same_verifs`\n alignment.\"\"\"\n res = compute_uninitialized(\n hind_da_initialized_1d,\n hist_da_uninitialized_1d,\n reconstruction_da_1d,\n metric=\"pr\",\n comparison=\"e2o\",\n alignment=\"same_verifs\",\n )\n assert ((res - res[0]) == 0).all()\n"
] |
[
[
"numpy.array"
]
] |
reginaeckert/isofit
|
[
"30d9ada3915b779e2d92e7d9fd5abc49bb6f512e"
] |
[
"isofit/radiative_transfer/look_up_tables.py"
] |
[
"#! /usr/bin/env python3\n#\n# Copyright 2018 California Institute of Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ISOFIT: Imaging Spectrometer Optimal FITting\n# Author: David R Thompson, david.r.thompson@jpl.nasa.gov\n#\n\nimport os\nimport numpy as np\nimport logging\nimport ray\nfrom collections import OrderedDict\nimport subprocess\nimport time\nfrom typing import List\n\nfrom isofit.core import common\nfrom isofit.configs import Config\nfrom isofit.configs.sections.radiative_transfer_config import RadiativeTransferEngineConfig\nfrom isofit.configs.sections.statevector_config import StateVectorElementConfig\nfrom isofit.configs.sections.implementation_config import ImplementationConfig\n\n\n### Functions ###\n\n@ray.remote\ndef spawn_rt(cmd, local_dir=None):\n \"\"\"Run a CLI command.\"\"\"\n\n print(cmd)\n\n # Add a very slight timing offset to prevent all subprocesses\n # starting simultaneously\n time.sleep(float(np.random.random(1))*2)\n\n subprocess.call(cmd, shell=True, cwd=local_dir)\n\n### Classes ###\n\nclass FileExistsError(Exception):\n \"\"\"FileExistsError with a message.\"\"\"\n\n def __init__(self, message):\n super(FileExistsError, self).__init__(message)\n\n\nclass TabularRT:\n \"\"\"A model of photon transport including the atmosphere.\"\"\"\n\n def __init__(self, engine_config: RadiativeTransferEngineConfig, full_config: Config):\n\n self.implementation_config: ImplementationConfig = full_config.implementation\n if engine_config.wavelength_file is not None:\n wavelength_file = engine_config.wavelength_file \n else:\n wavelength_file = full_config.forward_model.instrument.wavelength_file\n self.wl, self.fwhm = common.load_wavelen(wavelength_file)\n if engine_config.wavelength_range is not None:\n valid_wl = np.logical_and(self.wl >= engine_config.wavelength_range[0],\n self.wl <= engine_config.wavelength_range[1])\n self.wl = self.wl[valid_wl]\n self.fwhm = self.fwhm[valid_wl]\n\n self.n_chan = len(self.wl)\n\n self.auto_rebuild = full_config.implementation.rte_auto_rebuild\n self.configure_and_exit = full_config.implementation.rte_configure_and_exit\n self.implementation_mode = full_config.implementation.mode\n\n # We use a sorted dictionary here so that filenames for lookup\n # table (LUT) grid points are always constructed the same way, with\n # consistent dimesion ordering). 
Every state vector element has\n # a lookup table dimension, but some lookup table dimensions\n # (like geometry parameters) may not be in the state vector.\n # TODO: enforce a requirement that makes all SV elements be inside the LUT\n full_lut_grid = full_config.forward_model.radiative_transfer.lut_grid\n # selectively get lut components that are in this particular RTE\n self.lut_grid_config = OrderedDict()\n if engine_config.lut_names is not None:\n lut_names = engine_config.lut_names\n else:\n lut_names = full_config.forward_model.radiative_transfer.lut_grid.keys()\n\n for key, value in full_lut_grid.items():\n if key in lut_names:\n self.lut_grid_config[key] = value\n\n # selectively get statevector components that are in this particular RTE\n full_sv_names = full_config.forward_model.radiative_transfer.statevector.get_element_names()\n self.statevector_names = full_sv_names\n\n self.lut_dir = engine_config.lut_path\n self.n_point = len(self.lut_grid_config)\n self.n_state = len(self.statevector_names)\n\n self.luts = {}\n\n # Retrieved variables. We establish scaling, bounds, and\n # initial guesses for each state vector element. The state\n # vector elements are all free parameters in the RT lookup table,\n # and they all have associated dimensions in the LUT grid.\n self.bounds, self.scale, self.init = [], [], []\n self.prior_mean, self.prior_sigma = [], []\n for key in self.statevector_names:\n element: StateVectorElementConfig = full_config.forward_model.radiative_transfer.statevector.get_single_element_by_name(\n key)\n self.bounds.append(element.bounds)\n self.scale.append(element.scale)\n self.init.append(element.init)\n self.prior_sigma.append(element.prior_sigma)\n self.prior_mean.append(element.prior_mean)\n self.bounds = np.array(self.bounds)\n self.scale = np.array(self.scale)\n self.init = np.array(self.init)\n self.prior_mean = np.array(self.prior_mean)\n self.prior_sigma = np.array(self.prior_sigma)\n\n self.lut_dims = []\n self.lut_grids = []\n self.lut_names = []\n self.lut_interp_types = []\n for key, grid_values in self.lut_grid_config.items():\n\n # do some quick checks on the values\n # For forward (simulation) mode, 1-dimensional LUT grids are OK!\n if len(grid_values) == 1 and not self.implementation_mode == \"simulation\":\n err = 'Only 1 value in LUT grid {}. 
'.format(key) +\\\n '1-d LUT grids cannot be interpreted.'\n raise ValueError(err)\n if grid_values != sorted(grid_values):\n logging.error('Lookup table grid needs ascending order')\n raise ValueError('Lookup table grid needs ascending order')\n\n # Store the values\n self.lut_grids.append(grid_values)\n self.lut_dims.append(len(grid_values))\n self.lut_names.append(key)\n\n # Store in an indication of the type of value each key is\n # (normal - n, degree - d, radian - r)\n if key in self.angular_lut_keys_radians:\n self.lut_interp_types.append('r')\n elif key in self.angular_lut_keys_degrees:\n self.lut_interp_types.append('d')\n else:\n self.lut_interp_types.append('n')\n\n # Cast as array for faster reference later\n self.lut_interp_types = np.array(self.lut_interp_types)\n\n # \"points\" contains all combinations of grid points\n # We will have one filename prefix per point\n self.points = common.combos(self.lut_grids)\n self.files = self.get_lut_filenames()\n\n def get_rebuild_cmds(self, rebuild=False) -> List:\n \"\"\"\n Generate a full set of RTM subprocess calls to run\n Args:\n rebuild: optional flag to rebuild all RTM simulations from scratch\n\n Returns:\n A series of strings to execute as subprocess calls.\n \"\"\"\n\n # Build the list of radiative transfer run commands. This\n # rebuild_cmd() function will be overriden by the child class to\n # perform setup activities unique to each RTM.\n rebuild_cmds = []\n for point, fn in zip(self.points, self.files):\n try:\n cmd = self.rebuild_cmd(point, fn)\n rebuild_cmds.append(cmd)\n except FileExistsError:\n pass\n\n return rebuild_cmds\n\n def build_lut(self, rebuild=False):\n \"\"\"Build a lookup table by:\n (1) defining the LUT dimensions, state vector names, and the\n grid of values;\n (2) running the radiative transfer solver if needed, with each\n run defining a different point in the LUT; and\n (3) loading the LUTs, one per key atmospheric coefficient vector,\n into memory as VectorInterpolator objects.\n\n Args:\n rebuild: optional flag to rebuild all RTM simulations from scratch\n \"\"\"\n\n # Build the list of radiative transfer run commands. This\n # rebuild_cmd() function will be overriden by the child class to\n # perform setup activities unique to each RTM.\n rebuild_cmds = self.get_rebuild_cmds(rebuild=rebuild)\n\n if self.configure_and_exit:\n logging.info(\"configure_and_exit flag set - terminating\")\n raise SystemExit\n elif len(rebuild_cmds) > 0 and self.auto_rebuild:\n logging.info(\"Rebuilding radiative transfer look up table\")\n\n # check to make sure lut directory is there, create if not\n if os.path.isdir(self.lut_dir) is False:\n os.mkdir(self.lut_dir)\n\n # Make the LUT calls (in parallel if specified)\n results = ray.get([spawn_rt.remote(rebuild_cmd, self.lut_dir) for rebuild_cmd in rebuild_cmds])\n\n def get_lut_filenames(self):\n files = []\n for point in self.points:\n outf = '_'.join(['%s-%6.4f' % (n, x)\n for n, x in zip(self.lut_names, point)])\n files.append(outf)\n return files\n\n def summarize(self, x_RT, geom):\n \"\"\"Summary of state vector.\"\"\"\n\n if len(x_RT) < 1:\n return ''\n return 'Atmosphere: '+' '.join(['%s: %5.3f' % (si, xi) for si, xi in\n zip(self.statevector_names, x_RT)])\n"
] |
[
[
"numpy.array",
"numpy.random.random",
"numpy.logical_and"
]
] |
anaxsouza/FBPINNs
|
[
"fc50f9e69613318353ccff339d25c723cd8dd55c"
] |
[
"fbpinns/main.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 15 21:53:56 2021\n\n@author: bmoseley\n\"\"\"\n\n# This module defines trainer classes for FBPINNs and PINNs. It is the main entry point for training FBPINNs and PINNs\n# To train a FBPINN / PINN, use a Constants object to setup the problem and define its hyperparameters, and pass that \n# to one of the trainer classes defined here\n\n# This module is used by the paper_main_ND.py scripts\n\nimport time\n\nimport numpy as np\nimport torch\nimport torch.optim\n\nimport plot_main\nimport losses\nfrom trainersBase import _Trainer\nfrom constants import Constants\nfrom domains import ActiveRectangularDomainND\n\n\n## HELPER FUNCTIONS\n\n\ndef _x_random(subdomain_xs, batch_size, device):\n \"Get flattened random samples of x\"\n s = torch.tensor([[x.min(), x.max()] for x in subdomain_xs], dtype=torch.float32, device=device).T.unsqueeze(1)# (2, 1, nd)\n x_random = s[0]+(s[1]-s[0])*torch.rand((np.prod(batch_size),len(subdomain_xs)), device=device)# random samples in domain\n return x_random\n\ndef _x_mesh(subdomain_xs, batch_size, device):\n \"Get flattened samples of x on a mesh\"\n x_mesh = [torch.linspace(x.min(), x.max(), b, device=device) for x,b in zip(subdomain_xs, batch_size)]\n x_mesh = torch.stack(torch.meshgrid(*x_mesh), -1).view(-1, len(subdomain_xs))# nb torch.meshgrid uses np indexing=\"ij\"\n return x_mesh\n\ndef full_model_FBPINN(x, models, c, D):\n \"\"\"Get the full FBPINN prediction over all active and fixed models (forward inference only)\"\"\"\n \n def _single_model(im):# use separate function to ensure computational graph/memory is released\n \n x_ = x.detach().clone().requires_grad_(True)\n \n # normalise, run model, add window function\n mu, sd = D.n_torch[im]# (nd)\n y = models[im]((x_-mu)/sd)\n y_raw = y.detach().clone()\n y = y*c.Y_N[1] + c.Y_N[0]\n y = D.w[im](x_)*y\n \n # get gradients\n yj = c.P.get_gradients(x_, y)# problem-specific\n \n # detach from graph\n yj = [t.detach() for t in yj]\n \n # apply boundary conditions (for QC only)\n yj_bc = c.P.boundary_condition(x, *yj, *c.BOUNDARY_N)# problem-specific\n \n return yj, yj_bc, y_raw\n \n # run all models\n yjs, yjs_bc, ys_raw = [], [], []\n for im in D.active_fixed_ims:\n yj, yj_bc, y_raw = _single_model(im)\n \n # add to model lists\n yjs.append(yj); yjs_bc.append(yj_bc); ys_raw.append(y_raw)\n \n # sum across models\n yj = [torch.sum(torch.stack(ts, -1), -1) for ts in zip(*yjs)]# note zip(*) transposes\n \n # apply boundary condition to summed solution\n yj = c.P.boundary_condition(x, *yj, *c.BOUNDARY_N)# problem-specific\n \n return yj, yjs_bc, ys_raw\n\ndef full_model_PINN(x, model, c):\n \"\"\"Get the full PINN prediction (forward inference only)\"\"\"\n \n # get normalisation values\n xmin, xmax = torch.tensor([[x.min(), x.max()] for x in c.SUBDOMAIN_XS], dtype=torch.float32, device=x.device).T\n mu = (xmin + xmax)/2; sd = (xmax - xmin)/2\n \n # get full model solution using test data\n x_ = x.detach().clone().requires_grad_(True)\n y = model((x_-mu)/sd)\n y_raw = y.detach().clone()\n y = y*c.Y_N[1] + c.Y_N[0]\n \n # get gradients\n yj = c.P.get_gradients(x_, y)# problem-specific\n \n # detach from graph\n yj = [t.detach() for t in yj]\n \n # apply boundary conditions\n yj = c.P.boundary_condition(x, *yj, *c.BOUNDARY_N)# problem-specific\n \n return yj, y_raw\n\n\n## MAIN TRAINER CLASSES\n\n\nclass FBPINNTrainer(_Trainer):\n \"FBPINN model trainer class\"\n \n def _train_step(self, models, optimizers, c, D, i):# use separate 
function to ensure computational graph/memory is released\n \n ## ZERO PARAMETER GRADIENTS, SET TO TRAIN\n for optimizer in optimizers: optimizer.zero_grad()\n for model in models: model.train()\n \n ## RANDOMLY SAMPLE ALL SEGMENTS\n x_segments = D.sample_segments()\n \n ## RUN MODELS (ACTIVE AND FIXED NEIGHBOURS)\n xs, yjs = [], []\n for im,_ in D.active_fixed_neighbours_ims:\n \n # sample segments in model\n x = [x_segments[iseg] for iseg in D.m[im]]\n x = torch.cat(x, dim=0).detach().clone().requires_grad_(True)#(N, nd)\n \n # normalise, run model, add window function\n mu, sd = D.n_torch[im]# (nd)\n y = models[im]((x-mu)/sd)\n y = y*c.Y_N[1] + c.Y_N[0]\n y = D.w[im](x)*y\n \n # get gradients\n yj = c.P.get_gradients(x, y)# problem-specific\n \n # add to model lists\n yjs.append(yj)\n xs.append(x)\n \n if (i % c.SUMMARY_FREQ) == 0: \n print(*[t.shape for t in yj], x.shape)\n \n ## SUM OVERLAPPING MODELS, APPLY BCS (ACTIVE)\n yjs_sum = []\n for im,i1 in D.active_ims:\n \n # for each segment in model\n yjs_segs = []\n for iseg in D.m[im]:\n \n # for each model which contributes to that segment\n yjs_seg = []\n for im2,j1,j2,j3 in D.s[iseg]:\n \n # get model yj segment iseg\n yj = yjs[j1]# j1 is the index of yj for model im2 in yjs above\n if im2 == im: yj = [t[j2:j3] for t in yj]# get appropriate segment\n else: yj = [t[j2:j3].detach() for t in yj]\n \n # add to model list\n yjs_seg.append(yj)\n \n # sum across models\n yj_seg = [torch.sum(torch.stack(ts, -1), -1) for ts in zip(*yjs_seg)]# note zip(*) transposes\n \n # add to segment list\n yjs_segs.append(yj_seg)\n \n # concatenate across segments\n yj = [torch.cat(ts) for ts in zip(*yjs_segs)]# note zip(*) transposes\n \n # apply boundary conditions\n x = xs[i1]\n yj = c.P.boundary_condition(x, *yj, *c.BOUNDARY_N)# problem-specific\n \n # add to model list\n yjs_sum.append(yj)\n \n if (i % c.SUMMARY_FREQ) == 0: \n print(*[t.shape for t in yj])# should be the same as above!\n \n ## BACKPROPAGATE LOSS (ACTIVE)\n for im,i1 in D.active_ims:\n x, yj = xs[i1], yjs_sum[i1]\n loss = c.P.physics_loss(x, *yj)# problem-specific\n loss.backward()\n optimizers[im].step()\n \n # return result\n return ([t.detach() for t in xs], \n [[t.detach() for t in ts] for ts in yjs], \n [[t.detach() for t in ts] for ts in yjs_sum], loss.item())\n \n def _test_step(self, x_test, yj_true, xs, yjs, yjs_sum, models, c, D, i, mstep, fstep, writer, yj_test_losses):# use separate function to ensure computational graph/memory is released\n\n # get full model solution using test data\n yj_full, yjs_full, ys_full_raw = full_model_FBPINN(x_test, models, c, D)\n print(x_test.shape, yj_true[0].shape, yj_full[0].shape)\n \n # get losses over test data\n yj_test_loss = [losses.l1_loss(a,b).item() for a,b in zip(yj_true, yj_full)]\n physics_loss = c.P.physics_loss(x_test, *yj_full).item()# problem-specific\n yj_test_losses.append([i + 1, mstep, fstep]+yj_test_loss+[physics_loss])\n for j,l in enumerate(yj_test_loss): \n for step,tag in zip([i + 1, mstep, fstep], [\"istep\", \"mstep\", \"zfstep\"]):\n writer.add_scalar(\"loss_%s/yj%i/test\"%(tag,j), l, step)\n writer.add_scalar(\"loss_istep/zphysics/test\", physics_loss, i + 1)\n \n # PLOTS\n \n if (i + 1) % c.TEST_FREQ == 0:\n \n # save figures\n fs = plot_main.plot_FBPINN(x_test, yj_true, xs, yjs, yjs_sum, yj_full, yjs_full, ys_full_raw, yj_test_losses, c, D, i + 1)\n if fs is not None: self._save_figs(i, fs)\n \n del x_test, yj_true, xs, yjs, yjs_sum, yj_full, yjs_full, ys_full_raw# fixes weird over-allocation of GPU memory 
bug caused by plotting (?)\n \n return yj_test_losses\n \n def train(self):\n \"Train model\"\n \n c, device, writer = self.c, self.device, self.writer\n \n # define domain\n D = ActiveRectangularDomainND(c.SUBDOMAIN_XS, c.SUBDOMAIN_WS, device=device)\n D.update_sampler(c.BATCH_SIZE, c.RANDOM)\n A = c.ACTIVE_SCHEDULER(c.N_STEPS, D, *c.ACTIVE_SCHEDULER_ARGS)\n \n # create models\n models = [c.MODEL(c.P.d[0], c.P.d[1], c.N_HIDDEN, c.N_LAYERS) for _ in range(D.N_MODELS)]# problem-specific\n \n # create optimisers\n optimizers = [torch.optim.Adam(model.parameters(), lr=c.LRATE) for model in models]\n \n # put models on device\n for model in models: model.to(device)\n\n # get exact solution if it exists\n x_test = _x_mesh(c.SUBDOMAIN_XS, c.BATCH_SIZE_TEST, device)\n yj_true = c.P.exact_solution(x_test, c.BATCH_SIZE_TEST)# problem-specific\n \n ## TRAIN\n \n mstep, fstep, yj_test_losses = 0, 0, []\n start, gpu_time = time.time(), 0.\n for i,active in enumerate(A):\n \n # update active if required\n if active is not None: \n D.update_active(active)\n print(i, \"Active updated:\\n\", active)\n \n gpu_start = time.time()\n xs, yjs, yjs_sum, loss = self._train_step(models, optimizers, c, D, i)\n for im,i1 in D.active_ims: mstep += models[im].size# record number of weights updated\n for im,i1 in D.active_fixed_neighbours_ims: fstep += models[im].flops(xs[i1].shape[0])# record number of FLOPS\n gpu_time += time.time()-gpu_start\n \n \n # METRICS\n \n if (i + 1) % c.SUMMARY_FREQ == 0:\n \n # set counters\n rate, gpu_time = c.SUMMARY_FREQ / gpu_time, 0.\n \n # print summary\n self._print_summary(i, loss, rate, start)\n \n # test step\n yj_test_losses = self._test_step(x_test, yj_true, xs, yjs, yjs_sum, models, c, D, i, mstep, fstep, writer, yj_test_losses)\n \n # SAVE\n \n if (i + 1) % c.MODEL_SAVE_FREQ == 0:\n \n # save models, losses and active array\n for im,model in enumerate(models):\n self._save_model(i, model, im)\n np.save(c.MODEL_OUT_DIR+\"active_%.8i.npy\"%(i + 1), D.active)\n np.save(c.MODEL_OUT_DIR+\"loss_%.8i.npy\"%(i + 1), np.array(yj_test_losses))\n \n # cleanup\n writer.close()\n print(\"Finished training\")\n \n\nclass PINNTrainer(_Trainer):\n \"Standard PINN model trainer class\"\n \n def _train_step(self, model, optimizer, c, i, mu, sd, device):# use separate function to ensure computational graph/memory is released\n \n optimizer.zero_grad()\n model.train()\n \n sampler = _x_random if c.RANDOM else _x_mesh\n x = sampler(c.SUBDOMAIN_XS, c.BATCH_SIZE, device).requires_grad_(True)\n y = model((x-mu)/sd)\n y = y*c.Y_N[1] + c.Y_N[0]\n \n # get gradients\n yj = c.P.get_gradients(x, y)# problem-specific\n \n # apply boundary conditions\n yj = c.P.boundary_condition(x, *yj, *c.BOUNDARY_N)# problem-specific\n\n # backprop loss\n loss = c.P.physics_loss(x, *yj)# problem-specific\n loss.backward()\n optimizer.step()\n \n if (i % c.SUMMARY_FREQ) == 0: \n print(*[t.shape for t in yj], x.shape)\n \n # return result\n return x.detach(), [t.detach() for t in yj], loss.item()\n \n def _test_step(self, x_test, yj_true, x, yj, model, c, i, mstep, fstep, writer, yj_test_losses):# use separate function to ensure computational graph/memory is released\n \n # get full model solution using test data\n yj_full, y_full_raw = full_model_PINN(x_test, model, c)\n print(x_test.shape, yj_true[0].shape, yj_full[0].shape)\n \n # get losses over test data\n yj_test_loss = [losses.l1_loss(a,b).item() for a,b in zip(yj_true, yj_full)]\n physics_loss = c.P.physics_loss(x_test, *yj_full).item()# problem-specific\n 
yj_test_losses.append([i + 1, mstep, fstep]+yj_test_loss+[physics_loss])\n for j,l in enumerate(yj_test_loss): \n for step,tag in zip([i + 1, mstep, fstep], [\"istep\", \"mstep\", \"zfstep\"]):\n writer.add_scalar(\"loss_%s/yj%i/test\"%(tag,j), l, step)\n writer.add_scalar(\"loss_istep/zphysics/test\", physics_loss, i + 1)\n \n # PLOTS\n \n if (i + 1) % c.TEST_FREQ == 0:\n \n # save figures\n fs = plot_main.plot_PINN(x_test, yj_true, x, yj, yj_full, y_full_raw, yj_test_losses, c, i + 1)\n if fs is not None: self._save_figs(i, fs)\n \n del x_test, yj_true, x, yj, yj_full, y_full_raw# fixes weird over-allocation of GPU memory bug caused by plotting (?)\n \n return yj_test_losses\n \n def train(self):\n \"Train model\"\n \n c, device, writer = self.c, self.device, self.writer\n \n # define model\n model = c.MODEL(c.P.d[0], c.P.d[1], c.N_HIDDEN, c.N_LAYERS)# problem-specific\n \n # create optimiser\n optimizer = torch.optim.Adam(model.parameters(), lr=c.LRATE)\n \n # put model on device\n model.to(device)\n \n # get normalisation values\n xmin, xmax = torch.tensor([[x.min(), x.max()] for x in c.SUBDOMAIN_XS], dtype=torch.float32, device=device).T\n mu = (xmin + xmax)/2; sd = (xmax - xmin)/2\n print(mu, sd, mu.shape, sd.shape)# (nd)# broadcast below\n \n # get exact solution if it exists\n x_test = _x_mesh(c.SUBDOMAIN_XS, c.BATCH_SIZE_TEST, device)\n yj_true = c.P.exact_solution(x_test, c.BATCH_SIZE_TEST)# problem-specific\n \n ## TRAIN\n \n mstep, fstep, yj_test_losses = 0, 0, []\n start, gpu_time = time.time(), 0.\n for i in range(c.N_STEPS):\n gpu_start = time.time()\n x, yj, loss = self._train_step(model, optimizer, c, i, mu, sd, device)\n mstep += model.size# record number of weights updated\n fstep += model.flops(x.shape[0])# record number of FLOPS\n gpu_time += time.time()-gpu_start\n \n \n # METRICS\n \n if (i + 1) % c.SUMMARY_FREQ == 0:\n \n # set counters\n rate, gpu_time = c.SUMMARY_FREQ / gpu_time, 0.\n \n # print summary\n self._print_summary(i, loss, rate, start)\n \n # test step\n yj_test_losses = self._test_step(x_test, yj_true, x, yj, model, c, i, mstep, fstep, writer, yj_test_losses)\n \n # SAVE\n \n if (i + 1) % c.MODEL_SAVE_FREQ == 0:\n \n # save model and losses\n self._save_model(i, model)\n np.save(c.MODEL_OUT_DIR+\"loss_%.8i.npy\"%(i + 1), np.array(yj_test_losses))\n \n # cleanup\n writer.close()\n print(\"Finished training\")\n \n \nif __name__ == \"__main__\":\n #'''\n c = Constants(\n N_LAYERS=2,\n N_HIDDEN=16,\n TEST_FREQ=1000,\n RANDOM=True,\n )\n run = FBPINNTrainer(c)\n '''\n \n c = Constants(\n N_LAYERS=4,\n N_HIDDEN=64,\n TEST_FREQ=1000,\n RANDOM=True,\n )\n run = PINNTrainer(c)\n '''\n \n run.train()"
] |
[
[
"torch.cat",
"numpy.save",
"numpy.prod",
"torch.stack",
"numpy.array",
"torch.meshgrid"
]
] |
Bhaskers-Blu-Org1/Semantic-Search-for-Sustainable-Development
|
[
"08b79de5df1f7427e63c8d9806e0bd509ca58fb7"
] |
[
"src/undp_experiments_2.py"
] |
[
"\"\"\"\r\n Copyright 2018 IBM Corporation\r\n\r\n Licensed under the Apache License, Version 2.0 (the \"License\");\r\n you may not use this file except in compliance with the License.\r\n You may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n Unless required by applicable law or agreed to in writing, software\r\n distributed under the License is distributed on an \"AS IS\" BASIS,\r\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n See the License for the specific language governing permissions and\r\n limitations under the License.\r\n\"\"\"\r\n\r\nfrom gensim.utils import simple_preprocess\r\nfrom gensim.models import Word2Vec\r\nfrom gensim.models import doc2vec\r\nfrom gensim import models\r\nfrom scipy import spatial\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom difflib import SequenceMatcher as sm\r\nfrom collections import OrderedDict\r\nimport os\r\nimport numpy as np\r\nimport itertools\r\nimport shelve\r\nfrom collections import Counter\r\nimport parseundp\r\nfrom CustomParVec import CustomParVec\r\nfrom undp_experiments_Utils import getTargetDoc, getInfo, get_all_matches, loadTruth, ria, avgMatches, evaluateByTarget\r\n\r\n\r\n#To start, we specify the paths that hold our policy documents and template2 RIA data. \r\n#We should also specify any documents we wish to exclude. We will learn our embeddings using the undp target descriptions \r\n#and all of the policy documents including the new country we wish to produce an RIA for.\r\n\r\ndocuments_path = 'data/documents/'\r\ntemplate_data_path = 'data/template2/'\r\nexclude_documents = []\r\n\r\n#I have saved a dictionary of the target descriptions. We load it here.\r\n#shelf = shelve.open('undp.db')\r\n#targets_only = shelf['targets_only']\r\nimport pickle\r\ntargets_only = pickle.load(open('undp.pkl', 'rb'))\r\n#targets_only['1.1'] gives the text for SDG 1 Target 1.1 and so on\r\n#shelf.close()\r\n\r\n#Next we create our corpus of Doc2Vec tagged documents in which each document is a paragraph/sentence from the documents \r\n#in the documents path as well as the target descriptions.\r\ncorpus = list(parseundp.read_corpus(documents_path, exclude_documents, targets_only))\r\n\r\n#Once we have our corpus of Doc2Vec tagged documents, we create a list in which every entry is a list of words of that line.\r\nwords_by_line = [entry.words for entry in corpus]\r\n#words_by_line actually lists the words in entire paragraphs (i.e. 
each line is one or more paragraphs) in the document\r\n#Here we create instances of our custom paragraph vector model.\r\n\r\n#Set values for various parameters\r\nnum_features = 4000 # Word vector dimensionality \r\nmin_word_count = 30 # Minimum word count \r\nnum_workers = 4 # Number of threads to run in parallel\r\ncontext = 30 # Context window size \r\ndownsampling = 1e-3 # Downsample setting for frequent words\r\n\r\n#We use the same parameters to create one instance that uses normalized bag of words scaling and another that uses tf-idf scaling.\r\n\r\npar_vec_nbow = CustomParVec(words_by_line, num_workers, num_features, min_word_count, context, downsampling, False)\r\npar_vec_tfidf = CustomParVec(words_by_line, num_workers, num_features, min_word_count, context, downsampling, True)\r\n\r\n#We will also experiment with Google's pre-trained word2vec model which has 300 dimensions\r\nmodel_google = models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True) \r\npar_vec_google = CustomParVec(words_by_line, num_workers, 300, min_word_count, context, downsampling, True, model_google)\r\n\r\n#Let's place our models in a single list\r\npar_vecs = [par_vec_google, par_vec_nbow, par_vec_tfidf]\r\n\r\n\r\n\r\n\r\n#Data we will be testing with\r\n\r\npolicy_documents_liberia = ['Liberia Agenda for Transformation.txt', 'Liberia Eco stabilization and recovery plan-april_2015.txt']\r\npolicy_documents_bhutan = ['Eleventh-Five-Year-Plan_Vol-1.txt', '11th-Plan-Vol-2.txt']\r\npolicy_documents_namibia = ['na-nbsap-v2-en.txt', 'Agri Book with cover1.txt', 'execution strategy for industrialisation.txt', 'INDC of Namibia Final pdf.txt', 'Namibia_Financial_Sector_Strategy.txt', 'Tourism Policy.txt', 'namibia_national_health_policy_framework_2010-2020.txt', 'nampower booklet_V4.txt', '826_Ministry of Education Strategic Plan 2012-17.txt', 'Namibia_NDP4_Main_Document.txt']\r\npolicy_documents_cambodia = ['National Strategic Development Plan 2014-2018 EN Final.txt', 'Cambodia_EducationStrategicPlan_2014_2018.txt', 'Cambodia Climate Change Strategic Plan 2014_2023.txt', 'Cambodia Industrial Development Policy 2015_2025.txt', 'Cambodian Gender Strategic Plan - Neary Rattanak 4_Eng.txt', 'Draft_HealthStrategicPlan2016-2020.txt', 'Cambodia_national-disability-strategic-plan-2014-2018.txt', 'National_Policy_on_Green_Growth_2013_EN.txt', 'tourism_development_stategic_plan_2012_2020_english.txt', 'Labour Migration Policy for Cambodia 2015-2018.txt', 'kh-nbsap-v2-en.txt', 'financial-sector-development-strategy-2011-2020.txt', 'National_Social_Protection_Strategy_for_the_Poor_and_Vulnerable_Eng.txt']\r\npolicy_documents_mauritius = ['Agro-forestry Strategy 2016-2020.txt', 'VISION_14June2016Vision 2030DraftVersion4.txt', 'Updated Action Plan of the Energy Strategy 2011 -2025.txt', 'National Water Policy 2014.txt', 'National CC Adaptioin Policy Framework report.txt', 'MauritiusEnergy Strategy 2009-2025.txt', 'Mauritius Govertment programme 2015-2019.txt', 'CBD Strategy and Action Plan.txt']\r\n\r\nexclude_ria_liberia = ['liberia.xlsx']\r\nexclude_ria_bhutan = ['bhutan_template2.xlsx']\r\nexclude_ria_namibia = ['namibia_template2.xlsx']\r\nexclude_ria_cambodia = ['cambodia_template2.xlsx']\r\nexclude_ria_mauritius = ['mauritius.xlsx']\r\n\r\nall_exclude_ria = [exclude_ria_liberia, exclude_ria_bhutan, exclude_ria_namibia, exclude_ria_cambodia, exclude_ria_mauritius]\r\nall_policy_documents = [policy_documents_liberia, policy_documents_bhutan, policy_documents_namibia, 
policy_documents_cambodia, policy_documents_mauritius]\r\n\r\n#Experiment 2: Include matches form prior RIAs in semantic search\r\ninclude_prior_matches = {}\r\nvec = 1\r\nfor par_vec in par_vecs:\r\n for i in range(len(all_policy_documents)):\r\n exclude_ria = all_exclude_ria[i]\r\n policy_documents = all_policy_documents[i]\r\n target_matches = loadTruth(template_data_path, exclude_ria)\r\n targs, targ_vecs, sents = getInfo(par_vec, target_matches)\r\n print(exclude_ria[0][:-5]+str(vec))\r\n score_dict = ria(documents_path, policy_documents, par_vec, sents, targ_vecs, targs)\r\n include_prior_matches[exclude_ria[0][:-5]+str(vec)] = [score_dict]\r\n vec += 1\r\n \r\ni = 0\r\nfor key, val in include_prior_matches.items():\r\n exclude_test = [file for file in os.listdir(template_data_path) if file not in all_exclude_ria[i]]\r\n test_development_matches = parseundp.extract_template_data(template_data_path, exclude_test)\r\n test_target_matches = parseundp.create_target_dictionary(test_development_matches)\r\n \r\n print(key, all_exclude_ria[i])\r\n match_by_sent = evaluateByTarget(val[0], test_target_matches, 301)\r\n include_prior_matches[key].append(match_by_sent)\r\n avg_new = avgMatches(match_by_sent, test_target_matches, 301)\r\n include_prior_matches[key].append(avg_new)\r\n\r\n i+=1\r\n if i % 5 == 0:\r\n i = 0\r\n\r\ninclude_prior_matches['liberia_google'] = include_prior_matches.pop('liberia1')\r\ninclude_prior_matches['liberia_nbow'] = include_prior_matches.pop('liberia2')\r\ninclude_prior_matches['liberia_tfidf'] = include_prior_matches.pop('liberia3')\r\ninclude_prior_matches['bhutan_google'] = include_prior_matches.pop('bhutan_template21')\r\ninclude_prior_matches['bhutan_nbow'] = include_prior_matches.pop('bhutan_template22')\r\ninclude_prior_matches['bhutan_tfidf'] = include_prior_matches.pop('bhutan_template23')\r\ninclude_prior_matches['namibia_google'] = include_prior_matches.pop('namibia_template21')\r\ninclude_prior_matches['namibia_nbow'] = include_prior_matches.pop('namibia_template22')\r\ninclude_prior_matches['namibia_tfidf'] = include_prior_matches.pop('namibia_template23')\r\ninclude_prior_matches['cambodia_google'] = include_prior_matches.pop('cambodia_template21')\r\ninclude_prior_matches['cambodia_nbow'] = include_prior_matches.pop('cambodia_template22')\r\ninclude_prior_matches['cambodia_tfidf'] = include_prior_matches.pop('cambodia_template23')\r\ninclude_prior_matches['mauritius_google'] = include_prior_matches.pop('mauritius1')\r\ninclude_prior_matches['mauritius_nbow'] = include_prior_matches.pop('mauritius2')\r\ninclude_prior_matches['mauritius_tfidf'] = include_prior_matches.pop('mauritius3')\r\n\r\ncountries = ['liberia', 'bhutan', 'namibia', 'cambodia', 'mauritius']\r\nnum_sentences = 30\r\nfor key in include_prior_matches.keys():\r\n print('{0:10} {1:10.5f}%'.format(key, include_prior_matches[key][2][num_sentences]*100))\r\n #print('-------------------------------------------------------------------------------') \r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n#%matplotlib inline\r\nsns.set_context('talk')\r\nsns.set_style(\"white\")\r\nplt.figure(figsize=(15,11))\r\n\r\ni =0\r\nfor key in include_prior_matches:\r\n if 'tfidf' in key:\r\n plt.plot(list(range(1, 31)), (np.asarray(sorted(include_prior_matches[key][2]))*100)[:30], label = key.split('.')[0].split('_')[0].upper())\r\nplt.legend(title = 'Country', bbox_to_anchor=(1.1, 0.45), loc=1, borderaxespad=10)\r\nplt.title('Percent Matches Vs. 
Number of Sentences')\r\nplt.xlabel('Number of Sentences')\r\nplt.ylabel('Percent Matches with Policy Experts')\r\nplt.yticks(np.arange(0, 55, 5))\r\n#plt.savefig('matches_update_30.jpeg')\r\nplt.show()\r\n\r\nplt.figure(figsize=(15,11))\r\n\r\nexamples = ['1.2', '3.3', '5.1', '9.3', '8.5', '15.2', '16.1']\r\nfor key in examples:\r\n plt.plot(list(range(1, 101)), (np.asarray(sorted(include_prior_matches['liberia_tfidf'][1][key]))*100)[:100], label = key)\r\nplt.legend(title = 'Target', bbox_to_anchor=(1.1, .7), loc=1, borderaxespad=10)\r\nplt.title('LIBERIA Percent Matches Vs. Number of Sentences by Target')\r\nplt.xlabel('Number of Sentences')\r\nplt.ylabel('Percent Matches with Policy Experts')\r\nplt.yticks(np.arange(0, 105, 10))\r\n#plt.savefig('liberia_target_update.jpeg')\r\nplt.show()\r\n\r\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
CookieNotSession/facial_POC
|
[
"4c89aeadf76051948406e7a941a8d64ac68d20ec"
] |
[
"src/flask_Server.py"
] |
[
"import cv2\nfrom flask import Flask, render_template, Response, jsonify, request, redirect, url_for\nfrom web_camera import VideoCamera\nfrom statistics import mode\nfrom keras.models import load_model\nimport numpy as np\nimport tensorflow as tf\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\nfrom flask import send_file\nfrom keras import backend as K\n\n# rtsp://10.50.249.4/live.sdp#http://10.50.197.220:8081/video.mjpg')\napp = Flask(__name__, static_folder='', static_url_path='')\n# parameters for loading data and images\ndetection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'\nemotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'\nemotion_labels = get_labels('fer2013')\n\nframe_window = 10\nemotion_offsets = (20, 40)\ndef modeling():\n # load the pre-trained Keras model (here we are using a model\n # pre-trained on ImageNet and provided by Keras, but you can\n # substitute in your own networks just as easily)\n global face_detection\n face_detection = load_detection_model(detection_model_path)\n global emotion_classifier\n emotion_classifier = load_model(emotion_model_path, compile=False)\n global graph\n graph = tf.get_default_graph()\n global emotion_target_size\n emotion_target_size = emotion_classifier.input_shape[1:3]\n\n\n# loading models\n#face_detection = load_detection_model(detection_model_path)\n#emotion_classifier = load_model(emotion_model_path, compile=False)\n# getting input model shapes for inference\n#emotion_target_size = emotion_classifier.input_shape[1:3]\n\n# starting lists for calculating modes\nemotion_window = []\n\nvideo_capture = cv2.VideoCapture(0)\n\n\ndef gen(camera):\n \"\"\"Video streaming generator function.\"\"\"\n while True:\n ret, bgr_image = video_capture.read()\n gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)\n faces = detect_faces(face_detection, gray_image)\n count_angry = 0\n count_sad = 0\n count_happy = 0\n count_surprise = 0\n count = 0\n for face_coordinates in faces:\n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n print(gray_face)\n try:\n gray_face = cv2.resize(gray_face, (emotion_target_size))\n except:\n continue\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n print('Transfer To Gray Face Successful')\n with graph.as_default():\n \n emotion_prediction = emotion_classifier.predict(gray_face)\n print('Emotion Prediction Successful')\n emotion_probability = np.max(emotion_prediction)\n emotion_label_arg = np.argmax(emotion_prediction)\n emotion_text = emotion_labels[emotion_label_arg]\n emotion_window.append(emotion_text)\n\n #print('Probability:' + str(emotion_probability))\n #print('Emotion:' + str(emotion_text))\n\n if len(emotion_window) > frame_window:\n emotion_window.pop(0)\n try:\n emotion_mode = mode(emotion_window)\n except:\n continue\n\n if emotion_text == 'angry':\n color = emotion_probability * np.asarray((255, 0, 0))\n print('Emotion:' + str(emotion_text))\n break \n elif emotion_text == 'sad':\n color = emotion_probability * np.asarray((0, 0, 255))\n print('Emotion:' + 
str(emotion_text))\n break \n elif emotion_text == 'happy':\n color = emotion_probability * np.asarray((255, 255, 0))\n print('Emotion:' + str(emotion_text))\n break\n elif emotion_text == 'surprise':\n color = emotion_probability * np.asarray((0, 255, 255))\n print('Emotion:' + str(emotion_text))\n break\n else:\n color = emotion_probability * np.asarray((0, 255, 0))\n print('Emotion:' + str(emotion_text))\n break\n\n # if emotion_text == 'angry':\n # count_angry = count_angry + 1\n # elif emotion_text == 'sad':\n # count_sad = count_sad + 1\n # elif emotion_text == 'happy':\n # count_happy = count_happy + 1\n # elif emotion_text == 'surprise':\n # count_surprise = count_surprise + 1\n # else:\n # count = count + 1\n # if count_angry > 1 :\n # print('Emotion:' + str(emotion_text))\n # break\n # if count_sad > 1 :\n # print('Emotion:' + str(emotion_text))\n # break\n # if count_happy > 1 :\n # print('Emotion:' + str(emotion_text))\n # break\n # if count_surprise > 1 :\n # print('Emotion:' + str(emotion_text))\n # break\n # if count > 1 :\n # print('Emotion:' + str(emotion_text))\n # break\n\n\n color = color.astype(int)\n color = color.tolist()\n\n\n ret, bgr_image = cv2.imencode('.jpg', bgr_image)\n bgr_image = bgr_image.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bgr_image + b'\\r\\n\\r\\n')\n\n\n@app.route('/')\ndef index():\n return Response(\n gen(VideoCamera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(\n gen(VideoCamera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/hello/')\n#@app.route('/hello/<name>')\ndef hello(name=None):\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n modeling()\n app.run(host='0.0.0.0', debug=True, threaded=True, port=8801)\n"
] |
[
[
"numpy.expand_dims",
"numpy.asarray",
"numpy.max",
"numpy.argmax",
"tensorflow.get_default_graph"
]
] |
naojibrainmachine/chinese-tokenization-bert
|
[
"7ffad6ee89a5e9b570d1fb4ff9f1ef5e2d8ed132"
] |
[
"test_bert_tokenize.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport math\n#from BERT import return_accuracy\nfrom LOADDATA import get_corpus_indices,data_format,get_data,build_vocab,ner_load_data,bild_vocab_token,build_vocab_label\nfrom LOAD_SAVE_PARAMS.LOAD_SAVE_PARAMS import save_weight,load_weight\nfrom Tokenizer import tokenizer\n\ndef test(model,params_bert, train_vocab,train_labels,vocab_size,label_size,chars_to_idx,label_idx_to_chars,batch_size,clip_norm):\n acc=[]\n acc2=[]\n F1=[]\n iter_data=get_data(train_vocab,train_labels,chars_to_idx,label_idx_to_chars,batch_size)\n outputs=[]\n Ys=[]\n los=[]\n for x,y in iter_data:\n\n \n label=[[y_2 for y_2 in y_1]for y_1 in y]\n \n X,Y,mask=data_format(x,y)#还要输出个mask\n #目前还需要修改数据的产生和mask的生成\n mask=tf.concat(mask,0)\n \n X=tf.concat(X,0)\n \n X=tf.one_hot(X,vocab_size)\n \n \n Y=tf.concat(Y,0)\n\n \n Y_bert=tf.one_hot(Y,label_size)\n \n Y=tf.cast(Y,dtype=tf.float32)\n \n output_bert=model(X)\n \n cc=return_accuracy_2(Y_bert,output_bert,mask)\n acc2.append(cc)\n print(\"bert训练准确率:%f\"%cc)\n\n filepath=\"test_acc_bert.txt\"\n flie=open(filepath,\"a+\")\n \n flie.write(str(tf.math.reduce_mean(acc2).numpy())+\"\\n\")\n flie.close()\n\n \n \n\n\ndef return_accuracy_2(Y,Y_pre,mask):\n \n rowMaxSoft=np.argmax(tf.nn.softmax(Y_pre), axis=-1)+1\n rowMax=np.argmax(Y, axis=-1)+1\n rowMaxSoft*mask.numpy()\n rowMax=rowMax*mask.numpy()\n rowMaxSoft=rowMaxSoft.reshape([1,-1])\n rowMax=rowMax.reshape([1,-1])\n \n nonO=rowMaxSoft-rowMax\n nonO=nonO*tf.reshape(mask,[1,-1]).numpy()\n exist = (nonO != 0) * 1.0\n factor = np.ones([nonO.shape[1],1])\n res = np.dot(exist, factor)\n accuracy=(float(tf.reduce_sum(mask).numpy())-res[0][0])/float(tf.reduce_sum(mask).numpy())\n \n return accuracy\n \n\n \nif __name__ == \"__main__\":\n \n batch_size=2\n\n input_nums=24\n\n num_hiddens=4\n\n num_outputs=24\n\n layer_nums=12\n\n multi_head=12\n \n max_position_dim=512\n\n clip_norm=1.0\n\n train_vocab,train_labels=ner_load_data(\"data\",\"ner_data_weiboNER_2nd_conll.train.csv\"),ner_load_data(\"data\",\"tokenize_label_weiboNER_2nd_conll.train.csv\")#读取训练数据\n\n test_vovab,test_labels=ner_load_data(\"data\",\"ner_data_weiboNER_2nd_conll.test.csv\"),ner_load_data(\"data\",\"tokenize_label_weiboNER_2nd_conll.test.csv\")#读取测试数据\n\n idx_to_chars,chars_to_idx,vocab_size=bild_vocab_token('data\\\\chinese_token.csv')#用汉字字库代替build_vocab返回的数据集生成的字库\n\n label_idx_to_char,label_char_to_idx,label_vocab_size=build_vocab_label(\"data\",\"tokenize_label_num_weiboNER_2nd_conll.train.csv\")#读取label词库\n\n model=tokenizer(lr=1e-5,input_nums=input_nums,hidden_nums=num_hiddens,output_nums=num_outputs,max_position_dim=max_position_dim,multi_head=multi_head,layers_encoder=layer_nums,vocab_size=vocab_size,labels_num=len(label_char_to_idx.keys()),label_char_to_idx=label_char_to_idx)\n\n params=model.get_params_bert()#bert的基础参数,为了恢复预训练的参数数据\n\n params_bert=params+model.get_patams_cls()\n \n epochs=3000\n\n isContinue=True\n \n if isContinue==True:\n load_weight(\"ckp\",\"params_tokenize\",params_bert)\n \n for i in range(epochs):\n test(model,params_bert, test_vovab,test_labels,vocab_size,label_vocab_size,chars_to_idx,label_char_to_idx,batch_size,clip_norm)\n \n"
] |
[
[
"numpy.dot",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.math.reduce_mean",
"numpy.ones",
"numpy.argmax",
"tensorflow.one_hot"
]
] |
lcarnevale/gympy
|
[
"157156f64b6c2567077d9c00ae1645360d0a7514"
] |
[
"gympy/envs/blife.py"
] |
[
"import gym\nimport time\nimport random\nimport numpy as np\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom .sensors import Sensors\nfrom .scenarios import WhiteNoiseScenario\n\nclass BatteryLifetimeEnv(gym.Env):\n \"\"\"\n Description:\n The blife environment simulates the behavior of a re-chargable\n lithium battery while the controlled device samples white noise\n and delivery it out in streaming or batch processes.\n The controlled device is a Raspberry Pi 3.\n\n Source:\n This environment and its documentation is available at\n https://github.com/lcarnevale/blife\n\n Observation:\n Type: Box(4)\n Num Observation Min Max\n 0 Battery Voltage 0 V 5 V\n 1 Battery Current 0 mA 2000 mA\n 2 Network Outbound Traffic 0 +inf\n 3 Buffer Size 0 100\n\n Actions:\n Type: Discrete(3)\n Num Action\n 0 do nothing\n 1 sample any 0.2s, deliver any 0.2s (streaming)\n 2 sample any 1s, deliver any 1s (streaming)\n 3 sample any 0.2s, deliver any 5s (batch)\n 4 sample ant 1s, deliver any 5s (batch)\n\n Reward:\n TODO\n\n Starting State:\n Configure and start the white noise scenario. \n\n Episode Termination:\n TODO \n \"\"\"\n\n def __init__(self, battery_capacity=2000, num_discrete_actions=5):\n self.__step_counter = 0\n self.__battery_capacity = battery_capacity\n\n min_battery_voltage = 0 #V\n max_battery_voltage = 5 #V\n\n min_battery_current = 0 #mA\n max_battery_current = 2000 #mA\n\n min_net_outbound = 0 #Bytes\n max_net_outbound = 1000000000 #Bytes\n\n min_buffer_size = 0 #Units\n max_buffer_size = 100 #Units\n\n self.__perform_action = [\n self.__action_do_nothing,\n self.__action_streaming_one,\n self.__action_streaming_two,\n self.__action_batch_one,\n self.__action_batch_two\n ]\n\n self.__sensors = Sensors()\n self.__max_expected_runtime = 0.0\n\n low = np.array([\n min_battery_voltage, min_battery_current, \n min_net_outbound, min_buffer_size], \n dtype = np.float16\n )\n high = np.array([\n max_battery_voltage, max_battery_current,\n max_net_outbound, max_buffer_size],\n dtype = np.float16\n )\n\n self.action_space = spaces.Discrete(num_discrete_actions)\n self.observation_space = spaces.Box(low, high, dtype = np.float16)\n\n\n def reset(self):\n \"\"\"Reset the state of the environment to an initial state.\n\n Returns:\n The observations space.\n See the class description for more details.\n \"\"\"\n self.__configure_experiment()\n self.__reset_experiment()\n return self.__observe()\n\n def __configure_experiment(self):\n self.__scenario = WhiteNoiseScenario(\n 'broker.mqttdashboard.com',\n '/fcrlab/distinsys/lcarnevale',\n )\n \n def __reset_experiment(self):\n self.__scenario.reset()\n\n def __observe(self):\n \"\"\"\n Returns:\n The observations space wrapped within a numpy array.\n See the class description for more details.\n \"\"\"\n observations = self.__next_observation()\n voltage = observations[0]\n current = observations[1]\n self.__power_last_mean = current * voltage\n return np.array(observations)\n\n def __next_observation(self):\n \"\"\"Observe the environment.\n\n Returns:\n The observations space.\n See the class description for more details.\n \"\"\"\n return [\n self.__sensors.get_bus_voltage(),\n self.__sensors.get_bus_current(),\n self.__sensors.get_net_bytes_sent(),\n self.__scenario.get_queue_size()\n ]\n\n\n def step(self, action):\n \"\"\"Execute one time step within the environment.\n\n Returns:\n\n \"\"\"\n done = False\n \n # perform action\n self.__valuate_action(action)\n self.__perform_action[action]()\n \n # collect observation\n 
observation = self.__sub_observe()\n voltage_mean = observation[0]\n current_mean = observation[1]\n observation = np.array(observation)\n\n # calculate reward\n power_mean = voltage_mean * current_mean\n reward = self.__reward_function(power_mean)\n\n # verify the termination condition\n expected_runtime = self.__battery_capacity / current_mean\n self.__set_max_expected_runtime(expected_runtime)\n done = self.__termination_function(expected_runtime)\n # print(\"\\t\\ttermination: %.3fh (runtime) >= 8h is %s\" % (expected_runtime, done))\n \n self.__power_last_mean = power_mean\n return observation, reward, done, {}\n \n def __valuate_action(self, action):\n err_msg = \"%r (%s) invalid\" % (action, type(action))\n assert self.action_space.contains(action), err_msg\n\n # action 0\n def __action_do_nothing(self):\n # print('do nothing')\n pass\n\n # action 1\n def __action_streaming_one(self):\n # print(\"sample any 1s, deliver any 1s (streaming)\")\n self.__scenario.set_transmission('streaming')\n self.__scenario.set_rate(0.05)\n\n # action 2\n def __action_streaming_two(self):\n # print(\"sample any 10s, deliver any 10s (streaming)\")\n self.__scenario.set_transmission('streaming')\n self.__scenario.set_rate(1)\n\n # action 3\n def __action_batch_one(self):\n self.__scenario.set_transmission('batch')\n self.__scenario.set_rate(0.05)\n\n # action 4\n def __action_batch_two(self):\n self.__scenario.set_transmission('batch')\n self.__scenario.set_rate(1)\n\n def __sub_observe(self, observation_rate=1, observation_window=15):\n \"\"\"Observe the environment under specific conditions.\n \"\"\"\n voltage_sub_history = list()\n current_sub_history = list()\n net_bytes_sent_sub_history = list()\n queue_size_sub_history = list()\n for _ in range(observation_window):\n observations = self.__next_observation()\n voltage_sub_history.append(observations[0])\n current_sub_history.append(observations[1])\n net_bytes_sent_sub_history.append(observations[2])\n queue_size_sub_history.append(observations[3])\n time.sleep(observation_rate)\n return [\n np.array(voltage_sub_history).mean(),\n np.array(current_sub_history).mean(),\n np.array(net_bytes_sent_sub_history).mean(),\n np.array(queue_size_sub_history).mean()\n ]\n\n def __reward_function(self, power_mean):\n \"\"\"Calculate the reward.\n\n Returns:\n float representing the energy delta in mJ\n \"\"\"\n energy_delta = - (power_mean - self.__power_last_mean) * 1\n return energy_delta\n\n def __set_max_expected_runtime(self, expected_runtime):\n if expected_runtime > self.__max_expected_runtime:\n self.__max_expected_runtime = expected_runtime\n\n def __termination_function(self, expected_runtime, target_expected_runtime=8.5):\n \"\"\"Calculate the termination\n\n With the battery capacity and average current consumption,\n you can compute the expected runtime of your project by \n solving the equation:\n Battery capacity (in mAh) / Average current consumption \n (in mA) = Hours of expected runtime\n\n I expect the runtime is at least one day.\n \n Returns:\n bool representing the termination validation\n \"\"\"\n return expected_runtime >= target_expected_runtime\n\n\n def close(self):\n self.__scenario.stop()"
] |
[
[
"numpy.array"
]
] |
PrabhanshuAttri/BentoML
|
[
"7253155677a4d683387b9d117881a7fcba9c98a3"
] |
[
"tests/integration/test_tensorflow_v2_2_savedmodel_artifact.py"
] |
[
"import json\nimport pytest\n\nimport tensorflow as tf\n\nimport bentoml\nfrom tests.bento_service_examples.tensorflow_classifier import Tensorflow2Classifier\nfrom tests.integration.api_server.conftest import (\n run_api_server_docker_container,\n build_api_server_docker_image,\n)\n\ntest_data = [[1, 2, 3, 4, 5]]\ntest_tensor = tf.constant(test_data)\n\n\nclass Tensorflow2Model(tf.keras.Model):\n def __init__(self):\n super(Tensorflow2Model, self).__init__()\n # Simple linear layer which sums the inputs\n self.dense = tf.keras.layers.Dense(\n units=1,\n input_shape=(5,),\n use_bias=False,\n kernel_initializer=tf.keras.initializers.Ones(),\n )\n\n def call(self, inputs):\n return self.dense(inputs)\n\n\n@pytest.fixture(scope=\"module\")\ndef tf2_svc():\n \"\"\"Return a TensorFlow2 BentoService.\"\"\"\n # When the ExampleBentoService got saved and loaded again in the test, the\n # two class attribute below got set to the loaded BentoService class.\n # Resetting it here so it does not effect other tests\n Tensorflow2Classifier._bento_service_bundle_path = None\n Tensorflow2Classifier._bento_service_bundle_version = None\n\n svc = Tensorflow2Classifier()\n model = Tensorflow2Model()\n svc.pack('model', model)\n\n return svc\n\n\n@pytest.fixture(scope=\"module\")\ndef tf2_svc_saved_dir(tmp_path_factory, tf2_svc):\n \"\"\"Save a TensorFlow2 BentoService and return the saved directory.\"\"\"\n # Must be called at least once before saving so that layers are built\n # See: https://github.com/tensorflow/tensorflow/issues/37439\n tf2_svc.predict(test_tensor)\n\n tmpdir = str(tmp_path_factory.mktemp(\"tf2_svc\"))\n tf2_svc.save_to_dir(tmpdir)\n\n return tmpdir\n\n\n@pytest.fixture()\ndef tf2_svc_loaded(tf2_svc_saved_dir):\n \"\"\"Return a TensorFlow2 BentoService that has been saved and loaded.\"\"\"\n return bentoml.load(tf2_svc_saved_dir)\n\n\n@pytest.fixture()\ndef tf2_image(tf2_svc_saved_dir):\n with build_api_server_docker_image(\n tf2_svc_saved_dir, \"tf2_example_service\"\n ) as image:\n yield image\n\n\n@pytest.fixture()\ndef tf2_host(tf2_image):\n with run_api_server_docker_container(tf2_image, timeout=500) as host:\n yield host\n\n\ndef test_tensorflow_2_artifact(tf2_svc):\n assert (\n tf2_svc.predict(test_tensor) == 15.0\n ), 'Inference on unsaved TF2 artifact does not match expected'\n\n\ndef test_tensorflow_2_artifact_loaded(tf2_svc_loaded):\n assert (\n tf2_svc_loaded.predict(test_tensor) == 15.0\n ), 'Inference on saved and loaded TF2 artifact does not match expected'\n\n\n@pytest.mark.asyncio\nasync def test_tensorflow_2_artifact_with_docker(tf2_host):\n await pytest.assert_request(\n \"POST\",\n f\"http://{tf2_host}/predict\",\n headers=((\"Content-Type\", \"application/json\"),),\n data=json.dumps({\"instances\": test_data}),\n assert_status=200,\n assert_data=b'[[15.0]]',\n )\n"
] |
[
[
"tensorflow.constant",
"tensorflow.keras.initializers.Ones"
]
] |
EduardoMoraesRitter/Alura
|
[
"c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6",
"c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6"
] |
[
"Alura/MLClassificacao2/A4V2_Classificando_email.py",
"Alura/MLClassificacao2/A4V5_Classificando_email.py"
] |
[
"#!-*- coding: utf8 -*-\n\nimport pandas as pd\n\nclassificacoes = pd.read_csv('email.csv')\ntextosPuros = classificacoes['email']\ntextosQuebrados = textosPuros.str.lower().str.split(' ')\ndicionario = set()\nfor lista in textosQuebrados:\n dicionario.update(lista)\n#dicionario sem indice - print(dicionario)\n#print(list(dicionario)[2])\ntotalPalavras = len(dicionario)\n#print(totalPalavras)\ntuplas = list(zip(dicionario, range(totalPalavras)))\n#print(tuplas)\n\n#so um dicionario(python)pode consultar assim > print(tuplas['pode'])\n\n#agora temos um dicionario com indice\npalavrasIndices = {palavra:indice for palavra, indice in tuplas}\n#print(palavrasIndices)\nprint(palavrasIndices['como'])\n\n\n\n\n",
"import pandas as pd\nfrom collections import Counter\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\nfrom sklearn.svm import LinearSVC\n\nclassificacoes = pd.read_csv('email.csv')\ntextosPuros = classificacoes['email']\n\ntextosQuebrados = textosPuros.str.lower().str.split(' ')\ndicionario = set()\nfor lista in textosQuebrados:\n dicionario.update(lista)\n\n\ntotalPalavras = len(dicionario)\ntuplas = list(zip(dicionario, range(totalPalavras)))\ntradutor = {palavra:indice for palavra, indice in tuplas}\n\ndef vetorizar_texto(texto, tradutor):\n vetor = [0] * len(tradutor)\n for palavra in texto:\n if palavra in tradutor:\n vetor[tradutor[palavra]] += 1\n return vetor\n\n#print(vetorizar_texto(textosQuebrados[0], tradutor))\n#print(vetorizar_texto(textosQuebrados[1], tradutor))\n#print(vetorizar_texto(textosQuebrados[2], tradutor))\n\nvetoresDeTexto = [vetorizar_texto(texto, tradutor) for texto in textosQuebrados]\n#print(vetoresDeTexto)\n\nmarcas = classificacoes['classificacao']\n\nX = np.array(vetoresDeTexto)\nY = np.array(marcas.tolist())\nprint(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\nprint(X, Y)\nprint(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n\nporcentagem_treino = 0.8\ntamanho_treino = int(porcentagem_treino * len(Y))\ntamanho_validacao = len(Y) - tamanho_treino\n\ntreino_dados = X[0:tamanho_treino]\ntreino_marcacoes = Y[0:tamanho_treino]\n\nvalidacao_dados = X[tamanho_treino:]\nvalidacao_marcadores = Y[tamanho_treino:]\n\ndef fit_predict(nome, modelo, treino_dados, treino_marcacoes):\n k=10\n scores = cross_val_score(modelo, treino_dados, treino_marcacoes, cv=k)\n taxa_acerto = np.mean(scores)\n\n msg = 'Taxa de acerto do {0}: {1}'.format(nome, taxa_acerto)\n print(msg)\n return taxa_acerto\n\n\nresultados = {}\n\nfrom sklearn.multiclass import OneVsRestClassifier\nmodeloOneVsRest = OneVsRestClassifier(LinearSVC(random_state=0))\nresultadoOneVsRest = fit_predict('OneVsRest', modeloOneVsRest, treino_dados, treino_marcacoes)\nresultados[resultadoOneVsRest] = modeloOneVsRest\n\nfrom sklearn.multiclass import OneVsOneClassifier\nmodeloOneVsOne = OneVsOneClassifier(LinearSVC(random_state=0))\nresultadoOneVsOne = fit_predict('OneVsOne', modeloOneVsOne, treino_dados, treino_marcacoes)\nresultados[resultadoOneVsOne] = modeloOneVsOne\n\nfrom sklearn.naive_bayes import MultinomialNB\nmodeloMultinomialNB = MultinomialNB()\nresultadoMultinomialNB = fit_predict('MultinomialNB', modeloMultinomialNB, treino_dados, treino_marcacoes)\nresultados[resultadoMultinomialNB] = modeloMultinomialNB\n\nfrom sklearn.ensemble import AdaBoostClassifier\nmodeloAdaBoostClassifier = AdaBoostClassifier(random_state=0)\nresultadoAdaBoostClassifier = fit_predict('AdaBoostClassifier', modeloAdaBoostClassifier, treino_dados, treino_marcacoes)\nresultados[resultadoAdaBoostClassifier] = modeloAdaBoostClassifier\n\n\n#procurar na lista o resultado q for o maior\nvencedor = resultados[max(resultados)]\nprint(vencedor)\nvencedor.fit(treino_dados, treino_marcacoes)\nresultadorvencedor = vencedor.predict(validacao_dados)\n\n#mosta o vencedor\nacertos = (resultadorvencedor == validacao_marcadores)\ntotal_acertos = sum(acertos)\ntoral_elementos = len(validacao_dados)\ntaxa_acerto = 100.0 * total_acertos / toral_elementos\n\nprint(\"\\n\")\nprint(\"taxa de acerto do vencedor: {0} \".format(taxa_acerto))\n\n#algoritimo basico\nacerto_base = max(Counter(validacao_marcadores).values())\ntaxa_acerto_base = 100.0 * acerto_base/len(validacao_marcadores) \nprint(\"taxa de acerto base: 
%f\" % taxa_acerto_base)\nprint(\"total de validacao: %i\" % len(validacao_marcadores))\n\n\n"
] |
[
[
"pandas.read_csv"
],
[
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.ensemble.AdaBoostClassifier",
"numpy.mean",
"sklearn.svm.LinearSVC",
"numpy.array"
]
] |
temach/timesketch
|
[
"3420931f55acab27ca1de6ebe0fd4da1ff249665"
] |
[
"timesketch/lib/analyzers/utils.py"
] |
[
"# Copyright 2019 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This file contains utilities for analyzers.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport numpy\n\nfrom six.moves import urllib_parse as urlparse\n\nfrom timesketch.lib.analyzers import interface\n\n\n# Title and header text of a story that is common among browser\n# based analyzers.\nBROWSER_STORY_TITLE = 'Browser Artifacts'\nBROWSER_STORY_HEADER = \"\"\"\nThis is an automatically generated story that browser history\nbased analyzers contribute to. Each section in this story\nis generated by a separate analyzer.\n\"\"\"\n\n# CDN domain list based on:\n# https://github.com/WPO-Foundation/webpagetest/blob/master/agent/wpthook/cdn.h\n# Last updated: 2019-01-11\nKNOWN_CDN_DOMAINS = {\n '.att-dsa.net': 'AT&T',\n '.pix-cdn.org': 'Advanced Hosters CDN',\n '.akamai.net': 'Akamai',\n '.akamaiedge.net': 'Akamai',\n '.akamaihd.net': 'Akamai',\n '.akamaitechnologies.com': 'Akamai',\n '.akamaitechnologies.fr': 'Akamai',\n '.akamaized.net': 'Akamai',\n '.edgekey.net': 'Akamai',\n '.edgesuite.net': 'Akamai',\n '.srip.net': 'Akamai',\n '.tl88.net': 'Akamai China CDN',\n '.gslb.tbcache.com': 'Alimama',\n '.cloudfront.net': 'Amazon CloudFront',\n '.aads-cn.net': 'Aryaka',\n '.aads-cng.net': 'Aryaka',\n '.aads1.net': 'Aryaka',\n '.azion.net': 'Azion',\n '.azioncdn.com': 'Azion',\n '.azioncdn.net': 'Azion',\n '.bo.lt': 'BO.LT',\n '.bisongrid.net': 'Bison Grid',\n '.bitgravity.com': 'BitGravity',\n '.bluehatnetwork.com': 'Blue Hat Network',\n '.b-cdn.net': 'BunnyCDN',\n '.cdn77.net': 'CDN77',\n '.cdn77.org': 'CDN77',\n '.cdngc.net': 'CDNetworks',\n '.gccdn.net': 'CDNetworks',\n '.panthercdn.com': 'CDNetworks',\n '.cdnsun.net': 'CDNsun',\n '.cdnvideo.net': 'CDNvideo',\n '.cdnvideo.ru': 'CDNvideo',\n '.cachefly.net': 'Cachefly',\n '.caspowa.com': 'Caspowa',\n '.cedexis.net': 'Cedexis',\n '.ccgslb.com': 'ChinaCache',\n '.lxdns.com': 'ChinaNetCenter',\n '.ourwebpic.com': 'ChinaNetCenter',\n '.wscdns.com': 'ChinaNetCenter',\n '.wscloudcdn.com': 'ChinaNetCenter',\n '.cloudflare.com': 'Cloudflare',\n '.cotcdn.net': 'Cotendo CDN',\n '.systemcdn.net': 'Edgecast',\n '.transactcdn.net': 'Edgecast',\n '.v1cdn.net': 'Edgecast',\n '.v2cdn.net': 'Edgecast',\n '.v3cdn.net': 'Edgecast',\n '.v4cdn.net': 'Edgecast',\n '.v5cdn.net': 'Edgecast',\n '.edgecastcdn.net': 'Edgecast',\n '.cdninstagram.com': 'Facebook',\n '.fbcdn.net': 'Facebook',\n '.fastly.net': 'Fastly',\n '.fastlylb.net': 'Fastly',\n '.nocookie.net': 'Fastly',\n '.cdn.gocache.net': 'GoCache',\n '.doubleclick.net': 'Google',\n '.googleusercontent.com': 'Google',\n '.gstatic.com': 'Google',\n '.googlehosted.com': 'Google',\n '.googlesyndication.': 'Google',\n '.hiberniacdn.com': 'HiberniaCDN',\n '.hwcdn.net': 'Highwinds',\n '.hosting4cdn.com': 'Hosting4CDN',\n '.incapdns.net': 'Incapsula',\n '.inscname.net': 'Instart Logic',\n '.insnw.net': 'Instart Logic',\n '.internapcdn.net': 'Internap',\n '.kinxcdn.com': 'KINX CDN',\n '.kinxcdn.net': 'KINX CDN',\n 
'.kxcdn.com': 'KeyCDN',\n '.lswcdn.eu': 'LeaseWeb CDN',\n '.lswcdn.net': 'LeaseWeb CDN',\n '.footprint.net': 'Level 3',\n '.fpbns.net': 'Level 3',\n '.llnwd.net': 'Limelight',\n '.cdncloud.net.au': 'MediaCloud',\n '.mncdn.com': 'Medianova',\n '.mncdn.net': 'Medianova',\n '.mncdn.org': 'Medianova',\n '.azure.microsoft.com': 'Microsoft Azure',\n '.azureedge.net': 'Microsoft Azure',\n '.vo.msecnd.net': 'Microsoft Azure',\n '.instacontent.net': 'Mirror Image',\n '.mirror-image.net': 'Mirror Image',\n '.ngenix.net': 'NGENIX',\n '.nyiftw.com': 'NYI FTW',\n '.nyiftw.net': 'NYI FTW',\n '.netdna-cdn.com': 'NetDNA',\n '.netdna-ssl.com': 'NetDNA',\n '.netdna.com': 'NetDNA',\n '.netlify.com': 'Netlify',\n '.r.worldcdn.net': 'OnApp',\n '.r.worldssl.net': 'OnApp',\n '.optimalcdn.com': 'Optimal CDN',\n '.pagerain.net': 'PageRain',\n '.raxcdn.com': 'Rackspace',\n '.resrc.it': 'ReSRC.it',\n '.rlcdn.com': 'Reapleaf',\n '.rncdn1.com': 'Reflected Networks',\n '.rncdn7.com': 'Reflected Networks',\n '.revcn.net': 'Rev Software',\n '.revdn.net': 'Rev Software',\n '.roast.io': 'Roast.io',\n '.streamprovider.net': 'Rocket CDN',\n '.cdn.sfr.net': 'SFR',\n '.simplecdn.net': 'Simple CDN',\n '.singularcdn.net.br': 'Singular CDN',\n '.stackpathdns.com': 'StackPath',\n '.swiftcdn1.com': 'SwiftCDN',\n '.swiftserve.com': 'SwiftCDN',\n '.trbcdn.ru': 'TRBCDN',\n '.gslb.taobao.com': 'Taobao',\n '.taobaocdn.com': 'Taobao',\n '.tbcdn.cn': 'Taobao',\n '.cdntel.net': 'Telenor',\n '.twimg.com': 'Twitter',\n '.unicorncdn.net': 'UnicornCDN',\n '.voxcdn.net': 'VoxCDN',\n '.gravatar.com': 'WordPress',\n '.wordpress.com': 'WordPress',\n '.wp.com': 'WordPress',\n '.ay1.b.yahoo.com': 'Yahoo',\n '.yahooapis.com': 'Yahoo',\n '.yimg.': 'Yahoo',\n '.yottaa.net': 'Yottaa',\n '.zenedge.net': 'Zenedge',\n '.afxcdn.net': 'afxcdn.net',\n '.cubecdn.net': 'cubeCDN',\n '.cdn.jsdelivr.net': 'jsDelivr',\n '.squixa.net': 'section.io'}\n\n\ndef get_domain_from_url(url):\n \"\"\"Extract domain from URL.\n\n Args:\n url: URL to parse.\n\n Returns:\n String with domain from URL.\n \"\"\"\n # TODO: See if we can optimize this because it is rather slow.\n domain_parsed = urlparse.urlparse(url)\n domain_full = domain_parsed.netloc\n domain, _, _ = domain_full.partition(':')\n return domain\n\n\ndef get_tld_from_domain(domain):\n \"\"\"Get the top level domain from a domain string.\n\n Args:\n domain: string with a full domain, eg. www.google.com\n\n Returns:\n string: TLD or a top level domain extracted from the domain,\n eg: google.com\n \"\"\"\n return '.'.join(domain.split('.')[-2:])\n\n\ndef strip_www_from_domain(domain):\n \"\"\"Strip www. from beginning of domain names.\n\n Args:\n domain: string with a full domain, eg. 
www.google.com\n\n Returns:\n string: Domain without any www, eg: google.com\n \"\"\"\n if domain.startswith('www.'):\n return domain[4:]\n return domain\n\n\ndef get_cdn_provider(domain):\n \"\"\"Return name of CDN provider if domain is recognized as a CDN.\n\n Args:\n domain: Domain name to check against CDN list.\n\n Returns:\n String of names of CDN providers or empty string if not found.\n\n \"\"\"\n cdn_providers = [v for k, v in iter(KNOWN_CDN_DOMAINS.items()) if\n domain.endswith(k.lower())]\n return ' '.join(set(cdn_providers))\n\n\ndef _fix_np_nan(source_dict, attribute, replace_with=None):\n \"\"\"Replaces a numpy.nan value within dict with another value.\n\n Args:\n source_dict: a dictionary.\n attribute: string that is a key to the dictionary, in which\n numpy.nan values will be replaced by the supplied value.\n replace_with: the value that will be used as a replacement for\n the numpy.nan. If not supplied an empty list will be used.\n \"\"\"\n if replace_with is None:\n replace_with = []\n\n value = source_dict.get(attribute)\n try:\n if numpy.isnan(value):\n source_dict[attribute] = replace_with\n except TypeError:\n # The value does not need to be changed.\n pass\n\n\ndef get_events_from_data_frame(frame, datastore):\n \"\"\"Generates events from a data frame.\n\n Args:\n frame: a pandas DataFrame object.\n datastore: Elasticsearch datastore client.\n\n Yields:\n An event (interface.Event) object for each row\n in the DataFrame.\n \"\"\"\n for row in frame.iterrows():\n _, entry = row\n event_id = entry.get('_id')\n if not event_id:\n continue\n event_index = entry.get('_index')\n if not event_index:\n continue\n event_type = entry.get('_type')\n\n source = entry.to_dict()\n _fix_np_nan(source, '__ts_emojis')\n _fix_np_nan(source, 'human_readable', replace_with='')\n _fix_np_nan(source, 'tag')\n\n datetime = source.get('datetime')\n if hasattr(datetime, 'to_pydatetime'):\n datetime_string = datetime.to_pydatetime().isoformat()\n source['datetime'] = datetime_string\n\n event_dict = dict(\n _id=event_id, _type=event_type, _index=event_index,\n _source=source)\n yield interface.Event(event_dict, datastore)\n"
] |
[
[
"numpy.isnan"
]
] |
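The row above pairs Timesketch's analyzer utilities with the single extracted API call, `numpy.isnan`, which guards the NaN cleanup in its `_fix_np_nan` helper. As an editorial illustration only (not part of the dataset row), here is a minimal, self-contained sketch of that guard pattern; the dictionary and keys are invented for the example:

```python
import numpy


def replace_nan_value(source_dict, attribute, replace_with=None):
    """Replace a NaN stored under `attribute` with `replace_with`.

    Mirrors the pattern recorded for this row: numpy.isnan raises a
    TypeError for non-numeric values such as strings or None, so the
    try/except leaves those entries untouched.
    """
    if replace_with is None:
        replace_with = []
    value = source_dict.get(attribute)
    try:
        if numpy.isnan(value):
            source_dict[attribute] = replace_with
    except TypeError:
        pass  # value is not a float, nothing to fix


# Hypothetical usage: only the NaN entry is rewritten.
event = {"tag": numpy.nan, "human_readable": "ok"}
replace_nan_value(event, "tag")
replace_nan_value(event, "human_readable", replace_with="")
print(event)  # {'tag': [], 'human_readable': 'ok'}
```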
mindspore-ai/docs
|
[
"e7cbd69fe2bbd7870aa4591510ed3342ec6a3d41"
] |
[
"docs/sample_code/maskrcnn_fine_tune/src/maskrcnn/resnet50.py"
] |
[
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"Resnet50 backbone.\"\"\"\r\n\r\nimport numpy as np\r\nimport mindspore.nn as nn\r\nfrom mindspore import ops\r\nfrom mindspore import Tensor, dtype as mstype\r\n\r\ndef weight_init_ones(shape):\r\n \"\"\"Weight init.\"\"\"\r\n return Tensor(np.array(np.ones(shape).astype(np.float32) * 0.01).astype(np.float32))\r\n\r\n\r\ndef _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):\r\n \"\"\"Conv2D wrapper.\"\"\"\r\n shape = (out_channels, in_channels, kernel_size, kernel_size)\r\n weights = weight_init_ones(shape)\r\n return nn.Conv2d(in_channels, out_channels,\r\n kernel_size=kernel_size, stride=stride, padding=padding,\r\n pad_mode=pad_mode, weight_init=weights, has_bias=False).to_float(mstype.float16)\r\n\r\n\r\ndef _BatchNorm2dInit(out_chls, momentum=0.1, affine=True, use_batch_statistics=True):\r\n \"\"\"Batchnorm2D wrapper.\"\"\"\r\n gamma_init = Tensor(np.array(np.ones(out_chls)).astype(np.float32))\r\n beta_init = Tensor(np.array(np.ones(out_chls) * 0).astype(np.float32))\r\n moving_mean_init = Tensor(np.array(np.ones(out_chls) * 0).astype(np.float32))\r\n moving_var_init = Tensor(np.array(np.ones(out_chls)).astype(np.float32))\r\n\r\n return nn.BatchNorm2d(out_chls, momentum=momentum, affine=affine, gamma_init=gamma_init,\r\n beta_init=beta_init, moving_mean_init=moving_mean_init,\r\n moving_var_init=moving_var_init, use_batch_statistics=use_batch_statistics)\r\n\r\n\r\nclass ResNetFea(nn.Cell):\r\n \"\"\"\r\n ResNet architecture.\r\n\r\n Args:\r\n block (Cell): Block for network.\r\n layer_nums (list): Numbers of block in different layers.\r\n in_channels (list): Input channel in each layer.\r\n out_channels (list): Output channel in each layer.\r\n weights_update (bool): Weight update flag.\r\n Returns:\r\n Tensor, output tensor.\r\n\r\n Examples:\r\n >>> ResNet(ResidualBlock,\r\n >>> [3, 4, 6, 3],\r\n >>> [64, 256, 512, 1024],\r\n >>> [256, 512, 1024, 2048],\r\n >>> False)\r\n \"\"\"\r\n def __init__(self,\r\n block,\r\n layer_nums,\r\n in_channels,\r\n out_channels,\r\n weights_update=False):\r\n super(ResNetFea, self).__init__()\r\n\r\n if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\r\n raise ValueError(\"the length of \"\r\n \"layer_num, inchannel, outchannel list must be 4!\")\r\n\r\n bn_training = False\r\n self.conv1 = _conv(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')\r\n self.bn1 = _BatchNorm2dInit(64, affine=bn_training, use_batch_statistics=bn_training)\r\n self.relu = ops.ReLU()\r\n self.maxpool = ops.MaxPool(kernel_size=3, strides=2, pad_mode=\"SAME\")\r\n self.weights_update = weights_update\r\n\r\n if not self.weights_update:\r\n self.conv1.weight.requires_grad = False\r\n\r\n self.layer1 = self._make_layer(block,\r\n layer_nums[0],\r\n in_channel=in_channels[0],\r\n 
out_channel=out_channels[0],\r\n stride=1,\r\n training=bn_training,\r\n weights_update=self.weights_update)\r\n self.layer2 = self._make_layer(block,\r\n layer_nums[1],\r\n in_channel=in_channels[1],\r\n out_channel=out_channels[1],\r\n stride=2,\r\n training=bn_training,\r\n weights_update=True)\r\n self.layer3 = self._make_layer(block,\r\n layer_nums[2],\r\n in_channel=in_channels[2],\r\n out_channel=out_channels[2],\r\n stride=2,\r\n training=bn_training,\r\n weights_update=True)\r\n self.layer4 = self._make_layer(block,\r\n layer_nums[3],\r\n in_channel=in_channels[3],\r\n out_channel=out_channels[3],\r\n stride=2,\r\n training=bn_training,\r\n weights_update=True)\r\n\r\n def _make_layer(self, block, layer_num, in_channel, out_channel, stride, training=False, weights_update=False):\r\n \"\"\"Make block layer.\"\"\"\r\n layers = []\r\n down_sample = False\r\n if stride != 1 or in_channel != out_channel:\r\n down_sample = True\r\n resblk = block(in_channel,\r\n out_channel,\r\n stride=stride,\r\n down_sample=down_sample,\r\n training=training,\r\n weights_update=weights_update)\r\n layers.append(resblk)\r\n\r\n for _ in range(1, layer_num):\r\n resblk = block(out_channel, out_channel, stride=1, training=training, weights_update=weights_update)\r\n layers.append(resblk)\r\n\r\n return nn.SequentialCell(layers)\r\n\r\n def construct(self, x):\r\n \"\"\"ResNet Network\"\"\"\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n c1 = self.maxpool(x)\r\n\r\n c2 = self.layer1(c1)\r\n identity = c2\r\n if not self.weights_update:\r\n identity = ops.stop_gradient(c2)\r\n c3 = self.layer2(identity)\r\n c4 = self.layer3(c3)\r\n c5 = self.layer4(c4)\r\n\r\n return identity, c3, c4, c5\r\n\r\n\r\nclass ResidualBlockUsing(nn.Cell):\r\n \"\"\"\r\n ResNet V1 residual block definition.\r\n\r\n Args:\r\n in_channels (int) - Input channel.\r\n out_channels (int) - Output channel.\r\n stride (int) - Stride size for the initial convolutional layer. Default: 1.\r\n down_sample (bool) - If to do the downsample in block. Default: False.\r\n momentum (float) - Momentum for batchnorm layer. Default: 0.1.\r\n training (bool) - Training flag. Default: False.\r\n weights_updata (bool) - Weights update flag. 
Default: False.\r\n\r\n Returns:\r\n Tensor, output tensor.\r\n\r\n Examples:\r\n ResidualBlock(3,256,stride=2,down_sample=True)\r\n \"\"\"\r\n expansion = 4\r\n\r\n def __init__(self,\r\n in_channels,\r\n out_channels,\r\n stride=1,\r\n down_sample=False,\r\n momentum=0.1,\r\n training=False,\r\n weights_update=False):\r\n super(ResidualBlockUsing, self).__init__()\r\n\r\n self.affine = weights_update\r\n\r\n out_chls = out_channels // self.expansion\r\n self.conv1 = _conv(in_channels, out_chls, kernel_size=1, stride=1, padding=0)\r\n self.bn1 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training)\r\n\r\n self.conv2 = _conv(out_chls, out_chls, kernel_size=3, stride=stride, padding=1)\r\n self.bn2 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training)\r\n\r\n self.conv3 = _conv(out_chls, out_channels, kernel_size=1, stride=1, padding=0)\r\n self.bn3 = _BatchNorm2dInit(out_channels, momentum=momentum, affine=self.affine, use_batch_statistics=training)\r\n\r\n if training:\r\n self.bn1 = self.bn1.set_train()\r\n self.bn2 = self.bn2.set_train()\r\n self.bn3 = self.bn3.set_train()\r\n\r\n if not weights_update:\r\n self.conv1.weight.requires_grad = False\r\n self.conv2.weight.requires_grad = False\r\n self.conv3.weight.requires_grad = False\r\n\r\n self.relu = ops.ReLU()\r\n self.downsample = down_sample\r\n if self.downsample:\r\n self.conv_down_sample = _conv(in_channels, out_channels, kernel_size=1, stride=stride, padding=0)\r\n self.bn_down_sample = _BatchNorm2dInit(out_channels, momentum=momentum, affine=self.affine,\r\n use_batch_statistics=training)\r\n if training:\r\n self.bn_down_sample = self.bn_down_sample.set_train()\r\n if not weights_update:\r\n self.conv_down_sample.weight.requires_grad = False\r\n self.add = ops.Add()\r\n\r\n def construct(self, x):\r\n \"\"\"ResNet V1 residual block definition.\"\"\"\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample:\r\n identity = self.conv_down_sample(identity)\r\n identity = self.bn_down_sample(identity)\r\n\r\n out = self.add(out, identity)\r\n out = self.relu(out)\r\n\r\n return out\r\n"
] |
[
[
"numpy.ones"
]
] |
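The MindSpore row above records only `numpy.ones`, which the ResNet-50 backbone uses to fill constant starting values for its convolution weights and batch-norm parameters. The numpy-only sketch below shows that initialisation pattern in isolation; the MindSpore layer construction is deliberately omitted so the snippet stays runnable on its own, and the shapes are illustrative:

```python
import numpy as np


def weight_init_ones(shape, scale=0.01):
    # Constant-filled float32 weights, following the recorded weight_init_ones helper.
    return (np.ones(shape, dtype=np.float32) * scale).astype(np.float32)


def batchnorm_init(num_channels):
    # gamma and moving variance start at 1; beta and moving mean start at 0.
    gamma = np.ones(num_channels, dtype=np.float32)
    beta = np.zeros(num_channels, dtype=np.float32)
    moving_mean = np.zeros(num_channels, dtype=np.float32)
    moving_var = np.ones(num_channels, dtype=np.float32)
    return gamma, beta, moving_mean, moving_var


# Illustrative shapes for a 7x7 stem convolution over RGB input.
conv1_weights = weight_init_ones((64, 3, 7, 7))
gamma, beta, mean, var = batchnorm_init(64)
print(conv1_weights.shape, gamma.shape)  # (64, 3, 7, 7) (64,)
```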
StephenGrey/my_api
|
[
"e43ee8e897a4fa49399757feb785887b8b1c44a6",
"e43ee8e897a4fa49399757feb785887b8b1c44a6"
] |
[
"graph/scotland.py",
"graph/phe_fetch.py"
] |
[
"import requests,json,csv,pandas,os,logging\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup as BS\nfrom .ons_week import week as ons_week,sunday,stored_names,nation,scotcode\nfrom datetime import timedelta\nfrom .models import CovidWeek, DailyCases, AverageWeek\nfrom .import_csv import URLImporter,PandaImporter,timeaware\nfrom .model_calcs import update_cum_district_death,DATA_STORE\nfrom .ons_fetch import update_row\nfrom .phe_fetch import update_weekly_total\nlog = logging.getLogger('api.graph.scotland')\n\nimport configs\nfrom configs import userconfig\n\nINFO_URL=\"https://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/general-publications/weekly-and-monthly-data-on-births-and-deaths/deaths-involving-coronavirus-covid-19-in-scotland/related-statistics\"\n\nDEATHS_URL='https://www.nrscotland.gov.uk/files//statistics/covid19/weekly-deaths-by-date-health-board-location.xlsx'\n\nCASES_URL=\"https://statistics.gov.scot/slice/observations.csv?&dataset=http%3A%2F%2Fstatistics.gov.scot%2Fdata%2Fcoronavirus-covid-19-management-information&http%3A%2F%2Fpurl.org%2Flinked-data%2Fcube%23measureType=http%3A%2F%2Fstatistics.gov.scot%2Fdef%2Fmeasure-properties%2Fcount&http%3A%2F%2Fstatistics.gov.scot%2Fdef%2Fdimension%2Fvariable=http%3A%2F%2Fstatistics.gov.scot%2Fdef%2Fconcept%2Fvariable%2Ftesting-cumulative-people-tested-for-covid-19-positive\"\n\n\"\"\"\n\nInfo and links:\nhttps://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/general-publications/weekly-and-monthly-data-on-births-and-deaths/deaths-involving-coronavirus-covid-19-in-scotland\n\nDeaths data file downloaded: (Data updated every Wednesday)\n\nWeekly deaths by date of occurrence, health board and location:\nhttps://www.nrscotland.gov.uk/files//statistics/covid19/weekly-deaths-by-date-council-area-location.zip\"\n\n#https://www.gov.scot/publications/coronavirus-covid-19-trends-in-daily-data/\n\t\n#data.columns=['Week of occurrence','Health Board','Location of death','Cause of Death','deaths']\n#load average deaths: https://www.nrscotland.gov.uk/files//statistics/covid19/weekly-deaths-by-date-health-board-location-15-19.zip\n\n\nFootnotes:\t\t\t\t\t\n1) figures are provisional and subject to future changes\t\t\t\t\t\n2) Weeks run from Monday to Sunday and are based on the ISO8601 international standard for week numbering. 
Note that weeks at the beginning and end of a year can overlap with the previous and subsequent year, so counts may not sum to annual totals published elsewhere.\t\t\t\n3) Other institutions include clinics, medical centres, prisons and schools.\t\t\t\t\t\n\nThe figures are produced using same definition as those published by the ONS\nfor England and Wales, so are broadly comparable.\nOne minor difference is how the registration weeks are defined:\nWeeks used by ONS (for England and Wales) run from Saturday to\nFriday\nNRS weeks (for Scotland) run from Monday to Sunday (this is the\nISO8601 standard week).\"\"\"\n\n\nclass Scot_Importer(PandaImporter):\n \n def update_check(self):\n scotupdate=configs.config.get('Scotland')\n if scotupdate:\n self.last_update=scotupdate.get('latest_deaths')\n else:\n self.last_update=None\n \n res=requests.get(INFO_URL)\n if res:\n html=res.content\n soup=BS(res.content, 'html.parser')\n el=soup.table.tbody.contents[3].td.next.next.next.text\n target=\"Weekly deaths by date of occurrence, health board and location\"\n if target in el:\n log.info(\"Found target: latest Scotland weekly deaths data\")\n self.edition=el[el.find('(')+1:el.find(')')]\n log.info(f\"Last update: {self.edition}\")\n #datetime. strptime(self.edition, '%d %B %Y')\n \n if self.edition and self.edition==self.last_update:\n log.info('Scotdeaths - Up to date')\n return False\n else:\n log.info('Scotdeaths - Update available')\n return True\n \n def process(self,f=DEATHS_URL):\n self.fetch_excel(url=f,skiprows=2,sheet_name=\"Data\")\n self.fix()\n self.parse()\n log.info('Scot deaths processing complete')\n \n def fetch_excel(self,url=DEATHS_URL,skiprows=2,sheet_name=\"Data\"):\n f=requests.get(url)\n xl=f.content if f else None\n self.data=pandas.read_excel(xl,sheet_name=sheet_name,skiprows=skiprows)\n return self.data\n \n def fix(self):\n self.data=self.data[self.data.columns[:5]].dropna()\n \n deaths_field='deaths' if 'deaths' in self.data.columns else 'Deaths'\n \n self.data['Deaths'] = self.data[deaths_field].astype(int)\n self.data['Week of occurrence'] = self.data['Week of occurrence'].astype(int)\n# self.data.columns=['Week of occurrence','Health Board','Location of death','Cause of Death','deaths']\n# #self.data.dropna() #drop any columns with null values\n# validweeks=[n for n in self.data['Week of occurrence'].unique() if valid_int(n)] #only week numbers that are valid\n# print(len(self.data.index))\n# self.data=self.data[self.data['Week of occurrence'].isin(validweeks)] #select rows only with valid week numbers\n# print(len(self.data.index))\n \n \n def weeks(self):\n return sorted([int(z) for z in self.data['Week of occurrence'].unique()])\n \n def districts(self):\n return sorted([z for z in self.data['Health Board'].unique()])\n\n def parse(self):\n for district in self.districts():\n for week in self.weeks():\n self.parse_week(week,district)\n if self.edition:\n configs.userconfig.update('Scotland','latest_deaths',self.edition)\n# if self.edition:\n# configs.userconfig.update('ONS','latest_update',self.edition)\n def parse_week(self,week,district,_update=True):\n week_str=str(week)\n sub=self.data[(self.data['Health Board']==district)&(self.data['Week of occurrence']==week)]\n _allc19=sub[(sub['Cause of Death']=='COVID-19')]['Deaths'].sum()\n _all=sub['Deaths'].sum()\n careh=sub[(sub['Location of death']=='Care Home')]['Deaths'].sum()\n careh19=sub[(sub['Cause of Death']=='COVID-19')&(sub['Location of death']=='Care Home')]['Deaths'].sum()\n hosp19=sub[(sub['Cause of 
Death']=='COVID-19')&(sub['Location of death']=='Hospital')]['Deaths'].sum()\n log.debug(f'District: {district} Week: {week} C19:{_allc19} All: {_all} Carehomes {careh} ({careh19} C19)')\n qrow=CovidWeek.objects.filter(week=week,areaname=district)\n if qrow:\n row=qrow[0]\n #print(row)\n if _update:\n update_row(row,_all,_allc19,careh,careh19,hosp19)\n else:\n if _update:\n areacode=scotcode[district]\n _nation='Scotland'\n row=CovidWeek(date=sunday(week),areacode=areacode,nation=_nation,areaname=district,week=week)\n print(f'Created week {sunday(week)} for {district}')\n row.save()\n update_row(row,_all,_allc19,careh,careh19,hosp19)\n \n def update_cum_deaths(self):\n for d in scotcode.values():\n update_cum_district_death(d)\n\nclass Scot_Average(Scot_Importer):\n \n def process(self,f):\n self.open_csv(f)\n self.fix()\n \n def open_csv(self,f):\n self.data=pandas.read_csv(f, encoding= \"iso-8859-1\",skiprows=2)\n \n def fix(self):\n self.data=self.data[self.data.columns[:5]].dropna()\n validweeks=self.weeks()\n self.data=self.data[self.data['week of occurrence'].isin(validweeks)]\n \n self.data['year']=self.data['year'].astype(int)\n self.data['week of occurrence']=self.data['week of occurrence'].astype(int)\n \n def weeks(self):\n return [n for n in self.data['week of occurrence'].unique() if valid_int(n)]\n \n def parse(self):\n for place in self.districts():\n for week in self.weeks():\n self.parserow(place,week)\n \n def districts(self):\n return sorted([z for z in self.data['health board'].unique()])\n\n \n def parserow(self,place,week):\n areacode=scotcode[place]\n try:\n sub=self.data[(self.data['week of occurrence']==week)&(self.data['health board']==place)]\n wk, created = AverageWeek.objects.get_or_create(\n week=week,\n areacode=areacode\n )\n location=place\n print(f'Parsing: Area: {place} week {week}')\n wk.weeklyalldeaths=sub['number of deaths'].sum()/5\n wk.weeklyhospitaldeaths=sub[(sub['location']=='Hospital')]['number of deaths'].sum()/5\n wk.weeklyelsewheredeaths=None\n wk.weeklyhospicedeaths=None\n wk.weeklyothercommunaldeaths=sub[(sub['location']=='Other institution')]['number of deaths'].sum()/5\n wk.weeklycarehomedeaths=sub[(sub['location']=='Care Home')]['number of deaths'].sum()/5\n wk.weeklyhomedeaths=sub[(sub['location']=='Home / Non-institution')]['number of deaths'].sum()/5\n wk.save()\n #print(wk.__dict__)\n except Exception as e:\n print(e)\n \n \nclass Scot_Cases(Scot_Importer):\n def process(self,f=CASES_URL,live=True):\n if live:\n self.fetch_csv(f)\n else:\n self.open_csv(path)\n self.fix()\n \n if self.update_check():\n self.ingest_all()\n \n def update_check(self):\n self.edition=self.data.index.max()\n print(f'Latest Scot cases data: {self.edition}')\n scotupdate=configs.config.get('Scotland')\n if scotupdate:\n self.last_update=scotupdate.get('latest_cases')\n log.info(f'Previously stored Scot cases data:{self.last_update}')\n else:\n self.last_update=None\n return True\n if self.last_update != str(self.edition):\n log.info('Update Scottish cases')\n return True\n else:\n log.info('Scottish cases up to date')\n return False\n \n def fetch_csv(self,url=CASES_URL):\n path=os.path.join(DATA_STORE,'Scotland_latestcases.csv')\n res=requests.get(url)\n with open(path, 'wb') as f:\n f.write(res.content)\n self.open_csv(path)\n \n def open_csv(self,path):\n self.data = pandas.read_csv(path, skiprows=7, encoding= \"utf-8\") #skip the first rows\n\n def districts(self):\n return scotcode.keys()\n\n\n def fix(self):\n zt=self.data[self.data.columns[1:]].T 
#drop first column and transpose\n labels=[n for n in zt.iloc[0]] #grab column labels from first row\n zt.columns=labels\n self.data=zt[1:]\n self.data.index=pandas.to_datetime(self.data.index)\n \n def ingest_all(self):\n \"\"\"pull all daily cases from all Scottish areas\"\"\"\n for sequence in self.districts():\n self.sequence_ingest(sequence)\n if self.edition:\n configs.userconfig.update('Scotland','latest_cases',str(self.edition))\n \n def sequence_ingest(self,place):\n \"\"\"ingest cases for region\"\"\"\n data=self.data\n counter=0\n for day in self.data.index:\n \n yesterday=self.data[self.data.index==day-timedelta(1)][place].fillna(0)\n if yesterday.empty:\n yesterday=0\n elif yesterday.item()==\"*\":\n yesterday=0\n else:\n yesterday=int(yesterday)\n totalcases=self.data[self.data.index==day][place].fillna(0).item()\n if totalcases==\"*\":\n totalcases=0\n else:\n totalcases=int(totalcases)\n if totalcases:\n today=totalcases-yesterday\n else:\n today=0\n \n log.debug(f'Place:{place} Date: {day:%d/%m} Yesterday:{yesterday} Today:{today} Total:{totalcases}')\n #datestring=item['specimenDate']\n date=day\n areacode=scotcode[place]\n row,created=DailyCases.objects.get_or_create(specimenDate=timeaware(date),areacode=areacode)\n row.areaname=place\n row.dailyLabConfirmedCases=today\n row.totalLabConfirmedCases=totalcases\n row.changeInDailyCases=None #item['changeInDailyCases']\n row.dailyTotalLabConfirmedCasesRate=None #item['dailyTotalLabConfirmedCasesRate']\n row.previouslyReportedDailyCases=None #item['previouslyReportedDailyCases']\n row.previouslyReportedTotalCases=None #item['previouslyReportedTotalCases']\n row.changeInTotalCases=None #item['changeInTotalCases']\n row.save()\n counter+=1\n log.info(f'Processed: {counter} rows')\n update_weekly_total(areacode=scotcode[place],areaname=place)\n\n\ndef valid_int(s):\n try:\n n=int(s)\n return n\n except:\n return None\n \n ",
"# -*- coding: utf-8 -*- \nimport os,json,requests,csv,pandas,logging, time\nfrom bs4 import BeautifulSoup as BS\nfrom utils import time_utils\nfrom .models import DailyCases,CovidWeek, DailyReport\nfrom datetime import datetime,timedelta,date\nimport pytz\nfrom contextlib import closing\nfrom . import ons_week, model_calcs,phe_codes\nfrom .import_csv import DATA_STORE,PandaImporter\nfrom django.db.models import Max\nfrom collections import defaultdict\n\nimport configs\nfrom configs import userconfig\n\n#pip install uk-covid19\nfrom uk_covid19 import Cov19API\n#https://github.com/publichealthengland/coronavirus-dashboard-api-python-sdk\nlog = logging.getLogger('api.graph.phe_fetch')\n\nURL=\"https://c19downloads.azureedge.net/downloads/json/coronavirus-cases_latest.json\"\nURL_CSV=\"https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv\"\nDASHBOARD=\"https://coronavirus.data.gov.uk/\"\nMSOA: \"https://c19downloads.azureedge.net/downloads/msoa_data/MSOAs_latest.csv\"\n\nTIMEOUT=60\nDATALOAD={}\nAREACODE=\"E08000025\"\nAREA=\"Birmingham\"\n\n\"\"\"\nMain routine to fetch data:\n\nz=Fetch_API()\nz.process()\n\n\n\n\"\"\"\nclass NoContent(Exception):\n pass\n\nclass NoEntry(Exception):\n pass\n\n\nclass Check_PHE():\n def __init__(self):\n self.api = Cov19API(filters=self.filters, structure=self.structure)\n PHEstored=configs.config.get('PHE')\n if PHEstored:\n self.England_cases=PHEstored.get('england_total_cases')\n self.edition=PHEstored.get('latest_update')\n\n else:\n self.England_cases=None\n try:\n self.top()\n except Exception as e:\n print(e)\n print('Check PHE failed - default to needs update')\n self._update=True\n \n \n def top(self):\n \"\"\"get latest total\"\"\"\n self.api.latest_by='cumCasesByPublishDate'\n self.get()\n self.latest_total=self.data['data'][0]['cumCasesByPublishDate']\n log.info(f'England latest total: {self.latest_total}')\n if self.latest_total:\n if self.England_cases:\n if int(self.England_cases)==self.latest_total:\n if self.edition==self.latest_update:\n log.info('Database up to date')\n self._update=False\n return False\n else:\n log.info(f'Database needs update: PHE latest: {self.latest_update} Stored update:{self.edition}')\n userconfig.update('PHE','england_total_cases',str(self.latest_total))\n self._update=True\n return True\n \n \n @property\n def filters(self):\n \"\"\"override to any filter\"\"\"\n return self.England_filter\n \n @property\n def structure(self):\n \"\"\"override to any structure\"\"\"\n return self.cases_and_deaths\n \n def get(self):\n print('Fetching PHE cases from API')\n try:\n self.data=self.api.get_json() # Returns a dictionary\n except Exception as e:\n print(e)\n log.error('Failed to download cases')\n \n @property\n def latest_update(self):\n return self.data.get('lastUpdate')\n \n @property\n def latest_date_str(self):\n return f'{time_utils.parseISO(self.api.last_update):%Y-%m-%dT%H-%M}'\n\n def update_edition(self):\n self.edition=self.api.last_update\n self.edition_date=time_utils.parseISO(self.edition).date()\n \n @property\n def cases_and_deaths(self):\n return {\n \"date\": \"date\",\n \"areaName\": \"areaName\",\n \"areaCode\": \"areaCode\",\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\n \"newCasesBySpecimenDate\":\"newCasesBySpecimenDate\",\n \"cumCasesBySpecimenDate\":\"cumCasesBySpecimenDate\",\n \"newDeaths28DaysByPublishDate\":\"newDeaths28DaysByPublishDate\",\n 
\"newDeaths28DaysByDeathDate\":\"newDeaths28DaysByDeathDate\",\n# \"newPillarOneTestsByPublishDate\":\"newPillarOneTestsByPublishDate\",\n# \"cumPillarOneTestsByPublishDate\":\"newPillarOneTestsByPublishDate\",\n# \"newAdmissions\":\"newAdmissions\",\n# \"cumAdmissions\":\"cumAdmissions\",\n# \"cumTestsByPublishDate\":\"cumTestsByPublishDate\",\n# \"newTestsByPublishDate\":\"newTestsByPublishDate\",\n }\n \n \n \n @property\n def England_filter(self):\n return ['areaType=nation','areaName=England']\n \n def district_filter(self, district):\n area_type=phe_codes.area_types.get(district)\n if area_type:\n return [f'areaType={area_type}',f'areaName={district}']\n else:\n return None\n \n def nation_filter(self,nation):\n return ['areaType=nation',f'areaName={nation}']\n \n @property\n def local_filter(self):\n return ['areaType=ltla']\n\n @property\n def local_filter(self):\n return ['areaType=ltla']\n\n @property\n def updated(self):\n return self.api.last_update\n \n @property\n def newcases(self):\n return{\n \"specimenDate\": \"date\",\n \"areaName\": \"areaName\",\n \"areaCode\": \"areaCode\",\n# \"newCasesByPublishDate\": \"newCasesByPublishDate\",\n# \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\n# \"newDeathsByDeathDate\": \"newDeathsByDeathDate\",\n# \"cumDeathsByDeathDate\": \"cumDeathsByDeathDate\",\n \"newCasesBySpecimenDate\":\"newCasesBySpecimenDate\",\n \"cumCasesBySpecimenDate\":\"cumCasesBySpecimenDate\",\n\n \t# \"newAdmissions\":\"newAdmissions\",\n# \"cumAdmissions\":\"cumAdmissions\",\n# \"cumTestsByPublishDate\":\"cumTestsByPublishDate\",\n# \"newTestsByPublishDate\":\"newTestsByPublishDate\",\n }\n \n def save(self):\n _date=self.latest_date_str #fetches date of latest published update\n filename=f\"{_date}-PHE-cases.json\"\n filepath=os.path.join(DATA_STORE,filename)\n with open(filepath, 'w') as outfile:\n json.dump(self.data, outfile)\n \n def save_all(self):\n _date=self.latest_date_str #fetches date of latest published update\n filename=f\"{_date}-PHE-cases.json\"\n filepath=os.path.join(DATA_STORE,filename)\n with open(filepath, 'w') as outfile:\n json.dump(self.data_all, outfile)\n\nclass LocalLatest(Check_PHE):\n def __init__(self):\n self.api = Cov19API(filters=self.local_filter, structure=self.structure)\n self.api.latest_by='cumCasesByPublishDate'\n self.get()\n\n\nclass Fetch_API(Check_PHE):\n\tdef __init__(self,force_update=False):\n\t\tself.today=date.today()\n\t\tself.api = Cov19API(filters=self.filters, structure=self.structure)\n\t\tself.edition=None\n\t\tself.sequences=['ltla','region']\n\t\t#self.api.latest_by='cumCasesBySpecimenDate' - this fetches only latest cases\n\t\t#self.fetch - get\n\t\tself.data_all=[]\n\t\tself.force_update=force_update\n\t\t\n\tdef process(self):\n\t\t\"\"\"pull the data district by district\"\"\"\n\t\tif self.update_check() or self.force_update:\n\t\t\tself.district_check() #pull all local data and regions\n\t\t\tself.fix() #fix data anomalies - e.g add in Bucks.\n\t\t\tself.save_all() #store a copy of the data\n\t\t\tself.ingest() #add data to models\n\t\t\tself.update_totals() #calculate weekly data\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')\n\t\t\n\tdef fetch(self):\n\t\tfor sequence in self.sequences:\n\t\t\tself.api.filters=[f'areaType={sequence}']\n\t\t\tprint(f'SEQUENCE: {sequence}')\n\t\t\tself.get() #get local data\t\t\t\n\t\t\tself.data_all +=self.data.get('data')\n\t\tself.edition=self.latest_update\n\t\t\t\t\t\t\n\tdef fix(self):\n\t\t#get Bucks data\n\t\tself.api.filters=['areaType=utla', 
'areaName=Buckinghamshire']\n\t\tself.get()\n\t\tbucks=self.data.get('data')\n\t\tfixed=[]\n\t\tfor row in bucks:\n\t\t\trow['areaCode']='E06000060'\n\t\t\tfixed.append(row)\n\t\tself.data_all+=fixed\n\t\tprint('Fixed wrong areacode and added Bucks in PHE local data')\n\t\n\t@property\n\tdef filters(self):\n\t\t\"\"\"override to any filter\"\"\"\n\t\treturn self.local_filter\n\t\t\n\t@property\n\tdef structure(self):\n\t\t\"\"\"override to any structure\"\"\"\n\t\treturn self.newcases\n\t\n#\tdef process_all(self):\n#\t\t\"\"\"pull all the data and process\"\"\"\n#\t\tif self.update_check() or self.force_update:\n#\t\t\tself.fetch() #pull all local data and regions\n#\t\t\tself.fix() #fix data anomalies - e.g add in Bucks.\n#\t\t\tself.save_all() #store a copy of the data\n#\t\t\tself.ingest() #add data to models\n#\t\t\tself.update_totals() #calculate weekly data\n#\t\telse:\n#\t\t\tlog.info('PHE cases up to date')\n\t\n\t\n\t\n\tdef areacodes():\n\t\toutput=set()\n\t\tfor x in zz.data['data']:\n\t\t\toutput.add(x['areaCode'])\n\t\treturn output\n\n\n\n\tdef district_check(self):\n\t\t\"\"\"fetch data from API district by district\"\"\"\n\t\t\n\t\tplaces_2_fetch=list(ons_week.stored_names.values())+ons_week.extra_places\n\t\tself.edition=None\n\t\tfor place in places_2_fetch:\n\t\t\t_filters=self.district_filter(place)\n\t\t\tif _filters:\n\t\t\t\tself.api.filters=_filters\n\t\t\telse:\n\t\t\t\tlog.info(f'Not fetching {place} - not in PHE API')\n\t\t\t\tcontinue\n\t\t\ttries=0\n\t\t\twhile tries < 5:\n\t\t\t\ttry:\n\t\t\t\t\tlog.debug(f'Fetching {place}')\n\t\t\t\t\tself.data=self.api.get_json() # Returns a dictionary\n\t\t\t\t\tnew_data=self.data.get('data')\n\t\t\t\t\tif not self.edition:\n\t\t\t\t\t\tself.edition=self.latest_update\n\t\t\t\t\tbreak\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlog.error(e)\n\t\t\t\t\tlog.error('Retrying after 8 secs')\n\t\t\t\t\ttime.sleep(8)\n\t\t\t\t\ttries +=1\n\t\t\t\t\tnew_data=[]\n\t\t\tif not new_data:\n\t\t\t\tlog.error('No data here')\n\t\t\telse:\n\t\t\t\tself.data_all +=new_data\n\t\t\ttime.sleep(0.1)\n\n\n\tdef count_reports(self):\n\t\treports={}\n\t\tfor i in self.data_all:\n\t\t\treports[i['areaCode']]=reports.get(i['areaCode'],0)+1\n\t\t\n\t\tfor areacode in ons_week.stored_names:\n\t\t\tif not reports.get(areacode):\n\t\t\t\tlog.info(f'missing data for {ons_week.stored_names.get(areacode)}')\n\n\t\treturn reports\n\n\tdef ingest(self,check=True):\n\t\t\"\"\"ingest all the data\"\"\"\n\t\tdata=self.data_all\n\t\tpubdate=time_utils.parseISO(self.api.last_update).date()\n\t\t\n\t\tcounter=0\n\t\tfor item in data:\n\t\t\tareacode=item['areaCode']\n\t\t\tdatestring=item['specimenDate']\n\t\t\t_date=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=_date,areacode=areacode)\n\t\t\trow.areaname=item['areaName']\n\t\t\tdaily=item['newCasesBySpecimenDate']\n\t\t\ttotal=item['cumCasesBySpecimenDate']\n\t\t\t\n\t\t\t#log.debug(f'{row.areaname}: {datestring}')\t\t\t\n\t\t\tif created:\n\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\trow.save()\n\t\t\t\t\n\t\t\t\tif daily:\n\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\tlog.debug(f'date:{_date} lag: {lag} daily:{daily}')\n\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\tdrow.add_cases=daily #if a new daily case, assume no prior report\n\t\t\t\t\tdrow.save()\n\t\t\t\n\t\t\tif not 
created:\n\t\t\t\texisting_daily=row.dailyLabConfirmedCases\n\t\t\t\texisting_total=row.totalLabConfirmedCases\n\t\t\t\tif daily is not None:\n\t\t\t\t\tif existing_daily !=daily or existing_total!=total:\n\t\t\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\t\t\trow.save()\n\t\t\t\t\t\tif existing_daily !=daily:\n\t\t\t\t\t\t\tlog.info(f'Updating {row.areaname} on {datestring}: Daily: {existing_daily} to {daily} Total: {existing_total} to {total}')\n\t\t\t\t\t\t\tif existing_daily:\n\t\t\t\t\t\t\t\t_increase=daily-existing_daily\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t_increase=daily\n\t\t\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\t\t\tdrow.add_cases=_increase\n\t\t\t\t\t\t\tdrow.save()\n\t\t\t\t\t\n\t\t\tcounter+=1\n\t\t\tif counter%1000==0:\n\t\t\t\tlog.info(f'Processing row {counter}')\n\t\tlog.info(f'Processed: {counter} rows')\n\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_update',self.edition)\n\n#\t\n\tdef save(self):\n\t\tfilename=f\"{date.today()}-PHE-cases.json\"\n\t\tfilepath=os.path.join(DATA_STORE,filename)\n\t\twith open(filepath, 'w') as outfile:\n\t\t\tjson.dump(self.data_all, outfile)\n\t\t\n\tdef update_totals(self):\n\t\tupdate_weekly_cases('England')\n\t\tupdate_weekly_cases('Northern Ireland')\n\t\tupdate_weekly_cases('Wales')\n\t\t\n\tdef update_check(self):\n\t\treturn check()\n\t\t\n\t\t\n\n\tdef sequence_ingest(self,sequence):\n\t\t\"\"\"ingest from a particular sequence\"\"\"\n\t\tdata=self.data\n\t\t\n\t\tcounter=0\n\n\t\tfor item in data[sequence]:\n\t\t\tdatestring=item['specimenDate']\n\t\t\tdate=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=item['areaCode'])\n\t\t\trow.areaname=item['areaName']\n\t\t\trow.dailyLabConfirmedCases=item['dailyLabConfirmedCases']\n\t\t\trow.totalLabConfirmedCases=item['totalLabConfirmedCases']\n\t\t\trow.changeInDailyCases=item['changeInDailyCases']\n\t\t\trow.dailyTotalLabConfirmedCasesRate=item['dailyTotalLabConfirmedCasesRate']\n\t\t\trow.previouslyReportedDailyCases=item['previouslyReportedDailyCases']\n\t\t\trow.previouslyReportedTotalCases=item['previouslyReportedTotalCases']\n\t\t\trow.changeInTotalCases=item['changeInTotalCases']\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tlog.info(f'Processed: {counter} rows')\n\n\n\n\n\n#class OLDCheck_PHE():\n#\tdef __init__(self):\n#\t\tPHEstored=configs.config.get('PHE')\n#\t\tif PHEstored:\n#\t\t\tself.England_cases=PHEstored.get('england_total_cases')\n#\t\telse:\n#\t\t\tself.England_cases=None\n#\t\tself.top()\n#\t\t\n#\tdef top(self,url=URL_CSV):\n#\t\t\"\"\"get lastest England total\"\"\"\n#\t\t\n#\t\twith closing(requests.get(url, stream=True)) as r:\n#\t\t\tf = (line.decode('utf-8') for line in r.iter_lines())\n#\t\t\treader = csv.reader(f, delimiter=',', quotechar='\"')\n#\t\t\tfields=next(reader,None)\n#\t\t\tengland=next(reader,None)\n#\t\t\tself.latest_total=england[7]\n#\t\t\tlog.info(f'England latest total: {self.latest_total}')\n#\t\t\t\n#\t\tif True:\n#\t\t\tif self.latest_total:\n#\t\t\t\tif self.England_cases:\n#\t\t\t\t\tif str(self.England_cases) ==self.latest_total:\n#\t\t\t\t\t\tlog.info('nothing new here')\n#\t\t\t\t\t\tself._update=False\n#\t\t\t\t\t\treturn False\n#\t\t\t\tuserconfig.update('PHE','england_total_cases',str(self.latest_total))\n#\t\t\t\tself._update=True\n#\t\t\t\treturn 
True\n#\t\t\t\t\n##\t\t\tfor count, row in enumerate(reader, start=1):\n##\t\t\t\tprint(row[7])\n##\t\t\t\tif count == 1:\n##\t\t\t\t\tbreak\n\nclass Fetch_PHE(PandaImporter):\n\t\"\"\"fetch PHE cases for England and Wales from CSV\"\"\"\n\t\n\tdef __init__(self):\n\t\tself.today=date.today()\n\t\tself.edition=None\n\t\tself.fetch()\n\t\tself.fix()\n\t\tself.sequences=['ltla', 'nation', 'region', 'utla']\n\t\t\n\tdef process(self):\n\t\t\"\"\"ingest cases into database & update weekly totals\"\"\"\n\t\tif self.update_check():\n\t\t\tself.ingest_all()\n\t\t\tself.update_totals()\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')\n\t\n\tdef district_codes(self):\n\t\treturn sorted([z for z in self.data['Area code'].unique()])\n\n\t\n\tdef ingest_all(self):\n\t\t\"\"\"pull all daily cases from all PHE areas\"\"\"\n\t\tfor place in self.district_codes():\n\t\t\tself.sequence_ingest(place)\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_cases',self.edition)\n\n\tdef save(self):\n\t\tfilename=f\"{date.today()}-PHE-cases.csv\"\n\t\tfilename2=f\"{date.today()}-PHE-cases.json\"\n\t\tfilepath=os.path.join(DATA_STORE,filename)\n\t\tfilepath2=os.path.join(DATA_STORE,filename2)\n\t\tself.data.to_csv(filepath)\n\t\tself.data.to_json(filepath2)\n#\t\twith open(filepath, 'w') as outfile:\n#\t\t\tjson.dump(self.data, outfile)\n\t\t\n\tdef update_totals(self):\n\t\tupdate_weekly_cases('England')\n\n\n\tdef update_check(self):\n\t\tPHEstored=configs.config.get('PHE')\n\t\tif PHEstored:\n\t\t\tself.last_update=PHEstored.get('latest_cases')\n\t\t\tif self.last_update:\n\t\t\t\tif self.edition == self.last_update:\n\t\t\t\t\treturn False\n\t\treturn True\n\n\t@property\n\tdef total_cases(self):\n\t\treturn self.data[self.data['Area type']=='utla']['Daily lab-confirmed cases'].sum()\n\t\n\t@property\n\tdef latest_samples(self):\n\t\treturn self.data['Specimen date'].max()\n\t\n\tdef fetch(self,url=URL):\n\t\t\"\"\" get the latest cases data\"\"\"\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tprint(f'Last samples from {self.edition}')\n\n\tdef fetch_csv(self,url=URL_CSV):\n\t\tpath=os.path.join(DATA_STORE,'PHE_latestcases.csv')\n\t\tres=requests.get(url)\n\t\twith open(path, 'wb') as f:\n\t\t\tf.write(res.content)\n\t\tself.open_csv(path)\n\n\tdef fix(self):\n\t\tself.data.loc[self.data['Area name']=='Buckinghamshire','Area code']='E06000060'\n\t\tlog.info('Fixed wrong areacode for Bucks in PHE data')\n\n\tdef open_csv(self,f):\n\t\tself.data=pandas.read_csv(f, encoding= \"iso-8859-1\")\n\n\tdef sequence_ingest(self,areacode):\n\t\t\"\"\"ingest from a particular areacode\"\"\"\n\t\tdata=self.data[self.data['Area code']==areacode]\n\t\tareaname=data['Area name'].unique().item()\n\t\tprint(f'Ingesting cases from {areacode}: {areaname}')\n\t\t\n\t\tcounter=0\n\t\tfor day in data['Specimen date']:\n\t\t\tdate=fetchdate(day)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=areacode)\n\t\t\tthis_day=data[data['Specimen date']==day]\n\t\t\trow.areaname=areaname \n\t\t\t#add head(1) (faster than unique() ) to deal with some areas returned twice as part of both UTLA AND LTLA sequences\n\t\t\trow.dailyLabConfirmedCases=this_day['Daily lab-confirmed cases'].head(1).item()\n\t\t\trow.totalLabConfirmedCases=this_day['Cumulative lab-confirmed cases'].head(1).item()\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tprint(f'Processed: {counter} 
rows')\n\n\n\ndef check():\n ck=Check_PHE()\n return ck._update\n\ndef check_and_download():\n ck=Check_PHE()\n latest=ck.latest_update\n if ck._update:\n f=Fetch_API()\n f.district_check() #pull all local data and regions\n f.fetch()\n f.fix()\n f.last_update=latest\n if f.update_check():\n print('Saving latest PHE cases')\n f.save_all()\n else:\n print('No need to download')\n\n\ndef update_weekly_cases(nation):\n log.info(f\"update weekly cases for nation: {nation}\")\n q=CovidWeek.objects.filter(nation=nation)\n for place in q.values('areacode','areaname').distinct():\n areacode=place['areacode']\n area=place['areaname']\n if areacode and area:\n update_weekly_total(areacode=areacode,areaname=area)\n log.info(f'Completed updated weekly cases for nation {nation}')\n\ndef update_weekly_total(areacode=AREACODE,areaname=AREA):\n \"\"\"add up all daily cases into week calculation\"\"\"\n start,stop=model_calcs.RANGE_WEEK\n log.debug(f'Processing {areaname}')\n for week in range(start,stop+1):\n end_day=ons_week.week(week)\n \n week_total=weekly_total(end_day,areacode=areacode,areaname=areaname)\n #print(f'{areaname}: Weektotal for week number {week} ending {end_day}: {week_total}')\n \n if week_total is not None:\n try:\n stored,created=CovidWeek.objects.get_or_create(areacode=areacode,week=week)\n #print(stored.weeklycases)\n if stored.weeklycases != week_total:\n log.debug(f'{areaname}: updating week {week} from {stored.weeklycases} to {week_total}')\n stored.weeklycases=week_total\n stored.areaname=areaname\n stored.save()\n if created:\n stored.nation=ons_week.nation[areacode]\n stored.areaname=areaname\n log.debug(f'Created new entry for week {week} for {areaname}')\n stored.week=week\n stored.save()\n except Exception as e:\n log.error(e)\n log.error(f'No data stored for {areaname} week {week}')\n else:\n log.error(f'Bypassing {areaname} - no data')\n\ndef weekly_total(end_day,areacode=AREACODE,areaname=AREA):\n if True:\n week_total=0\n for day in range(6,-1,-1):\n date=end_day-timedelta(day)\n try:\n entry=DailyCases.objects.get(areacode=areacode,specimenDate=date)\n week_total+=entry.dailyLabConfirmedCases\n except:\n #print(f'No entry for {date}')\n pass\n return week_total\n \ndef sum_cases(nation='England'):\n \"\"\"add up total cases for a nation - for integrity checks\"\"\"\n _sum=0\n for _code in ons_week.stored_names:\n if ons_week.nation[_code]==nation:\n place=ons_week.stored_names[_code]\n _total=DailyCases.objects.filter(areaname=place).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')\n if _total:\n _sum +=_total\n else:\n print(f'No total for {place}')\n return _sum\n\ndef clean_cases(data):\n \"\"\"adjust for data glitches in PHE data\"\"\"\n newdata=[]\n #Add up Bucks Data\n bucks=defaultdict(list)\n for i in data:\n if i['areaName'] in ['Chiltern','Aylesbury Vale','South Bucks','Wycombe']:\n bucks[i['date']].append(i)\n else:\n newdata.append(i)\n print(bucks)\n for _date,_all in bucks.items():\n item={'areaName': 'Buckinghamshire','areaCode':'E06000060','specimenDate':_date}\n item['newCasesBySpecimenDate']=sum([x['newCasesBySpecimenDate'] for x in _all])\n item['cumCasesBySpecimenDate']=sum([x['cumCasesBySpecimenDate'] for x in _all])\n newdata.append(item)\n\n return newdata\n\ndef check_sum_cases(nation='England'):\n \"\"\"check total data\"\"\"\n ck=LocalLatest()\n fail=False\n data=ck.data.get('data')\n latest={}\n \n \n data=clean_cases(data) #repair glitches\n #check latest data matches stored data for nation\n for i in data:\n 
_code=i['areaCode']\n latest[_code]=i\n try:\n _nation=ons_week.nation[_code]\n except Exception as e:\n log.error(e)\n log.error(i['areaName'])\n continue\n if _nation==nation:\n if _code in ons_week.stored_names:\n place=ons_week.stored_names[_code]\n _total=DailyCases.objects.filter(areaname=place).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')\n _latest=i['cumCasesByPublishDate']\n if _total !=_latest:\n print(f'Mismatch: {place} Latest total{_latest} != stored {_total}')\n fail=True\n else:\n #print(f'{place} up to date')\n pass\n \n else:\n place=i['areaName']\n print(f'{place} not counted / not in TR tally')\n \n sumtotal=0\n for _code in ons_week.stored_names:\n if ons_week.nation[_code]==nation:\n i=latest.get(_code)\n if i:\n _latest=i['cumCasesByPublishDate']\n _total=DailyCases.objects.filter(areacode=_code).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')\n if _latest!=_total:\n print(f'Mismatch: {_code} Latest total{_latest} != stored {_total}')\n else:\n if _latest:\n sumtotal +=_latest\n else:\n print(f'Missing place {_code} in PHE published cases')\n print(f'Sum total of stored names for {nation} is {sumtotal}')\n \n return fail\n\n#DATALOAD=main()\n\ndef process(data):\n\tdata=main(eg)\n\tlatest=data['ltlas']#list of latest entries - lower teir\n\tlatest_upper_tier=data['utlas'] # upper tier\n\tmetadata=data['metadata']\n\tdaily_countrylevel=data['countries'] #just England\n\tregions=data['regions'] #English regions\n\tbigtotal=['dailyRecords']\n\ndef max_week():\n\treturn CovidWeek.objects.aggregate(Max('week')).get('week__max')\n\ndef name_index():\n\tq=DailyCases.objects.values('areacode','areaname').distinct()\n\t_i={}\n\tfor place in q:\n\t\tareacode=place['areacode']\n\t\tarea=place['areaname']\n\t\t_i[areacode]=area\n\treturn _i\n\ndef lookup_json(url):\n \"\"\"fetch and decode json from an api\"\"\"\n session=requests.Session()\n json_res=get_api_result(session,url)\n try:\n content=json.loads(json_res)\n return content\n except:\n raise NoContent\n \n \ndef get_api_result(session,url):\n \"\"\"return content of a get request\"\"\"\n try:\n res=session.get(url,timeout=TIMEOUT)\n if res.status_code == 404:\n raise NotFound(\"URL {} not found\".format(url))\n except Exception as e:\n print(e)\n return None\n return res.content\n\n\n\ndef fetchdate(datestring):\n try:\n if not datestring:\n raise NullDate\n date=datetime.strptime(datestring,'%Y-%m-%d')\n# date=iso8601.parse_date(datestring) -- convert a string in ISO8601\n date=timeaware(date)\n #print(datestring,date)\n except ValueError:\n raise NullDate\n return date\n\n \ndef timeaware(dumbtimeobject):\n return pytz.timezone(\"GMT\").localize(dumbtimeobject)\n#Mac / Linux stores all file times etc in GMT, so localise to GMT\n\n\ndef ingest_cases(data):\n\tcount=0\n\tprint('Checking for new data')\n\ttry:\n\t\tfor index,row in data.iterrows():\n\t\t\ttry:\n\t\t\t\tcount+=1\n\t\t\t\tif count%100==0:\n\t\t\t\t\tprint(count)\n\t\t\t\ti,created=DailyCases.objects.get_or_create(areacode=row['Area code'], specimenDate=fetchdate(row['Specimen date']))\n\t\t\t\ti.dailyLabConfirmedCases = row['Daily lab-confirmed cases']\n\t\t\t\ti.totalLabConfirmedCases = row['Cumulative lab-confirmed cases']\n\t\t\texcept DailyCases.DoesNotExist:\n\t\t\t\tprint('entry does not exist')\n\t\t\t\n\tfinally:\n\t\tprint(count)\n\t\t\t\n\n\n\n#\t\tdatestring=item['specimenDate']\n#\t\t_date=fetchdate(datestring)\n\t\t\t\n#\t\t\trow.areaname=areaname \n#\t\t\t#add head(1) (faster than 
unique() ) to deal with some areas returned twice as part of both UTLA AND LTLA sequences\n#\t\t\trow.dailyLabConfirmedCases=this_day['Daily lab-confirmed cases'].head(1).item()\n#\t\t\trow.totalLabConfirmedCases=this_day['Cumulative lab-confirmed cases'].head(1).item()\n#\t\t\trow.save()\n#\t\t\tcounter+=1\n#\t\tprint(f'Processed: {counter} rows')\n\n\n#\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=_date,areacode=areacode)\n#\t\t\trow.areaname=item['areaName']\n#\t\t\tdaily=item['newCasesBySpecimenDate']\n#\t\t\ttotal=item['cumCasesBySpecimenDate']\n#\t\t\trow,created=DailyReport.objects.get_or_create(specimenDate=date,publag=lag)\n#\t\t\tlag=(time_utils.parseISO(self.edition).date()-_date.date()).days\n#\t\t\tprint(f'{row.areaname}: Pubdate{pubdate}, SpecimenDate {_date.date}, Lag: {lag}')\n#\t\t\t\n#\t\t\tif counter==10:\n#\t\t\t\tbreak\n#\t\t\t\n#\t\t\trow,created=DailyReport.objects.get_or_create(specimenDate=date,publishDate=pubdate,areacode=item['areaCode'])\n#\t\t\tlag=(time_utils.parseISO(self.edition).date()-_date.date()).days#\n#\t\t\tprint(lag)\n\n \n\n"
] |
[
[
"pandas.read_excel",
"pandas.to_datetime",
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] |
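The my_api row lists `pandas.read_excel`, `pandas.to_datetime` and `pandas.read_csv`, which its two modules use to load the NRS and PHE case spreadsheets and to turn the date column into a proper index. The sketch below reproduces that load-then-index step with an invented in-memory CSV so it runs without network access; the real code downloads the files first:

```python
import io

import pandas as pd

# Invented sample standing in for a downloaded CSV of daily case counts.
raw_csv = io.StringIO(
    "date,Area A,Area B\n"
    "2020-06-01,3,5\n"
    "2020-06-02,4,*\n"  # '*' placeholder values also appear in the real feed
)

# pandas.read_csv loads the table; pandas.to_datetime converts the date
# column into a DatetimeIndex, matching the fix() step in the recorded code.
data = pd.read_csv(raw_csv)
data.index = pd.to_datetime(data["date"])
data = data.drop(columns=["date"])

# A remote spreadsheet would be handled analogously, e.g.
#   frame = pd.read_excel(io.BytesIO(response.content), sheet_name="Data", skiprows=2)
# (illustrative only; requires an Excel engine such as openpyxl).

print(data)
```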
csavur/biosignalsnotebooks
|
[
"c99596741a854c58bdefb429906023ac48ddc3b7",
"c99596741a854c58bdefb429906023ac48ddc3b7"
] |
[
"biosignalsnotebooks/biosignalsnotebooks/visualise.py",
"biosignalsnotebooks/build/lib/biosignalsnotebooks/detect.py"
] |
[
"\n\"\"\"\nList of functions intended to visualise the loaded data (electrophysiological signals).\n\nThis functions are mainly supported by Bokeh package.\n\nAvailable Functions\n-------------------\n[Public]\n\nplot_future !!!!!!!!!!! Untested version for future application !!!!!!!!!!!!!!!!!\n Plotting function intended for an easy representation of OpenSignals acquired data.\nplot\n Plotting function intended for an easy representation of OpenSignals acquired data.\nopensignals_style\n The application of this function ensures that OpenSignals graphical style will be automatically\n applied to the Bokeh plots.\nopensignals_color_pallet\n Returns one of the available OpenSignals colors following an iterative mechanism.\nopensignals_kwargs\n Function used in order to be automatically applied the OpenSignals graphical style to the\n toolbar of Bokeh grid plots.\n\nAvailable Functions\n-------------------\n[Private]\n\n_check_validity_of_inputs\n Checks when an input of function 'plot' has a valid format.\n\nObservations/Comments\n---------------------\nNone\n\n/\\\n\"\"\"\n\nimport itertools\nfrom numbers import Number\nimport numpy\nfrom bokeh.plotting import figure, output_file, show, save\nfrom bokeh.models.tools import PanTool, ResetTool, BoxZoomTool, WheelZoomTool\nfrom bokeh.models.glyphs import Line\nfrom bokeh.plotting.figure import FigureOptions\nfrom bokeh.layouts import gridplot\nfrom IPython.display import HTML\nfrom .aux_functions import _filter_keywords, _is_instance, _generate_bokeh_file\n\nCOLOR_LIST = itertools.cycle((\"#009EE3\", \"#302683\", \"#00893E\", \"#94C11E\", \"#FDC400\", \"#E84D0E\",\n \"#CF0272\", \"#F199C1\"))\n\ndef _plot_future(time, data, legend_label=None, title=None, y_axis_label=None, hor_lines=None,\n hor_lines_leg=None, vert_lines=None, vert_lines_leg=None,\n apply_opensignals_style=True, show_plot=True, warn_print=False, **kwargs):\n \"\"\"\n Plotting function intended for an easy representation of OpenSignals acquired data.\n\n ----------\n Parameters\n ----------\n time : list or list of lists\n List that contains the time axis samples or a list of lists, when it is intended to present\n data in a gridplot format. When the input is a list of lists the following structure must\n be respected:\n Gridplot with N rows and M columns\n [[cell_row_0_column_0, cell_row_0_column_1, ..., cell_row_0_column_M],\n [cell_row_1_column_0, cell_row_1_column_1, ..., cell_row_1_column_M],\n ...\n [cell_row_N_column_0, cell_row_N_column_1, ..., cell_row_N_column_M]]\n\n data : list or list of lists\n Should have the same shape of time until the cell_row_n_column_m level. At this stage\n cell_row_n_column_m can contain a set of lists. Each one of these lists contains give\n rise to a different plot at the figure located in row n and column m of the grid structure.\n\n legend_label : list\n Input where the legend of each plot is specified. Should have the same shape of data.\n\n title : list\n Input where the title of each figure is specified. Should have the same shape of time.\n\n y_axis_label : list\n Input where the y label of each figure is specified. 
Should have the same shape of time.\n\n hor_lines : list of lists\n The parent list must have the same shape of time and each of its elements (child lists)\n must be formed by numbers defining the y axis position of the horizontal lines.\n\n hor_lines_leg : list of lists\n Legends of the horizontal lines (same shape of hor_lines).\n\n vert_lines : list of lists\n The parent list must have the same shape of time and each of its elements (child lists)\n must be formed by numbers defining the x axis position of the vertical lines.\n\n vert_lines_leg : list of lists\n Legends of the vertical lines (same shape of vert_lines).\n\n apply_opensignals_style : boolean\n If True then the OpenSignals style will be applied.\n\n\n show_plot : boolean\n If True the generated figures will be shown.\n\n warn_print : bool\n If True some warnings about invalid kwargs keys will be prompted.\n\n **kwargs : dict\n Keyword values for applying in bokeh figures, lines and gridplots.\n\n Returns\n -------\n out : bokeh figure or gridplot\n Object that is produced during the execution of the present function.\n\n \"\"\"\n\n # -------------------------------- Application of styling options -----------------------------\n if apply_opensignals_style is True:\n style_figure = {**opensignals_kwargs(\"figure\"), **_filter_keywords(FigureOptions, kwargs,\n is_class=True,\n warn_print=warn_print)}\n style_line = {**opensignals_kwargs(\"line\"), **_filter_keywords(Line, kwargs,\n warn_print=warn_print)}\n style_gridplot = {**opensignals_kwargs(\"gridplot\"),\n **_filter_keywords(gridplot, kwargs, warn_print=warn_print)}\n else:\n style_figure = _filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print)\n style_line = _filter_keywords(Line, kwargs, warn_print=warn_print)\n style_gridplot = _filter_keywords(gridplot, kwargs, warn_print=warn_print)\n\n # ---------- Based on the input check if the output should be in the gridplot format ----------\n if len(list(numpy.shape(data))) == 3 and len(list(numpy.shape(time))) == 3:\n grid_plot = True\n elif len(list(numpy.shape(data))) == 1 and len(list(numpy.shape(time))) == 1:\n grid_plot = False\n else:\n raise RuntimeError(\"'time' and 'data' fields must have the same shape, which would be a \"\n \"list with 1 dimension or a list of lists with 3 levels, such as [[[\"\n \"time_0_0, time_0,1, time_0_2], [time_1_0, time_1_1, time_1_2]]]. 
In the\"\n \" previous example the output will be a gridplot with 2 rows and \"\n \"three columns.\")\n\n # ------------ Verification if the input arguments (title and legend) are valid ---------------\n # [legend]\n legend_label = _check_validity_of_inputs(data, legend_label, \"legend\", grid_plot, dimension=3)\n\n # [title]\n title = _check_validity_of_inputs(data, title, \"title\", grid_plot, dimension=2)\n\n # [y_axis_label]\n y_axis_label = _check_validity_of_inputs(data, y_axis_label, \"y_axis_label\", grid_plot,\n dimension=2)\n\n # Horizontal Lines.\n # [hor_lines]\n hor_lines = _check_validity_of_inputs(data, hor_lines, \"hor_lines\", grid_plot, dimension=2)\n hor_lines_leg = _check_validity_of_inputs(data, hor_lines_leg, \"hor_lines_leg\", grid_plot,\n dimension=2)\n\n # Vertical Lines.\n # [vert_lines]\n vert_lines = _check_validity_of_inputs(data, vert_lines, \"vert_lines\", grid_plot, dimension=2)\n vert_lines_leg = _check_validity_of_inputs(data, vert_lines_leg, \"vert_lines_leg\", grid_plot,\n dimension=2)\n\n # --------------------------------------- Plotting Stage --------------------------------------\n fig_list = []\n if grid_plot is True:\n # Each element inside \"data\", \"time\", \"title\", \"legend\" ... matrix cell must be a list.\n if all(_is_instance(list, el, condition=\"all\", deep=True) for el in [time, data, title,\n legend_label, y_axis_label,\n hor_lines, vert_lines,\n hor_lines_leg,\n vert_lines_leg]):\n for row in range(0, len(data)): # Generation of a figure per plot.\n fig_list.append([])\n for column in range(0, len(data[row])):\n for plt in range(0, len(data[row][column])):\n # Verification if all elements inside list are numbers.\n if _is_instance(Number, data[row][column][plt], condition=\"all\", deep=True) \\\n and not _is_instance(bool, data[row][column][plt], condition=\"any\") \\\n and _is_instance(Number, time[row][column][0], condition=\"all\") \\\n and not _is_instance(bool, time[row][column][0], condition=\"any\"):\n fig_list.append([])\n\n # Generation of multiple figures.\n fig_list[-1][-1].append(figure(title=title[row][column][0],\n y_axis_label=y_axis_label[row]\n [column][0],\n **style_figure))\n\n fig_list[-1][-1][-1].line(time[row][column][0], data[row][column][plt],\n legend_label=legend_label[row][column][plt], **style_line)\n else:\n raise RuntimeError(\"At least one of the list elements, specified in \"\n \"data or time, is not numeric.\")\n\n # Representation of horizontal lines.\n if hor_lines is not None:\n for hor_line_nbr, hor_line in enumerate(hor_lines[row][column]):\n if hor_lines_leg is not None:\n fig_list[-1][-1][-1].line([time[row][column][0],\n time[row][column][-1]],\n [hor_line, hor_line],\n legend_label=hor_lines_leg[row][hor_line_nbr],\n **opensignals_kwargs(\"line\"))\n else:\n fig_list[-1][-1][-1].line([time[row][column][0],\n time[row][column][-1]],\n [hor_line, hor_line],\n **opensignals_kwargs(\"line\"))\n\n # Representation of vertical lines.\n if vert_lines is not None:\n for vert_line_nbr, vert_line in enumerate(vert_lines[row][column]):\n if vert_lines_leg is not None:\n fig_list[-1][-1][-1].line([vert_line, vert_line],\n [numpy.min(data[row][column][0]),\n numpy.max(data[row][column][0])],\n legend_label=vert_lines_leg[row][vert_line_nbr],\n **opensignals_kwargs(\"line\"))\n else:\n fig_list[-1][-1][-1].line([vert_line, vert_line],\n [numpy.min(data[row][column][0]),\n numpy.max(data[row][column][0])],\n **opensignals_kwargs(\"line\"))\n\n # Update of line style.\n if apply_opensignals_style is True:\n 
style_line = {**opensignals_kwargs(\"line\"),\n **_filter_keywords(Line, kwargs, warn_print=warn_print)}\n else:\n style_line = _filter_keywords(Line, kwargs, warn_print=warn_print)\n\n else:\n raise RuntimeError(\"At least one of the list elements, specified in data, \"\n \"is not a sublist.\")\n else:\n # If this happen, then we receive as input a single list for time and data\n # (Single plot perspective).\n if _is_instance(Number, data, condition=\"all\") \\\n and not _is_instance(bool, data, condition=\"any\") \\\n and _is_instance(Number, time, condition=\"all\")\\\n and not _is_instance(bool, time, condition=\"any\"):\n fig_list.append(figure(title=title, y_axis_label=y_axis_label[0], **style_figure))\n fig_list[-1].line(time, data, legend_label=legend_label, **style_line)\n else:\n raise RuntimeError(\"At least one of the list elements, specified in data or time, is \"\n \"not numeric.\")\n\n # Application of the OpenSignals Sytle.\n if apply_opensignals_style is True:\n opensignals_style([item for sublist in fig_list for item in sublist])\n\n # Show of plots.\n if grid_plot is True:\n # Generation of the gridplot.\n grid = gridplot(fig_list, **style_gridplot)\n\n if show_plot is True:\n show(grid)\n else:\n raise RuntimeError(\"The specified number of lines and columns for the grid plot is not \"\n \"compatible.\")\n\n else:\n if show_plot is True:\n show(fig_list[-1])\n\n return fig_list\n\n\ndef plot(*args, legend_label=None, title=None, x_axis_label=\"Time (s)\", y_axis_label=None,\n grid_plot=False, grid_lines=None, grid_columns=None, hor_lines=None, hor_lines_leg=None,\n vert_lines=None, vert_lines_leg=None, apply_opensignals_style=True, show_plot=True,\n save_plot=False, warn_print=False, get_fig_list=False, file_name=None, **kwargs):\n \"\"\"\n -----\n Brief\n -----\n Plotting function intended for an easy representation of OpenSignals acquired data.\n\n -----------\n Description\n -----------\n This function allows to plot data acquired with resource to OpenSignals, available at\n https://bitalino.com/en/software, in an easy way using the Bokeh library (https://bokeh.pydata.org/en/latest/).\n The plot will automatically present the OpenSignals style if the corresponding parameter is set to True\n (apply_opensignals_style=True).\n\n This allows to easily use multiple plot elements without the need to know any visualization libraries.\n\n ----------\n Parameters\n ----------\n *args: list\n Variable number of arguments with the purpose of giving the user the possibility of\n defining as an input only the \"data\" axis or both \"time\" and \"data\" axes.\n\n legend_label : list\n Input where the legend of each plot is specified. Should have the same shape of time.\n\n title : list\n Input where the title of each figure is specified. Should have the same shape of time.\n\n x_axis_label : list\n Input where the x label of each figure is specified. All figures will have the same x label\n in the current implementation.\n\n y_axis_label : list\n Input where the y label of each figure is specified. 
Should have a length equal to the\n number of figures.\n\n grid_plot : boolean\n If True then the plots will be organized in a grid_plot structure.\n\n grid_lines : int\n Number of lines of grid plot.\n\n grid_columns : int\n Number of columns of grid plot.\n\n hor_lines : list of lists\n The parent list must have the same shape of time and each of its elements (child lists)\n must be formed by numbers defining the y axis position of the horizontal lines.\n\n hor_lines_leg : list of lists\n Legends of the horizontal lines (same shape of hor_lines).\n\n vert_lines : list of lists\n The parent list must have the same shape of time and each of its elements (child lists)\n must be formed by numbers defining the x axis position of the vertical lines.\n\n vert_lines_leg : list of lists\n Legends of the vertical lines (same shape of vert_lines).\n\n apply_opensignals_style : boolean\n If True then the OpenSignals style will be applied.\n\n\n show_plot : boolean\n If True the generated figures will be shown.\n\n save_plot : boolean\n If True the generated figures will be stored on an html file.\n\n warn_print : bool\n If True some warnings about invalid kwargs keys will be prompted.\n\n get_fig_list : bool\n If True then it will be returned a list containing the figure objects generated during\n the function execution.\n\n file_name : str\n Path containing the destination folder where the Bokeh figure will be stored.\n\n **kwargs : dict\n Keyword values for applying in bokeh figures, lines and gridplots.\n\n Returns\n -------\n out : bokeh figure or gridplot\n Object that is produced during the execution of the present function.\n\n \"\"\"\n\n # Generation of the HTML file where the plot will be stored.\n #file_name = _generate_bokeh_file(file_name)\n\n # Data conversion for ensuring that the function only works with lists.\n if len(args) == 1:\n time = [list(numpy.linspace(1, len(args[0][0]) + 1, len(args[0][0])))] * len(args[0])\n data = args[0]\n elif len(args) == 2:\n time = list(args[0])\n data = list(args[1])\n else:\n raise RuntimeError(\"biosignalsnotebooks plot function only accepts 1 or 2 arguments in *args\"\n \" input. If only 1 input is given it should be a list with data samples,\"\n \"otherwise if 2 inputs are given then the first one defines the time\"\n \"axis and the second one data values.\")\n\n # This function offers two input mechanisms (easy and complex). The easiest one consists in\n # the representation of a single plot in a single figure, so, the user only needs to specify as\n # inputs \"time\" and \"data\" lists. 
On the other hand, for the complex mechanism, the user can\n # represent plots in different figures, using for that lists of lists as \"time\" and \"data\"\n # inputs.\n # In the following lines is ensured that independently of the input given, the function will\n # achieve is purpose correctly.\n if _is_instance(Number, data, condition=\"all\") and not _is_instance(bool, data, condition=\"any\") \\\n and _is_instance(Number, time, condition=\"all\") \\\n and not _is_instance(bool, time, condition=\"any\"):\n time = [time]\n data = [data]\n if y_axis_label is not None:\n y_axis_label = [y_axis_label]\n if hor_lines is not None:\n hor_lines = [hor_lines]\n if hor_lines_leg is not None:\n hor_lines_leg = [hor_lines_leg]\n if vert_lines is not None:\n vert_lines = [vert_lines]\n if vert_lines_leg is not None:\n vert_lines_leg = [vert_lines_leg]\n if title is not None:\n title = [title]\n if legend_label is not None:\n legend_label = [legend_label]\n elif _is_instance(numpy.ndarray, data, condition=\"any\") \\\n or _is_instance(numpy.ndarray, time, condition=\"any\"):\n time = list(map(list, time))\n data = list(map(list, data))\n\n # Ensures the application or not of opensignals graphical style.\n if apply_opensignals_style is True:\n style_figure = {**opensignals_kwargs(\"figure\"), **_filter_keywords(FigureOptions, kwargs,\n is_class=True,\n warn_print=warn_print)}\n style_line = {**opensignals_kwargs(\"line\"), **_filter_keywords(Line, kwargs,\n warn_print=warn_print)}\n style_gridplot = {**opensignals_kwargs(\"gridplot\"),\n **_filter_keywords(gridplot, kwargs, warn_print=warn_print)}\n else:\n style_figure = _filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print)\n style_line = _filter_keywords(Line, kwargs, warn_print=warn_print)\n style_gridplot = _filter_keywords(gridplot, kwargs, warn_print=warn_print)\n\n # ------------------------ Verification if the input arguments are valid ----------------------\n if legend_label is not None:\n if isinstance(legend_label, list):\n if len(legend_label) != len(time) or len(legend_label) != len(data):\n raise RuntimeError(\"The shape of legend does not match with time input.\")\n else:\n raise RuntimeError(\"The specified data type of legend field is not valid. Input must \"\n \"be a list.\")\n #else:\n # legend_label = [None] * len(time)\n\n if title is not None:\n if isinstance(title, list):\n if len(title) != len(time) or len(title) != len(data):\n raise RuntimeError(\"The shape of title does not match with time input.\")\n elif isinstance(title, str):\n if grid_plot is True:\n raise RuntimeError(\"Each figure of the gridplot must have a title, i.e., the shape\"\n \" of time, data and title inputs needs to match.\")\n else:\n title = [title] * len(time)\n elif grid_plot is False and len(title) != 1:\n raise RuntimeError(\"The number of titles is not compatible with the number of figures \"\n \"(only one title is needed).\")\n else:\n raise RuntimeError(\"The specified data type of title field is not valid. 
Input must be \"\n \"a list.\")\n else:\n title = [None] * len(time)\n\n if y_axis_label is not None:\n if isinstance(y_axis_label, list):\n if len(y_axis_label) != len(time) or len(y_axis_label) != len(data):\n raise RuntimeError(\"The shape of y_axis_label does not match with time input.\")\n elif isinstance(y_axis_label, str):\n y_axis_label = [y_axis_label] * len(time)\n elif grid_plot is False and len(y_axis_label) != 1:\n raise RuntimeError(\"The number of y axis labels is not compatible with the number of \"\n \"figures.\")\n else:\n raise RuntimeError(\"The specified data type of y_axis_label field is not valid. Input \"\n \"must be a list or a string when grid_plot field is False.\")\n else:\n y_axis_label = [None] * len(time)\n\n # Coherence between grid_plot, grid_lines and grid_columns inputs.\n if grid_lines is not None or grid_columns is not None:\n if grid_plot is not True:\n raise RuntimeError(\"When grid_lines and grid_columns inputs are used the field grid_\"\n \"plot must be True.\")\n else:\n if not isinstance(grid_lines, int) or not isinstance(grid_columns, int):\n raise RuntimeError(\"At least one of the grid_lines or grid_columns values is not \"\n \"an integer.\")\n\n # Horizontal Lines.\n if hor_lines is not None:\n if isinstance(hor_lines, list):\n if len(hor_lines) != len(time) or len(hor_lines) != len(data):\n raise RuntimeError(\"The shape of hor_lines does not match with time input.\")\n else:\n raise RuntimeError(\"The specified data type of hor_lines field is not valid. Input \"\n \"must be a list of lists.\")\n\n # Each sublist entry must be numeric.\n for cell in hor_lines:\n if not _is_instance(Number, cell, condition=\"all\") \\\n or _is_instance(bool, cell, condition=\"any\"):\n raise RuntimeError(\"At least one of the list elements, specified in hor_lines, \"\n \"is not numeric.\")\n elif vert_lines_leg is not None:\n if len(hor_lines) != len(hor_lines_leg):\n raise RuntimeError(\"The shape of hor_lines and hor_lines_leg is not the same.\")\n\n # Vertical Lines.\n if vert_lines is not None:\n if isinstance(vert_lines, list):\n if len(vert_lines) != len(time) or len(vert_lines) != len(data):\n raise RuntimeError(\"The shape of vert_lines does not match with time input.\")\n else:\n raise RuntimeError(\"The specified data type of vert_lines field is not valid. 
\"\n \"Input must be a list of lists.\")\n\n # Each sublist entry must be numeric.\n for cell in vert_lines:\n if not _is_instance(Number, cell, condition=\"all\") \\\n or _is_instance(bool, cell, condition=\"any\"):\n raise RuntimeError(\"At least one of the list elements, specified in vert_lines, \"\n \"is not numeric.\")\n elif vert_lines_leg is not None:\n if len(vert_lines) != len(vert_lines_leg):\n raise RuntimeError(\"The shape of vert_lines and vert_lines_leg is not \"\n \"the same.\")\n\n # --------------------------------------- Plotting Stage --------------------------------------\n fig_list = []\n # If all data entries are lists, then it is considered that we are in a multiplot situation.\n if _is_instance(list, data, condition=\"all\") and _is_instance(list, time, condition=\"all\"):\n for list_entry in range(0, len(time)): # Generation of a figure per plot.\n # Verification if all elements inside list are numbers.\n if _is_instance(Number, data[list_entry], condition=\"all\") \\\n and not _is_instance(bool, data[list_entry], condition=\"any\") \\\n and _is_instance(Number, time[list_entry], condition=\"all\") \\\n and not _is_instance(bool, time[list_entry], condition=\"any\"):\n if len(time[list_entry]) == len(data[list_entry]): # Shape verification\n if grid_plot is True: # Generation of multiple figures.\n fig_list.append(figure(title=title[list_entry],\n y_axis_label=y_axis_label[list_entry],\n x_axis_label=x_axis_label,\n **style_figure))\n elif grid_plot is False and list_entry == 0:\n fig_list.append(figure(title=title[list_entry],\n y_axis_label=y_axis_label[list_entry],\n x_axis_label=x_axis_label,\n sizing_mode='scale_both',\n **style_figure))\n\n if legend_label is not None:\n fig_list[-1].line(time[list_entry], data[list_entry], legend_label=legend_label[list_entry],\n **style_line)\n else:\n fig_list[-1].line(time[list_entry], data[list_entry], **style_line)\n\n # Representation of horizontal lines.\n if hor_lines is not None:\n for hor_line_nbr, hor_line in enumerate(hor_lines[list_entry]):\n if hor_lines_leg is not None:\n fig_list[-1].line([time[list_entry][0], time[list_entry][-1]],\n [hor_line, hor_line],\n legend_label=hor_lines_leg[list_entry][hor_line_nbr],\n **opensignals_kwargs(\"line\"))\n else:\n fig_list[-1].line([time[list_entry][0], time[list_entry][-1]],\n [hor_line, hor_line],\n **opensignals_kwargs(\"line\"))\n\n # Representation of vertical lines.\n if vert_lines is not None:\n for vert_line_nbr, vert_line in enumerate(vert_lines[list_entry]):\n if vert_lines_leg is not None:\n fig_list[-1].line([vert_line, vert_line],\n [numpy.min(data[list_entry]),\n numpy.max(data[list_entry])],\n legend_label=vert_lines_leg[list_entry][vert_line_nbr],\n **opensignals_kwargs(\"line\"))\n else:\n fig_list[-1].line([vert_line, vert_line],\n [numpy.min(data[list_entry]),\n numpy.max(data[list_entry])],\n **opensignals_kwargs(\"line\"))\n\n # Update of line style.\n if apply_opensignals_style is True:\n style_line = {**opensignals_kwargs(\"line\"),\n **_filter_keywords(Line, kwargs, warn_print=warn_print)}\n else:\n style_line = _filter_keywords(Line, kwargs, warn_print=warn_print)\n\n else:\n raise RuntimeError(\"The shape of time and data inputs does not match.\")\n else:\n raise RuntimeError(\"At least one of the list elements, specified in data or time, \"\n \"is not numeric.\")\n\n # If this happen, then we receive as input a single list for time and data\n # (Single plot perspective).\n elif _is_instance(Number, data, condition=\"all\") \\\n and not 
_is_instance(bool, data, condition=\"any\") \\\n and _is_instance(Number, time, condition=\"all\") \\\n and not _is_instance(bool, time, condition=\"any\"):\n grid_plot = False\n\n # Verification if all elements inside list are numbers.\n if _is_instance(Number, data, condition=\"all\") \\\n and not _is_instance(bool, data, condition=\"any\") \\\n and _is_instance(Number, time, condition=\"all\") \\\n and not _is_instance(bool, time, condition=\"any\"):\n if len(time) == len(data): # Shape verification\n fig_list.append(figure(title=title[0], y_axis_label=y_axis_label[0],\n x_axis_label=x_axis_label, **style_figure))\n if legend_label is not None:\n fig_list[-1].line(time, data, legend_label=legend_label[0], **style_line)\n else:\n fig_list[-1].line(time, data, **style_line)\n else:\n raise RuntimeError(\"The shape of time and data inputs does not match.\")\n else:\n raise RuntimeError(\"At least one of the list elements, specified in data or time, is \"\n \"not numeric.\")\n\n else:\n raise RuntimeError(\"The input 'data' or/and 'time' does not have a valid format. It should \"\n \"be a list of numbers or a list of lists.\")\n\n # Application of the OpenSignals Style.\n if apply_opensignals_style is True:\n opensignals_style(fig_list)\n\n # Show of plots.\n if grid_plot is True:\n nbr_of_spaces = grid_lines * grid_columns\n nbr_of_figures = len(fig_list)\n\n if nbr_of_spaces >= nbr_of_figures > (grid_lines - 1) * grid_columns:\n # Organization of data accordingly to the number of rows and columns specified as input\n # arguments.\n grid_layout = []\n fig_nbr = 0\n for row in range(0, grid_lines):\n grid_layout.append([])\n for column in range(0, grid_columns):\n if fig_nbr <= nbr_of_figures - 1:\n grid_layout[-1].append(fig_list[fig_nbr])\n else:\n grid_layout[-1].append(None)\n\n # Update of auxiliary variable.\n fig_nbr += 1\n\n # Generation of the gridplot.\n grid = gridplot(grid_layout, **style_gridplot)\n\n if show_plot is True:\n show(grid)\n elif save_plot is True:\n save(grid)\n #return HTML('<iframe width=100% height=350 src=\"generated_plots/' + file_name + '\"></iframe>')\n else:\n raise RuntimeError(\"The specified number of lines and columns for the grid plot is not \"\n \"compatible.\")\n\n else:\n if show_plot is True:\n show(fig_list[-1])\n elif save_plot is True:\n save(fig_list[-1])\n #return HTML('<iframe width=100% height=\"' + str(fig_list[-1].plot_height) + '\" src=\"generated_plots/' + file_name + '\"></iframe>')\n\n if get_fig_list is True:\n return fig_list\n\n\ndef opensignals_style(figure_list, grid_plot=None, toolbar=\"right\"):\n \"\"\"\n -----\n Brief\n -----\n Function used to automatically apply the OpenSignals graphical style to Bokeh plots.\n\n -----------\n Description\n -----------\n OpenSignals has its own graphical style and Bokeh plots function in an object oriented way.\n\n This function allows to apply the Opensignals graphical style to a set of Bokeh figures objects given as inputs.\n\n ----------\n Parameters\n ----------\n figure_list : bokeh figure/s\n The base object/s where the graphical functions will be applied.\n\n grid_plot : bokeh gridplot\n Contains the layout structure, where multiple bokeh figures are represented.\n\n toolbar : str\n String defining the toolbar position.\n\n \"\"\"\n\n for fig in figure_list:\n fig.background_fill_color = (242, 242, 242)\n\n fig.toolbar.active_scroll = fig.select_one(WheelZoomTool)\n\n # Removal of unnecessary tools.\n figure_tools = fig.tools\n for tool in range(len(figure_tools) - 1, -1, -1):\n if 
not isinstance(figure_tools[tool], (type(PanTool()), type(BoxZoomTool()),\n type(WheelZoomTool()), type(ResetTool()))):\n del figure_tools[tool]\n\n fig.sizing_mode = 'scale_width'\n fig.height = 200\n fig.toolbar.logo = None\n fig.toolbar_location = toolbar\n\n fig.xgrid.grid_line_color = (150, 150, 150)\n fig.ygrid.grid_line_color = (150, 150, 150)\n\n fig.xgrid.grid_line_dash = [2, 2]\n\n fig.xaxis.major_tick_line_color = \"white\"\n fig.xaxis.minor_tick_line_color = \"white\"\n fig.xaxis.axis_line_color = \"white\"\n fig.yaxis.major_tick_in = 0\n fig.yaxis.major_tick_out = 0\n\n fig.yaxis.major_tick_line_color = \"white\"\n fig.yaxis.minor_tick_line_color = \"white\"\n fig.yaxis.minor_tick_in = 0\n fig.yaxis.minor_tick_out = 0\n fig.yaxis.axis_line_color = (150, 150, 150)\n fig.yaxis.axis_line_dash = [2, 2]\n\n fig.yaxis.major_label_text_color = (88, 88, 88)\n fig.xaxis.major_label_text_color = (88, 88, 88)\n\n fig.ygrid.grid_line_dash = [2, 2]\n\n if isinstance(grid_plot, list):\n if grid_plot:\n for g_plot in grid_plot:\n g_plot.sizing_mode = 'scale_width'\n g_plot.height = 600\n\n\ndef opensignals_color_pallet():\n \"\"\"\n -----\n Brief\n -----\n Function that automatically returns one of the available OpenSignals colors.\n\n -----------\n Description\n -----------\n OpenSignals has a set of predefined colours that can be applied in plots.\n\n This functions returns one of those colors as a hexadecimal code.\n\n Returns\n -------\n out : str\n Hexadecimal color.\n\n \"\"\"\n\n return COLOR_LIST.__next__()\n\n\ndef opensignals_kwargs(obj):\n \"\"\"\n -----\n Brief\n -----\n Function used to automatically apply the OpenSignals graphical style to the toolbar of Bokeh grid plots.\n\n -----------\n Description\n -----------\n Bokeh grid plots have numerous options in order to personalise the visual aspect and functionalities of plots.\n OpenSignals uses a specific graphical design that limits this options and unifies the aspect of its plots.\n\n This function applies the graphical aspect of the toolbar of OpenSignals to a given Bokeh figure object given as\n input.\n\n ----------\n Parameters\n ----------\n obj : str\n String that identifies if the kwargs will be the input of \"figure\" or \"gridplot\".\n\n Returns\n -------\n out : dict\n Dictionary with toolbar parameters.\n\n \"\"\"\n\n out = None\n if obj == \"figure\":\n out = {}\n elif obj == \"gridplot\":\n out = {\"toolbar_options\": {\"logo\": None}, \"sizing_mode\": 'scale_width'}\n elif obj == \"line\":\n out = {\"line_width\": 2, \"line_color\": opensignals_color_pallet()}\n\n return out\n\n\n# ==================================================================================================\n# ================================= Private Functions ==============================================\n# ==================================================================================================\n\n\ndef _check_validity_of_inputs(data, input_arg, input_name, grid_plot, dimension):\n \"\"\"\n Function that verifies when an input ('input_arg') of function 'plot' has a valid structure.\n\n ----------\n Parameters\n ----------\n data : list or list of lists\n Structure with the data that will be plotted.\n\n input_arg : list or list of lists\n The input data to be verified.\n\n input_name : str\n Name of the input_arg variable.\n\n grid_plot : bool\n A flag that identifies when the input_arg is a matrix or not.\n\n dimension : int\n Level of verification in the matrix format structure.\n\n Returns\n -------\n out : list or list of 
lists\n Returns the same value as input_arg or a modified version.\n \"\"\"\n if input_arg is not None:\n if grid_plot is True:\n if isinstance(input_arg, list):\n if numpy.shape(input_arg)[:dimension] != numpy.shape(data)[:dimension]:\n raise RuntimeError(\"The shape of \" + input_name + \" does not match with data \"\n \"input.\")\n\n else:\n raise RuntimeError(\"The specified data type of \" + input_name +\n \" field is not valid. Input must be a list.\")\n else:\n if not isinstance(input_arg, str):\n raise RuntimeError(\"Taking into account that only one time-series had been \"\n \"specified at 'data', the \" + input_name + \" field must be a \"\n \"string\")\n elif grid_plot is True:\n input_arg = numpy.ndarray(shape=numpy.shape(data)[:dimension], dtype=numpy.object)\n\n return input_arg\n\n# 07/11/2018 20h28m :)\n",
"\n\"\"\"\nList of functions intended to detect events in the acquired data.\n\nAvailable Functions\n-------------------\n[Public]\n\ndetect_r_peaks\n Function that applies the Pan-Tompkins algorithm for detection of R peaks.\ntachogram\n From ECG signal or from the list of samples where R peaks are located, this function generates\n a tachogram, i.e., a time-series with the evolution of RR interval (in seconds) along the\n acquisition.\ndetect_emg_activations\n In this function a single threshold algorithm is used for identifying the begin and end of each\n muscular activation period.\n EMG signal was simplified before the \"sample by sample\" threshold check, through smoothing and\n application of Teager Kaiser Energy Operator (TKEO).\n\nAvailable Functions\n-------------------\n[Private]\n\n<Pan-Tompkins Algorithm>\n_ecg_band_pass_filter\n Step 1 of ECG simplification presented on Pan-Tompkins algorithm.\n_differentiate\n Step 2 of ECG simplification presented on Pan-Tompkins algorithm.\n_squaring\n Step 3 of ECG simplification presented on Pan-Tompkins algorithm.\n_integration\n Step 4 of ECG simplification presented on Pan-Tompkins algorithm.\n_buffer_ini\n Initialisation of the buffer that stores eight RR intervals (R peak detection algorithm consists\n in a sequential verification of each signal samples and rr_buffer will store the most recent\n eight RR intervals).\n_buffer_update\n Update of buffer content (used in the R peak detection algorithm).\n_detects_peaks\n Function where a first sequence of conditions, defined by Pan and Tompkins, are checked in order\n to obtain a temporary list of possible R peaks.\n_checkup\n Checkup of a second sequence of conditions, defined by Pan and Tompkins, in order to our\n temporary list with possible R peaks be reduced to the set of definitive R peaks.\n_acceptpeak\n In this function the Pan-Tompkins algorithm parameter SPK1 is updated.\n_noisepeak\n In this function the Pan-Tompkins algorithm parameter NPK1 is updated.\n</Pan-Tompkins Algorithm>\n\n<Muscular Activation Detection Algorithm>\n_thres_norm_reg\n Inside this function a relative value (percentage) of the muscular activation threshold is\n converted to an absolute format.\n\nObservations/Comments\n---------------------\nNone\n\n/\\\n\"\"\"\n\nfrom .conversion import raw_to_phy\nfrom .visualise import plot, opensignals_kwargs, opensignals_style, opensignals_color_pallet\nfrom .aux_functions import _butter_bandpass_filter, _moving_average\n\nimport numpy\nfrom scipy.stats import linregress\nfrom scipy.signal import filtfilt, butter\n\n# Base packages used in OpenSignals Tools Notebooks for plotting data\nfrom bokeh.plotting import figure, show\nfrom bokeh.io import output_notebook\nfrom bokeh.layouts import gridplot\noutput_notebook(hide_banner=True)\n\n\ndef detect_r_peaks(ecg_signal, sample_rate, time_units=False, volts=False, resolution=None,\n device=\"biosignalsplux\", plot_result=False):\n \"\"\"\n -----\n Brief\n -----\n Python implementation of R peak detection algorithm (proposed by Raja Selvaraj).\n\n -----------\n Description\n -----------\n Pan-Tompkins algorithm is one of the gold-standard algorithms in R-peak detection on ECG due to its low\n computational complexity, which allows for real-time applications, preserving high accuracy values.\n\n This function allows the detection of these events in ECG signals using the Pan-Tompkins.\n\n ----------\n Parameters\n ----------\n ecg_signal : list\n List of ECG acquired samples.\n\n sample_rate : int\n Sampling 
frequency.\n\n time_units : boolean\n If True this function will return the R peak position in seconds.\n\n volts : boolean\n If True, then the conversion of raw units to mV will be done. Resolution needs to be\n specified.\n\n resolution : int or None\n Selected resolution for data acquisition.\n\n device : str\n Specification of the device category.\n\n plot_result : boolean\n If True it will be presented a graphical representation of the R peak position in the ECG\n signal.\n\n Returns\n -------\n out : R peak position (ndarray), R peak amplitude (ndarray)\n R peak position (sample number or time instant in seconds) and amplitude (raw or mV).\n\n \"\"\"\n\n if volts is True:\n if resolution is not None:\n # ecg_signal = ((ecg_signal / 2 ** resolution) - 0.5) * 3\n ecg_signal = raw_to_phy(\"ECG\", device, ecg_signal, resolution, option=\"mV\")\n else:\n raise RuntimeError(\"For converting raw units to mV is mandatory the specification of \"\n \"acquisition resolution.\")\n\n if time_units is True:\n time = numpy.linspace(0, len(ecg_signal) / sample_rate, len(ecg_signal))\n else:\n time = numpy.linspace(0, len(ecg_signal) - 1, len(ecg_signal))\n\n # Filtering Step of Pan-Tompkins Algorithm.\n filtered = _ecg_band_pass_filter(ecg_signal, sample_rate)\n\n # Differentiation Step of Pan-Tompkins Algorithm.\n differentiated = _differentiate(filtered)\n\n # Rectification Step of Pan-Tompkins Algorithm.\n squared = _squaring(differentiated)\n\n # Integration Step of Pan-Tompkins Algorithm.\n integrated = _integration(squared, sample_rate)\n\n rr_buffer, spk1, npk1, threshold = _buffer_ini(integrated, sample_rate)\n probable_peaks, possible_peaks = _detects_peaks(integrated, sample_rate)\n definitive_peaks = _checkup(probable_peaks, integrated, sample_rate, rr_buffer, spk1, npk1,\n threshold)\n definitive_peaks = list(map(int, definitive_peaks))\n\n # Rephasing step.\n definitive_peaks_rephase = numpy.array(definitive_peaks) - 30 * (sample_rate / 1000)\n definitive_peaks_rephase = list(map(int, definitive_peaks_rephase))\n\n if time_units is True:\n peaks = numpy.array(time)[definitive_peaks_rephase]\n else:\n peaks = definitive_peaks_rephase\n\n amplitudes = numpy.array(ecg_signal)[definitive_peaks_rephase]\n\n # If plot is invoked by plot_result flag, then a graphical representation of the R peaks is\n # presented to the user.\n if plot_result is True:\n time_int = numpy.array(time[1:])\n integrated = numpy.array(integrated)\n\n fig = figure(x_axis_label='Time (s)', y_axis_label='Raw Data',\n **opensignals_kwargs(\"figure\"))\n fig.line(time_int, integrated, **opensignals_kwargs(\"line\"))\n fig.circle(time_int[definitive_peaks], integrated[definitive_peaks], size=30,\n color=\"#00893E\", legend_label=\"Definitive Peaks\")\n fig.circle(time_int[probable_peaks], integrated[probable_peaks], size=20, color=\"#009EE3\",\n legend_label=\"Probable Peaks\")\n fig.circle(time_int[possible_peaks], integrated[possible_peaks], size=10, color=\"#302683\",\n legend_label=\"Possible Peaks\")\n\n fig2 = figure(x_axis_label='Time (s)', y_axis_label='Raw Data',\n **opensignals_kwargs(\"figure\"))\n fig2.line(time, ecg_signal, **opensignals_kwargs(\"line\"))\n fig2.circle(time[definitive_peaks_rephase],\n numpy.array(ecg_signal)[definitive_peaks_rephase],\n size=30, color=opensignals_color_pallet(), legend_label=\"Definitive Peaks\")\n\n opensignals_style([fig, fig2])\n\n grid_plot = gridplot([[fig], [fig2]], **opensignals_kwargs(\"gridplot\"))\n show(grid_plot)\n\n return peaks, amplitudes\n\n\ndef 
detect_emg_activations(emg_signal, sample_rate, smooth_level=20, threshold_level=10,\n time_units=False, volts=False, resolution=None, device=\"biosignalsplux\",\n plot_result=False):\n \"\"\"\n -----\n Brief\n -----\n Python implementation of Burst detection algorithm using Teager Kaiser Energy Operator.\n\n -----------\n Description\n -----------\n Activation events in EMG readings correspond to an increase of muscular activity, namely, from inaction to action.\n These events are characterised by an increase in electric potential that returns to the initial values when the\n muscle returns to a state of inaction.\n\n This function detects activation events using the Teager Kaiser Energy Operator.\n\n ----------\n Parameters\n ----------\n emg_signal : list\n List of EMG acquired samples.\n\n sample_rate : int\n Sampling frequency.\n\n smooth_level : number\n Defines a percentage proportional to the smoothing level, i.e. the bigger this value is,\n the more smoothed is the signal.\n\n threshold_level : number\n Specification of the single threshold position, used for distinguishing between activation\n (above) and inactivation samples (below).\n\n time_units : boolean\n If True this function will return the Burst begin and end positions in seconds.\n\n volts : boolean\n If True, then the conversion of raw units to mV will be done. Resolution need to be\n specified.\n\n resolution : int\n Selected resolution for data acquisition.\n\n device : str\n Specification of the device category.\n\n plot_result : boolean\n If True it will be presented a graphical representation of the detected burst in the EMG\n signal.\n\n Returns\n -------\n out : bursts begin (ndarray), bursts end (ndarray)\n Begin and end of bursts (sample number or time instant in seconds).\n\n smooth_signal: list\n It is returned the smoothed EMG signal (after the processing steps intended to simplify the\n signal).\n\n threshold_level: float\n The value of the detection threshold used to locate the begin and end of each muscular\n activation period.\n \"\"\"\n\n if volts is True:\n if resolution is not None:\n emg_signal = raw_to_phy(\"EMG\", device, emg_signal, resolution, option=\"mV\")\n units = \"mV\"\n else:\n raise RuntimeError(\n \"For converting raw units to mV is mandatory the specification of acquisition \"\n \"resolution.\")\n else:\n units = \"Input Units\"\n\n if time_units is True:\n time_units_str = \"Time (s)\"\n time = numpy.linspace(0, len(emg_signal) / sample_rate, len(emg_signal))\n else:\n time = numpy.linspace(0, len(emg_signal) - 1, len(emg_signal))\n time_units_str = \"Sample Number\"\n\n # ----------------------------------- Baseline Removal -----------------------------------------\n pre_pro_signal = numpy.array(emg_signal) - numpy.average(emg_signal)\n\n # ------------------------------------ Signal Filtering ----------------------------------------\n low_cutoff = 10 # Hz\n high_cutoff = 300 # Hz\n\n # Application of the signal to the filter.\n pre_pro_signal = _butter_bandpass_filter(pre_pro_signal, low_cutoff, high_cutoff, sample_rate)\n\n # ------------------------------ Application of TKEO Operator ----------------------------------\n tkeo = []\n for i, signal_sample in enumerate(pre_pro_signal):\n if i in (0, len(pre_pro_signal) - 1):\n tkeo.append(signal_sample)\n else:\n tkeo.append(numpy.power(signal_sample, 2) - (pre_pro_signal[i + 1] *\n pre_pro_signal[i - 1]))\n\n # Smoothing level - Size of sliding window used during the moving average process (a function\n # of sampling frequency)\n 
smoothing_level = int((smooth_level / 100) * sample_rate)\n\n # --------------------------------- Signal Rectification ---------------------------------------\n rect_signal = numpy.absolute(tkeo)\n\n # ------------------------------ First Moving Average Filter -----------------------------------\n rect_signal = _moving_average(rect_signal, sample_rate / 10)\n\n # -------------------------------- Second Smoothing Phase --------------------------------------\n smooth_signal = []\n for i in range(0, len(rect_signal)):\n if smoothing_level < i < len(rect_signal) - smoothing_level:\n smooth_signal.append(numpy.mean(rect_signal[i - smoothing_level:i + smoothing_level]))\n else:\n smooth_signal.append(0)\n\n # ----------------------------------- Threshold -----------------------------------------------\n avg_pre_pro_signal = numpy.average(pre_pro_signal)\n std_pre_pro_signal = numpy.std(pre_pro_signal)\n\n threshold_level = avg_pre_pro_signal + _thres_norm_reg(threshold_level, smooth_signal,\n pre_pro_signal) * std_pre_pro_signal\n\n # Generation of a square wave reflecting the activation and inactivation periods.\n binary_signal = []\n for i in range(0, len(time)):\n if smooth_signal[i] >= threshold_level:\n binary_signal.append(1)\n else:\n binary_signal.append(0)\n\n # ------------------------------ Begin and End of Bursts --------------------------------------\n diff_signal = numpy.diff(binary_signal)\n act_begin = numpy.where(diff_signal == 1)[0]\n act_end = numpy.where(diff_signal == -1)[0]\n\n if time_units is True:\n time_begin = numpy.array(time)[act_begin]\n time_end = numpy.array(time)[act_end]\n else:\n time_begin = act_begin\n time_end = act_end\n\n # If plot is invoked by plot_result flag, then a graphical representation of the R peaks is\n # presented to the user.\n if plot_result is True:\n plot([list(time), list(time)], [list(emg_signal), list(numpy.array(binary_signal) *\n numpy.max(emg_signal))],\n yAxisLabel=[\"Data Samples (\" + units + \")\"] * 2,\n x_axis_label=time_units_str, legend_label=[\"EMG Signal\", \"Activation Signal\"])\n\n return time_begin, time_end, smooth_signal, threshold_level\n\n\n# ==================================================================================================\n# ================================= Private Functions ==============================================\n# ==================================================================================================\n\n# [Pan-Tompkins R peak detection algorithm]\ndef _ecg_band_pass_filter(data, sample_rate):\n \"\"\"\n Bandpass filter with a bandpass setting of 5 to 15 Hz\n\n ----------\n Parameters\n ----------\n data : list\n List with the ECG signal samples.\n sample_rate : int\n Sampling rate at which the acquisition took place.\n\n Returns\n -------\n out : list\n Filtered signal.\n \"\"\"\n nyquist_sample_rate = sample_rate / 2.\n normalized_cut_offs = [5/nyquist_sample_rate, 15/nyquist_sample_rate]\n b_coeff, a_coeff = butter(2, normalized_cut_offs, btype='bandpass')[:2]\n return filtfilt(b_coeff, a_coeff, data, padlen=150)\n\n\ndef _differentiate(data):\n \"\"\"\n Derivative nearly linear between dc and 30 Hz\n\n ----------\n Parameters\n ----------\n data : list\n Data samples of the signal where the first derivative estimate is done.\n\n Returns\n -------\n out : list\n List with the differences between consecutive samples (the length of this list is equal to\n len(data) - 1).\n\n \"\"\"\n return numpy.diff(data)\n\n\ndef _squaring(data):\n \"\"\"\n Squaring data point by point. 
Nonlinear amplification, emphasizing the higher\n frequencies\n\n ----------\n Parameters\n ----------\n data : ndarry\n Array that contains signal samples. Each sample value will be squared.\n\n Returns\n -------\n out : ndarray\n Squared signal.\n\n \"\"\"\n return data * data\n\n\ndef _integration(data, sample_rate):\n \"\"\"\n Moving window integration. N is the number of samples in the width of the integration\n window\n\n ----------\n Parameters\n ----------\n data : ndarray\n Samples of the signal where a moving window integration will be applied.\n sample_rate : int\n Sampling rate at which the acquisition took place.\n\n Returns\n -------\n out : ndarray\n Integrated signal samples.\n \"\"\"\n wind_size = int(0.080 * sample_rate)\n int_ecg = numpy.zeros_like(data)\n cum_sum = data.cumsum()\n int_ecg[wind_size:] = (cum_sum[wind_size:] - cum_sum[:-wind_size]) / wind_size\n int_ecg[:wind_size] = cum_sum[:wind_size] / numpy.arange(1, wind_size + 1)\n\n return int_ecg\n\n\ndef _buffer_ini(data, sample_rate):\n \"\"\"\n Initializes the buffer with eight 1s intervals\n\n ----------\n Parameters\n ----------\n data : ndarray\n Pre-processed ECG signal samples.\n sample_rate : int\n Sampling rate at which the acquisition took place.\n\n Returns\n -------\n rr_buffer : list\n Data structure that stores eight samples (in the future this buffer will store the duration\n of eight RR intervals instead of the 1 second values defined in initialisation).\n spk1 : float\n Initial value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named signal peak).\n npk1 : int\n Initial value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named noise peak).\n threshold : float\n Initial value of the adaptive threshold level (relevant parameter for the application of\n specific criteria during the identification of R peaks).\n\n Sources\n -------\n https://www.robots.ox.ac.uk/~gari/teaching/cdt/A3/readings/ECG/Pan+Tompkins.pdf\n\n\n \"\"\"\n rr_buffer = [1] * 8\n spk1 = max(data[sample_rate:2*sample_rate])\n npk1 = 0\n threshold = _buffer_update(npk1, spk1)\n\n return rr_buffer, spk1, npk1, threshold\n\n\ndef _buffer_update(npk1, spk1):\n \"\"\"\n Computes threshold based on signal and noise values\n\n ----------\n Parameters\n ----------\n npk1 : int\n Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named noise peak).\n spk1 : float\n Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named signal peak).\n\n Returns\n -------\n out : float\n The updated threshold level.\n\n \"\"\"\n threshold = npk1 + 0.25 * (spk1 - npk1)\n\n return threshold\n\n\ndef _detects_peaks(ecg_integrated, sample_rate):\n \"\"\"\n Detects peaks from local maximum\n\n ----------\n Parameters\n ----------\n ecg_integrated : ndarray\n Array that contains the samples of the integrated signal.\n sample_rate : int\n Sampling rate at which the acquisition took place.\n\n Returns\n -------\n choosen_peaks : list\n List of local maximums that pass the first stage of conditions needed to be considered as\n a R peak.\n possible_peaks : list\n List with all the local maximums in the signal.\n\n \"\"\"\n\n # Minimum RR interval = 200 ms\n min_rr = (sample_rate / 1000) * 200\n\n # Computes all possible peaks and their amplitudes\n possible_peaks = [i for i in range(0, len(ecg_integrated)-1)\n if ecg_integrated[i-1] < ecg_integrated[i] and\n ecg_integrated[i] > ecg_integrated[i+1]]\n\n 
possible_amplitudes = [ecg_integrated[k] for k in possible_peaks]\n chosen_peaks = []\n\n # Starts with first peak\n if not possible_peaks:\n raise Exception(\"No Peaks Detected.\")\n peak_candidate_i = possible_peaks[0]\n peak_candidate_amp = possible_amplitudes[0]\n for peak_i, peak_amp in zip(possible_peaks, possible_amplitudes):\n if peak_i - peak_candidate_i <= min_rr and peak_amp > peak_candidate_amp:\n peak_candidate_i = peak_i\n peak_candidate_amp = peak_amp\n elif peak_i - peak_candidate_i > min_rr:\n chosen_peaks += [peak_candidate_i - 6] # Delay of 6 samples\n peak_candidate_i = peak_i\n peak_candidate_amp = peak_amp\n else:\n pass\n\n return chosen_peaks, possible_peaks\n\n\ndef _checkup(peaks, ecg_integrated, sample_rate, rr_buffer, spk1, npk1, threshold):\n \"\"\"\n Check each peak according to thresholds\n\n ----------\n Parameters\n ----------\n peaks : list\n List of local maximums that pass the first stage of conditions needed to be considered as\n an R peak.\n ecg_integrated : ndarray\n Array that contains the samples of the integrated signal.\n sample_rate : int\n Sampling rate at which the acquisition took place.\n rr_buffer : list\n Data structure that stores the duration of the last eight RR intervals.\n spk1 : float\n Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named signal peak).\n npk1 : int\n Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named noise peak).\n threshold : float\n Initial value of the adaptive threshold level (relevant parameter for the application of\n specific criteria during the identification of R peaks).\n\n Returns\n -------\n out : list\n List with the position of the peaks considered as R peak by the algorithm.\n\n \"\"\"\n peaks_amp = [ecg_integrated[peak] for peak in peaks]\n definitive_peaks = []\n for i, peak in enumerate(peaks):\n amp = peaks_amp[i]\n\n # accept if larger than threshold and slope in raw signal\n # is +-30% of previous slopes\n if amp > threshold:\n definitive_peaks, spk1, rr_buffer = _acceptpeak(peak, amp, definitive_peaks, spk1,\n rr_buffer)\n\n # accept as qrs if higher than half threshold,\n # but is 360 ms after last qrs and next peak\n # is more than 1.5 rr intervals away\n # just abandon it if there is no peak before\n # or after\n elif amp > threshold / 2 and list(definitive_peaks) and len(peaks) > i + 1:\n mean_rr = numpy.mean(rr_buffer)\n last_qrs_ms = (peak - definitive_peaks[-1]) * (1000 / sample_rate)\n last_qrs_to_next_peak = peaks[i+1] - definitive_peaks[-1]\n\n if last_qrs_ms > 360 and last_qrs_to_next_peak > 1.5 * mean_rr:\n definitive_peaks, spk1, rr_buffer = _acceptpeak(peak, amp, definitive_peaks, spk1,\n rr_buffer)\n else:\n npk1 = _noisepeak(amp, npk1)\n # if not either of these it is noise\n else:\n npk1 = _noisepeak(amp, npk1)\n threshold = _buffer_update(npk1, spk1)\n\n definitive_peaks = numpy.array(definitive_peaks)\n\n return definitive_peaks\n\n\ndef _acceptpeak(peak, amp, definitive_peaks, spk1, rr_buffer):\n \"\"\"\n Private function intended to insert a new RR interval in the buffer.\n\n ----------\n Parameters\n ----------\n peak : int\n Sample where the peak under analysis is located.\n amp : int\n Amplitude of the peak under analysis.\n definitive_peaks : list\n List with the definitive_peaks stored until the present instant.\n spk1 : float\n Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named signal peak).\n rr_buffer : list\n Data 
structure that stores the duration of the last eight RR intervals.\n\n Returns\n -------\n definitive_peaks_out : list\n Definitive peaks list.\n spk1 : float\n Updated value of SPK1 parameter.\n rr_buffer : list\n Buffer after appending a new RR interval and excluding the oldest one.\n\n \"\"\"\n\n definitive_peaks_out = definitive_peaks\n definitive_peaks_out = numpy.append(definitive_peaks_out, peak)\n spk1 = 0.125 * amp + 0.875 * spk1 # spk1 is the running estimate of the signal peak\n if len(definitive_peaks_out) > 1:\n rr_buffer.pop(0)\n rr_buffer += [definitive_peaks_out[-1] - definitive_peaks_out[-2]]\n\n return numpy.array(definitive_peaks_out), spk1, rr_buffer\n\n\ndef _noisepeak(amp, npk1):\n \"\"\"\n Private function intended to insert a new RR interval in the buffer.\n\n ----------\n Parameters\n ----------\n amp : int\n Amplitude of the peak under analysis.\n npk1 : int\n Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm\n (named noise peak).\n\n Returns\n -------\n npk1 : float\n Updated value of NPK1 parameter.\n \"\"\"\n npk1 = 0.125 * amp + 0.875 * npk1 # npk1 is the running estimate of the noise peak\n\n return npk1\n\n\ndef tachogram(data, sample_rate, signal=False, in_seconds=False, out_seconds=False):\n \"\"\"\n Function for generation of ECG Tachogram.\n\n ----------\n Parameters\n ----------\n data : list\n ECG signal or R peak list. When the input is a raw signal the input flag signal should be\n True.\n\n sample_rate : int\n Sampling frequency.\n\n signal : boolean\n If True, then the data argument contains the set of the ECG acquired samples.\n\n in_seconds : boolean\n If the R peaks list defined as the input argument \"data\" contains the sample numbers where\n the R peaks occur, then in_seconds needs to be False.\n\n out_seconds : boolean\n If True then each sample of the returned time axis is expressed in seconds.\n\n Returns\n -------\n out : list, list\n List of tachogram samples. 
List of instants where each cardiac cycle ends.\n\n \"\"\"\n\n if signal is False: # data is a list of R peaks position.\n data_copy = data\n time_axis = numpy.array(data)#.cumsum()\n if out_seconds is True and in_seconds is False:\n time_axis = time_axis / sample_rate\n else: # data is a ECG signal.\n # Detection of R peaks.\n data_copy = detect_r_peaks(data, sample_rate, time_units=out_seconds, volts=False,\n resolution=None, plot_result=False)[0]\n time_axis = data_copy\n\n # Generation of Tachogram.\n tachogram_data = numpy.diff(time_axis)\n tachogram_time = time_axis[1:]\n\n return tachogram_data, tachogram_time\n\n\n# [Muscular Activation Detection Algorithm]\n\ndef _thres_norm_reg(threshold_level, signal, pre_smooth_signal):\n \"\"\"\n Regression function that with a percent input gives an absolute value of the threshold\n level (used in the muscular activation detection algorithm).\n Converts a relative threshold level to an absolute value.\n\n ----------\n Parameters\n ----------\n threshold_level : int\n Percentage value that defines the absolute threshold level relatively to the maximum value\n of signal.\n signal : list\n List of EMG smoothed signal samples.\n pre_smooth_signal : list\n Original EMG samples.\n\n Returns\n -------\n out : float\n Threshold level in absolute format.\n\n \"\"\"\n avg_signal = numpy.average(pre_smooth_signal)\n std_signal = numpy.std(pre_smooth_signal)\n\n threshold_0_perc_level = (-avg_signal) / float(std_signal)\n threshold_100_perc_level = (numpy.max(signal) - avg_signal) / float(std_signal)\n\n slope, b_coeff = linregress([0, 100], [threshold_0_perc_level, threshold_100_perc_level])[:2]\n return slope * threshold_level + b_coeff\n\n# 01/10/2018 19h19m :)\n"
] |
[
[
"numpy.max",
"numpy.shape",
"numpy.min"
],
[
"numpy.absolute",
"scipy.signal.filtfilt",
"numpy.power",
"numpy.arange",
"numpy.max",
"numpy.std",
"numpy.append",
"numpy.diff",
"numpy.zeros_like",
"scipy.signal.butter",
"scipy.stats.linregress",
"numpy.mean",
"numpy.array",
"numpy.average",
"numpy.where"
]
] |
jlevine18/mirgecom
|
[
"5089accba9f7954ca426ee5b3bd97c511e4f6861"
] |
[
"examples/wave-mpi.py"
] |
[
"\"\"\"Demonstrate wave MPI example.\"\"\"\n\n__copyright__ = \"Copyright (C) 2020 University of Illinois Board of Trustees\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport logging\n\nimport numpy as np\nimport numpy.linalg as la # noqa\nimport pyopencl as cl\n\nfrom pytools.obj_array import flat_obj_array\n\nfrom meshmode.array_context import (PyOpenCLArrayContext,\n PytatoPyOpenCLArrayContext)\nfrom arraycontext import thaw, freeze\n\nfrom mirgecom.profiling import PyOpenCLProfilingArrayContext # noqa\n\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\n\nfrom grudge.eager import EagerDGDiscretization\nfrom grudge.shortcuts import make_visualizer\nfrom mirgecom.mpi import mpi_entry_point\nfrom mirgecom.integrators import rk4_step\nfrom mirgecom.wave import wave_operator\n\nimport pyopencl.tools as cl_tools\n\nfrom logpyle import IntervalTimer, set_dt\n\nfrom mirgecom.logging_quantities import (initialize_logmgr,\n logmgr_add_cl_device_info,\n logmgr_add_device_memory_usage)\n\n\ndef bump(actx, discr, t=0):\n \"\"\"Create a bump.\"\"\"\n source_center = np.array([0.2, 0.35, 0.1])[:discr.dim]\n source_width = 0.05\n source_omega = 3\n\n nodes = thaw(discr.nodes(), actx)\n center_dist = flat_obj_array([\n nodes[i] - source_center[i]\n for i in range(discr.dim)\n ])\n\n return (\n np.cos(source_omega*t)\n * actx.np.exp(\n -np.dot(center_dist, center_dist)\n / source_width**2))\n\n\n@mpi_entry_point\ndef main(snapshot_pattern=\"wave-mpi-{step:04d}-{rank:04d}.pkl\", restart_step=None,\n use_profiling=False, use_logmgr=False, actx_class=PyOpenCLArrayContext):\n \"\"\"Drive the example.\"\"\"\n cl_ctx = cl.create_some_context()\n queue = cl.CommandQueue(cl_ctx)\n\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n num_parts = comm.Get_size()\n\n logmgr = initialize_logmgr(use_logmgr,\n filename=\"wave-mpi.sqlite\", mode=\"wu\", mpi_comm=comm)\n if use_profiling:\n queue = cl.CommandQueue(cl_ctx,\n properties=cl.command_queue_properties.PROFILING_ENABLE)\n actx = actx_class(queue,\n allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),\n logmgr=logmgr)\n else:\n queue = cl.CommandQueue(cl_ctx)\n actx = actx_class(queue,\n allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))\n\n if restart_step is None:\n\n from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis\n mesh_dist = MPIMeshDistributor(comm)\n\n dim = 2\n nel_1d = 16\n\n if mesh_dist.is_mananger_rank():\n from meshmode.mesh.generation import 
generate_regular_rect_mesh\n mesh = generate_regular_rect_mesh(\n a=(-0.5,)*dim, b=(0.5,)*dim,\n nelements_per_axis=(nel_1d,)*dim)\n\n print(\"%d elements\" % mesh.nelements)\n part_per_element = get_partition_by_pymetis(mesh, num_parts)\n local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)\n\n del mesh\n\n else:\n local_mesh = mesh_dist.receive_mesh_part()\n\n fields = None\n\n else:\n from mirgecom.restart import read_restart_data\n restart_data = read_restart_data(\n actx, snapshot_pattern.format(step=restart_step, rank=rank)\n )\n local_mesh = restart_data[\"local_mesh\"]\n nel_1d = restart_data[\"nel_1d\"]\n assert comm.Get_size() == restart_data[\"num_parts\"]\n\n order = 3\n\n discr = EagerDGDiscretization(actx, local_mesh, order=order,\n mpi_communicator=comm)\n\n current_cfl = 0.485\n wave_speed = 1.0\n from grudge.dt_utils import characteristic_lengthscales\n dt = current_cfl * characteristic_lengthscales(actx, discr) / wave_speed\n\n from grudge.op import nodal_min\n dt = nodal_min(discr, \"vol\", dt)\n\n t_final = 3\n\n if restart_step is None:\n t = 0\n istep = 0\n\n fields = flat_obj_array(\n bump(actx, discr),\n [discr.zeros(actx) for i in range(discr.dim)]\n )\n\n else:\n t = restart_data[\"t\"]\n istep = restart_step\n assert istep == restart_step\n restart_fields = restart_data[\"fields\"]\n old_order = restart_data[\"order\"]\n if old_order != order:\n old_discr = EagerDGDiscretization(actx, local_mesh, order=old_order,\n mpi_communicator=comm)\n from meshmode.discretization.connection import make_same_mesh_connection\n connection = make_same_mesh_connection(actx, discr.discr_from_dd(\"vol\"),\n old_discr.discr_from_dd(\"vol\"))\n fields = connection(restart_fields)\n else:\n fields = restart_fields\n\n if logmgr:\n logmgr_add_cl_device_info(logmgr, queue)\n logmgr_add_device_memory_usage(logmgr, queue)\n\n logmgr.add_watches([\"step.max\", \"t_step.max\", \"t_log.max\"])\n\n try:\n logmgr.add_watches([\"memory_usage_python.max\", \"memory_usage_gpu.max\"])\n except KeyError:\n pass\n\n if use_profiling:\n logmgr.add_watches([\"multiply_time.max\"])\n\n vis_timer = IntervalTimer(\"t_vis\", \"Time spent visualizing\")\n logmgr.add_quantity(vis_timer)\n\n vis = make_visualizer(discr)\n\n def rhs(t, w):\n return wave_operator(discr, c=wave_speed, w=w)\n\n compiled_rhs = actx.compile(rhs)\n\n while t < t_final:\n if logmgr:\n logmgr.tick_before()\n\n # restart must happen at beginning of step\n if istep % 100 == 0 and (\n # Do not overwrite the restart file that we just read.\n istep != restart_step):\n from mirgecom.restart import write_restart_file\n write_restart_file(\n actx, restart_data={\n \"local_mesh\": local_mesh,\n \"order\": order,\n \"fields\": fields,\n \"t\": t,\n \"step\": istep,\n \"nel_1d\": nel_1d,\n \"num_parts\": num_parts},\n filename=snapshot_pattern.format(step=istep, rank=rank),\n comm=comm\n )\n\n if istep % 10 == 0:\n print(istep, t, discr.norm(fields[0]))\n vis.write_parallel_vtk_file(\n comm,\n \"fld-wave-mpi-%03d-%04d.vtu\" % (rank, istep),\n [\n (\"u\", fields[0]),\n (\"v\", fields[1:]),\n ]\n )\n\n fields = thaw(freeze(fields, actx), actx)\n fields = rk4_step(fields, t, dt, compiled_rhs)\n\n t += dt\n istep += 1\n\n if logmgr:\n set_dt(logmgr, dt)\n logmgr.tick_after()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO)\n # Turn off profiling to not overwhelm CI\n use_profiling = False\n use_logging = True\n\n import argparse\n parser = 
argparse.ArgumentParser(description=\"Wave (MPI version)\")\n parser.add_argument(\"--lazy\", action=\"store_true\",\n help=\"switch to a lazy computation mode\")\n args = parser.parse_args()\n\n main(use_profiling=use_profiling, use_logmgr=use_logging,\n actx_class=PytatoPyOpenCLArrayContext if args.lazy\n else PyOpenCLArrayContext)\n\n\n# vim: foldmethod=marker\n"
] |
[
[
"numpy.dot",
"numpy.array",
"numpy.cos"
]
] |
ClaudeCoulombe/DeepRLBootcamp2017
|
[
"83568aae6e4531fb426a981fcea39c0bea60835b"
] |
[
"lab3/simpledqn/gridworld_env.py"
] |
[
"\"\"\"\nThis project was developed by Rein Houthooft, Rocky Duan, Peter Chen, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/.\n\nCode adapted from OpenAI Baselines: https://github.com/openai/baselines\n\nCopyright 2017 Deep RL Bootcamp Organizers.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\n\n\nimport numpy as np\nimport sys\nfrom six import StringIO, b\n\nfrom gym import utils\nfrom gym.envs.toy_text import discrete\nfrom gym.envs.registration import register\n\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\nMAPS = {\n \"4x4\": [\n \"SFFF\",\n \"FFFH\",\n \"FFFF\",\n \"HFFG\"\n ],\n \"8x8\": [\n \"SFFFFFFF\",\n \"FFFFFFFF\",\n \"FFFHFFFF\",\n \"FFFFFHFF\",\n \"FFFHFFFF\",\n \"FHHFFFHF\",\n \"FHFFHFHF\",\n \"FFFHFFFG\"\n ],\n \"9x9\": [\n \"HFFFFFFFH\",\n \"FFFFFFFFF\",\n \"FFFFFFFFF\",\n \"FFFFFFFFF\",\n \"FFFFSFFFF\",\n \"FFFFFFFFF\",\n \"FFFFFFFFF\",\n \"FFFFFFFFF\",\n \"HFFFFFFFH\"\n ]\n}\n\n\ndef to_one_hot(x, len):\n one_hot = np.zeros(len)\n one_hot[x] = 1\n return one_hot\n\n\nclass GridWorld(discrete.DiscreteEnv):\n \"\"\"\n Winter is here. 
You and your friends were tossing around a frisbee at the park\n when you made a wild throw that left the frisbee out in the middle of the lake.\n The water is mostly frozen, but there are a few holes where the ice has melted.\n If you step into one of those holes, you'll fall into the freezing water.\n At this time, there's an international frisbee shortage, so it's absolutely imperative that\n you navigate across the lake and retrieve the disc.\n However, the ice is slippery, so you won't always move in the direction you intend.\n The surface is described using a grid like the following\n\n SFFF\n FHFH\n FFFH\n HFFG\n\n S : starting point, safe\n F : frozen surface, safe\n H : hole, fall to your doom\n G : goal, where the frisbee is located\n\n The episode ends when you reach the goal or fall in a hole.\n You receive a reward of 1 if you reach the goal, and zero otherwise.\n\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, desc=None, map_name=\"4x4\", is_slippery=False):\n if desc is None and map_name is None:\n raise ValueError('Must provide either desc or map_name')\n elif desc is None:\n desc = MAPS[map_name]\n self.desc = desc = np.asarray(desc, dtype='c')\n self.nrow, self.ncol = nrow, ncol = desc.shape\n\n nA = 4\n nS = nrow * ncol\n\n isd = np.array(desc == b'S').astype('float64').ravel()\n isd /= isd.sum()\n\n P = {s: {a: [] for a in range(nA)} for s in range(nS)}\n\n def to_s(row, col):\n return row * ncol + col\n\n def inc(row, col, a):\n if a == 0: # left\n col = max(col - 1, 0)\n elif a == 1: # down\n row = min(row + 1, nrow - 1)\n elif a == 2: # right\n col = min(col + 1, ncol - 1)\n elif a == 3: # up\n row = max(row - 1, 0)\n return (row, col)\n\n for row in range(nrow):\n for col in range(ncol):\n s = to_s(row, col)\n for a in range(4):\n li = P[s][a]\n letter = desc[row, col]\n if letter in b'GH':\n li.append((1.0, s, 0, True))\n else:\n if is_slippery:\n for b in [(a - 1) % 4, a, (a + 1) % 4]:\n newrow, newcol = inc(row, col, b)\n newstate = to_s(newrow, newcol)\n newletter = desc[newrow, newcol]\n done = bytes(newletter) in b'GH'\n if newletter == b'G':\n rew = 1.0\n elif newletter == b'H':\n rew = .0\n else:\n rew = 0.\n # rew = float(newletter == b'G')\n li.append((1.0 / 3.0, newstate, rew, done))\n else:\n newrow, newcol = inc(row, col, a)\n newstate = to_s(newrow, newcol)\n newletter = desc[newrow, newcol]\n done = bytes(newletter) in b'GH'\n # rew = float(newletter == b'G')\n if newletter == b'G':\n rew = 1.0\n elif newletter == b'H':\n rew = 0.\n else:\n rew = 0.\n li.append((1.0, newstate, rew, done))\n\n super(GridWorld, self).__init__(nS, nA, P, isd)\n\n def _reset(self):\n s = super(GridWorld, self)._reset()\n return to_one_hot(s, self.nS)\n\n def _step(self, a):\n s, r, d, p = super(GridWorld, self)._step(a)\n return to_one_hot(s, self.nS), r, d, p\n\n def print_obs(self, obs):\n import copy\n map = copy.deepcopy(self.desc).astype(str)\n _obs = int(np.where(obs == 1)[0][0])\n map[_obs // 9, _obs % 9] = 'X'\n for row in map:\n print(row)\n\n def _render(self, mode='human', close=False):\n if close:\n return\n outfile = StringIO() if mode == 'ansi' else sys.stdout\n\n row, col = self.s // self.ncol, self.s % self.ncol\n desc = self.desc.tolist()\n desc = [[c.decode('utf-8') for c in line] for line in desc]\n desc[row][col] = utils.colorize(desc[row][col], \"red\", highlight=True)\n if self.lastaction is not None:\n outfile.write(\" ({})\\n\".format(\n [\"Left\", \"Down\", \"Right\", \"Up\"][self.lastaction]))\n else:\n 
outfile.write(\"\\n\")\n outfile.write(\"\\n\".join(''.join(line) for line in desc) + \"\\n\")\n\n if mode != 'human':\n return outfile\n\n\nregister(\n 'GridWorld-v0',\n entry_point='simpledqn.gridworld_env:GridWorld',\n timestep_limit=40,\n)\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
pyansys/pydpf-post
|
[
"8fea9103259786067d3451dc12e7c0ae5a38ea33"
] |
[
"tests/test_electricanalysis.py"
] |
[
"import numpy as np\nimport pytest\nfrom ansys.dpf import post\nfrom ansys.dpf.post import errors as dpf_errors\nfrom ansys.dpf.post.common import _PhysicsType\nfrom ansys.dpf.post.electric_results import ElectricField, ElectricPotential\n\n\ndef test_electricfield(rth_electric):\n solution = post.load_solution(rth_electric)\n assert solution._model.metadata.result_info.physics_type == _PhysicsType.thermal\n ef = solution.electric_field()\n assert isinstance(ef, ElectricField)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data[20]) == 3\n assert np.isclose(s[0].data[23][1], 19.562952041625977)\n\n # with dpf.core operator\n from ansys.dpf import core\n\n op = core.Operator(\"EF\")\n op.inputs.requested_location.connect(core.locations.nodal)\n op.inputs.data_sources.connect(core.DataSources(rth_electric))\n fc = op.outputs.fields_container()\n assert len(fc) == s.num_fields\n assert fc[0].location == s[0].location\n assert len(fc[0].data[20]) == len(s[0].data[20])\n assert np.allclose(s[0].data.tolist(), fc[0].data.tolist())\n\n\ndef test_electricfield_nodscoping(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(node_scoping=[2])\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data) == 1\n assert len(s[0].data[0]) == 3\n assert np.allclose(\n s[0].data[0].tolist(), [5.25223311e-14, 1.95629520e01, 2.82945325e-14]\n )\n ef = solution.electric_field(location=post.locations.elemental, node_scoping=[2])\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental\n assert len(s[0].data) == 8\n assert len(s[0].data[0]) == 3\n assert np.allclose(\n s[0].data[0].tolist(), [-3.41948692e-14, 1.95629520e01, 7.77156117e-15]\n )\n ef = solution.electric_field(\n location=post.locations.elemental_nodal, node_scoping=[2]\n )\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental_nodal\n assert len(s[0].data) == 8\n assert len(s[0].data[0]) == 3\n assert np.allclose(\n s[0].data.tolist(), [2.63128894e-11, 1.95629520e01, 2.62733394e-11]\n )\n\n\n@pytest.mark.skipif(\n True, reason=\"element scoping not available with electrical results.\"\n)\ndef test_electricfield_elemscoping(rth_electric):\n raise Exception(\"Element scoping on electric_field does not work.\")\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(element_scoping=[2])\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data) == 20\n assert len(s[0].data[0]) == 3\n # assert np.isclose(s[0].data[0].tolist(), [2.63128894e-11, 1.95629520e+01, 2.62733394e-11])\n ef = solution.electric_field(location=post.locations.elemental, element_scoping=[2])\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental\n assert len(s[0].data) == 3\n # assert np.isclose(s[0].data.tolist(), [-3.41948692e-14, 1.95629520e+01, 7.77156117e-15])\n ef = solution.electric_field(\n location=post.locations.elemental_nodal, element_scoping=[2]\n )\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental_nodal\n assert len(s[0].data) == 8\n assert len(s[0].data[0]) == 3\n # assert np.isclose(s[0].data.tolist(), [-3.41948692e-14, 1.95629520e+01, 7.77156117e-15])\n\n\ndef test_electricfield_nodlocation(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = 
solution.electric_field()\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n\n\ndef test_electricfield_elemlocation(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(location=post.locations.elemental)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental\n\n\ndef test_electricfield_elemnodlocation(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(location=post.locations.elemental_nodal)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.elemental_nodal\n\n\ndef test_electricfield_timescoping(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(time_scoping=1)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data[20]) == 3\n assert np.isclose(s[0].data[23][1], 19.562952041625977)\n\n\ndef test_electricfield_time(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(time=1.0)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data[20]) == 3\n assert np.isclose(s[0].data[23][1], 19.562952041625977)\n\n\ndef test_electricfield_set(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_field(set=1)\n s = ef.vector\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data[20]) == 3\n assert np.isclose(s[0].data[23][1], 19.562952041625977)\n\n\ndef test_electricpotential(rth_electric):\n solution = post.load_solution(rth_electric)\n assert solution._model.metadata.result_info.physics_type == _PhysicsType.thermal\n ef = solution.electric_potential()\n assert isinstance(ef, ElectricPotential)\n s = ef.scalar\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data) == 4125\n assert np.isclose(s[0].data[23], 0.09781476007338061)\n\n # with dpf.core operator\n from ansys.dpf import core\n\n op = core.Operator(\"VOLT\")\n # op.inputs.requested_location.connect(core.locations.nodal)\n op.inputs.data_sources.connect(core.DataSources(rth_electric))\n fc = op.outputs.fields_container()\n assert len(fc) == s.num_fields\n assert fc[0].location == s[0].location\n assert len(fc[0].data) == len(s[0].data)\n assert np.allclose(s[0].data.tolist(), fc[0].data.tolist())\n comp = core.operators.logic.identical_fc()\n comp.inputs.fields_containerA.connect(fc)\n comp.inputs.fields_containerB.connect(s.result_fields_container)\n out = comp.outputs.boolean()\n assert out == True\n\n\nto_return = \"node scoping and element scoping returns the same\"\n\n\ndef test_electricpotential_nodscoping(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(node_scoping=[2])\n s = ef.scalar\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data) == 1\n assert np.isclose(s[0].data[0], 0.048907380036668786)\n\n\n@pytest.mark.skipif(\n True, reason=\"element scoping not available with electrical results.\"\n)\ndef test_electricpotential_elemscoping(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(node_scoping=[2])\n s = ef.scalar\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n assert len(s[0].data) == 1\n # assert np.isclose(s[0].data[0], 0.02445369)\n raise Exception(to_return)\n\n\ndef 
test_electricpotential_nodlocation(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(location=post.locations.nodal)\n s = ef.scalar\n assert s.num_fields == 1\n assert s[0].location == post.locations.nodal\n\n\ndef test_electricpotential_elemlocation(rth_electric):\n solution = post.load_solution(rth_electric)\n with pytest.raises(dpf_errors.NodalLocationError):\n solution.electric_potential(location=post.locations.elemental)\n\n\ndef test_electricpotential_elemnodallocation(rth_electric):\n solution = post.load_solution(rth_electric)\n with pytest.raises(dpf_errors.NodalLocationError):\n solution.electric_potential(location=post.locations.elemental_nodal)\n\n\ndef test_electricpotential_timescoping(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(time_scoping=[1])\n s = ef.scalar\n assert s.num_fields == 1\n assert len(s[0].data) == 4125\n assert s[0].location == post.locations.nodal\n assert np.isclose(s[0].data[0], 0.07336107005500624)\n\n\ndef test_electricpotential_time(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(set=1)\n s = ef.scalar\n assert s.num_fields == 1\n assert len(s[0].data) == 4125\n assert s[0].location == post.locations.nodal\n assert np.isclose(s[0].data[0], 0.07336107005500624)\n\n\ndef test_electricpotential_set(rth_electric):\n solution = post.load_solution(rth_electric)\n ef = solution.electric_potential(time=1.0)\n s = ef.scalar\n assert s.num_fields == 1\n assert len(s[0].data) == 4125\n assert s[0].location == post.locations.nodal\n assert np.isclose(s[0].data[0], 0.07336107005500624)\n"
] |
[
[
"numpy.isclose"
]
] |
CDMCH/ddpg-curiosity-and-multi-criteria-her
|
[
"49c3aabe73e19aeec06cde0c3e0b6ab239d04467"
] |
[
"ddpg_curiosity_mc_her/ddpg/main.py"
] |
[
"import argparse\nimport time\nimport os\nfrom ddpg_curiosity_mc_her import logger\nfrom ddpg_curiosity_mc_her.common.misc_util import (\n set_global_seeds,\n)\nimport ddpg_curiosity_mc_her.ddpg.training as training\nimport ddpg_curiosity_mc_her.ddpg.demo as demo\nfrom ddpg_curiosity_mc_her.ddpg.config import DEFAULT_PARAMS, get_convert_arg_to_type_fn, prepare_params, log_params, configure_dims, \\\n configure_rollout_worker, create_agents, configure_memory\nfrom ddpg_curiosity_mc_her.ddpg.policy_selection import get_policy_fn\nimport ddpg_curiosity_mc_her.common.tf_util as U\n\nimport tensorflow as tf\nfrom mpi4py import MPI\n\n\ndef run(args):\n # Configure things.\n rank = MPI.COMM_WORLD.Get_rank()\n if rank != 0:\n logger.set_level(logger.DISABLED)\n\n # If we are supposed to divide gpu usage among a specific set of devices,\n # set this processes' device to the correct one.\n gpu_nums = args['split_gpu_usage_among_device_nums']\n if gpu_nums is not None:\n gpu_num_to_use = gpu_nums[rank % len(gpu_nums)]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_num_to_use)\n\n # Seed everything to make things reproducible.\n rank_seed = args['seed'] + 1000000 * rank\n logger.info('rank {}: seed={}, logdir={}'.format(rank, rank_seed, logger.get_dir()))\n tf.reset_default_graph()\n set_global_seeds(rank_seed)\n\n input_dims = configure_dims(args)\n\n # Configure the replay buffer.\n memory = configure_memory(args)\n\n with U.single_threaded_session() as sess:\n # Setup up DDPG Agents\n\n agents = create_agents(sess=sess, memory=memory, input_dims=input_dims, params=args)\n\n saver = tf.train.Saver()\n if args['restore_from_ckpt'] is not None:\n logger.info(\"Restoring agents from {}\".format(args['restore_from_ckpt']))\n saver.restore(sess, args['restore_from_ckpt'])\n\n sess.graph.finalize()\n logger.log_graph_to_tensorboard(sess.graph)\n\n # Setup Rollout workers\n train_policy_fn = get_policy_fn(\n name=args['train_policy_fn'], agents=agents\n )\n eval_policy_fn = get_policy_fn(\n name=args['eval_policy_fn'], agents=agents\n )\n\n train_rollout_worker = configure_rollout_worker(\n role='train', policy_fn=train_policy_fn, agents=agents, dims=input_dims,\n seed=rank_seed, logger=logger, params=args\n )\n eval_rollout_worker = configure_rollout_worker(\n role='eval', policy_fn=eval_policy_fn, agents=agents, dims=input_dims,\n seed=rank_seed, logger=logger, params=args\n )\n\n # Begin main training loop\n if rank == 0:\n start_time = time.time()\n\n if args['do_demo_only'] is False:\n training.train(\n memory=memory, agents=agents, saver=saver, sess=sess,\n train_rollout_worker=train_rollout_worker, eval_rollout_worker=eval_rollout_worker,\n param_noise_adaption_interval=50, **args\n )\n else:\n demo.demo(agents=agents, eval_rollout_worker=eval_rollout_worker,\n demo_video_recording_name=args[\"demo_video_recording_name\"])\n\n train_rollout_worker.close()\n eval_rollout_worker.close()\n\n if rank == 0:\n logger.info('total runtime: {}s'.format(time.time() - start_time))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n for key, value in DEFAULT_PARAMS.items():\n key = '--' + key.replace('_', '-')\n parser.add_argument(key, type=get_convert_arg_to_type_fn(type(value)), default=value)\n\n args = parser.parse_args()\n dict_args = vars(args)\n\n logger.configure()\n\n dict_args = prepare_params(dict_args)\n log_params(dict_args)\n run(dict_args)\n"
] |
[
[
"tensorflow.train.Saver",
"tensorflow.reset_default_graph"
]
] |
PetritIgrishtaj/new_NIH
|
[
"f1674e806df85d8109220d9b3be73cadb9289499"
] |
[
"modules/collate.py"
] |
[
"\nimport collections\nimport re\n\nimport numpy as np\nimport torch\n\nnp_str_obj_array_pattern = re.compile(r'[SaUO]')\n\ndef cf(batch):\n r\"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(default_collate_err_msg_format.format(elem.dtype))\n\n # return cf([torch.as_tensor(b) for b in batch])\n stacked = np.stack(batch)\n return torch.as_tensor(stacked)\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, collections.abc.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in list of batch should be of equal size')\n transposed = zip(*batch)\n return [cf(samples) for samples in transposed]\n\n raise TypeError(default_collate_err_msg_format.format(elem_type))\n"
] |
[
[
"torch.stack",
"torch.utils.data.get_worker_info",
"numpy.stack",
"torch.as_tensor"
]
] |
duccluong/proReadBook
|
[
"65ba6ad25902d4920ad96c00a843cbea7d831c52"
] |
[
"src/DataLoader.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport numpy as np\nimport cv2\nfrom SamplePreprocessor import preprocess\n\n\nclass Sample:\n\t\"sample from the dataset\"\n\tdef __init__(self, gtText, filePath):\n\t\tself.gtText = gtText\n\t\tself.filePath = filePath\n\n\nclass Batch:\n\t\"batch containing images and ground truth texts\"\n\tdef __init__(self, gtTexts, imgs):\n\t\tself.imgs = np.stack(imgs, axis=0)\n\t\tself.gtTexts = gtTexts\n\n\nclass DataLoader:\n\t\"loads data which corresponds to IAM format, see: http://www.fki.inf.unibe.ch/databases/iam-handwriting-database\" \n\n\tdef __init__(self, filePath, batchSize, imgSize, maxTextLen):\n\t\t\"loader for dataset at given location, preprocess images and text according to parameters\"\n\n\t\tassert filePath[-1]=='/'\n\n\t\tself.dataAugmentation = False\n\t\tself.currIdx = 0\n\t\tself.batchSize = batchSize\n\t\tself.imgSize = imgSize\n\t\tself.samples = []\n\t\n\t\tf=open(filePath+'words.txt')\n\t\t\n\t\t# Editted by yhuynh\n\t\tlines = f.readlines()\n\t\trandom.shuffle(lines)\n\t\topen(filePath + 'words_suffle.txt', 'w').writelines(lines)\n\t\tf=open(filePath+'words_suffle.txt')\n\n\n\t\tchars = set()\n\t\tbad_samples = []\n\t\tbad_samples_reference = ['a01-117-05-02.png', 'r06-022-03-05.png']\n\t\tfor line in f:\n\t\t\t# ignore comment line\n\t\t\tif not line or line[0]=='#':\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tlineSplit = line.strip().split(' ')\n\t\t\tassert len(lineSplit) >= 9\n\t\t\t\n\t\t\t# filename: part1-part2-part3 --> part1/part1-part2/part1-part2-part3.png\n\t\t\tfileNameSplit = lineSplit[0].split('-')\n\t\t\tfileName = filePath + 'words/' + fileNameSplit[0] + '/' + lineSplit[0] + '.png'\n\n\t\t\t# GT text are columns starting at 9\n\t\t\tgtText = self.truncateLabel(' '.join(lineSplit[8:]), maxTextLen)\n\t\t\tchars = chars.union(set(list(gtText)))\n\n\t\t\t# check if image is not empty\n\t\t\tif not os.path.getsize(fileName):\n\t\t\t\tbad_samples.append(lineSplit[0] + '.png')\n\t\t\t\tcontinue\n\n\t\t\t# put sample into list\n\t\t\tself.samples.append(Sample(gtText, fileName))\n\n\t\t# some images in the IAM dataset are known to be damaged, don't show warning for them\n\t\tif set(bad_samples) != set(bad_samples_reference):\n\t\t\tprint(\"Warning, damaged images found:\", bad_samples)\n\t\t\tprint(\"Damaged images expected:\", bad_samples_reference)\n\n\t\t# split into training and validation set: 95% - 5%\n\t\tsplitIdx = int(0.95 * len(self.samples))\n\t\tself.trainSamples = self.samples[:splitIdx]\n\t\tself.validationSamples = self.samples[splitIdx:]\n\n\t\t# put words into lists\n\t\tself.trainWords = [x.gtText for x in self.trainSamples]\n\t\tself.validationWords = [x.gtText for x in self.validationSamples]\n\n\t\t# number of randomly chosen samples per epoch for training \n\t\tself.numTrainSamplesPerEpoch = 25000 \n\t\t\n\t\t# start with train set\n\t\tself.trainSet()\n\n\t\t# list of all chars in dataset\n\t\tself.charList = sorted(list(chars))\n\n\n\tdef truncateLabel(self, text, maxTextLen):\n\t\t# ctc_loss can't compute loss if it cannot find a mapping between text label and input \n\t\t# labels. 
Repeat letters cost double because of the blank symbol needing to be inserted.\n\t\t# If a too-long label is provided, ctc_loss returns an infinite gradient\n\t\tcost = 0\n\t\tfor i in range(len(text)):\n\t\t\tif i != 0 and text[i] == text[i-1]:\n\t\t\t\tcost += 2\n\t\t\telse:\n\t\t\t\tcost += 1\n\t\t\tif cost > maxTextLen:\n\t\t\t\treturn text[:i]\n\t\treturn text\n\n\n\tdef trainSet(self):\n\t\t\"switch to randomly chosen subset of training set\"\n\t\tself.dataAugmentation = True\n\t\tself.currIdx = 0\n\t\trandom.shuffle(self.trainSamples)\n\t\tself.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]\n\n\t\n\tdef validationSet(self):\n\t\t\"switch to validation set\"\n\t\tself.dataAugmentation = False\n\t\tself.currIdx = 0\n\t\tself.samples = self.validationSamples\n\n\n\tdef getIteratorInfo(self):\n\t\t\"current batch index and overall number of batches\"\n\t\treturn (self.currIdx // self.batchSize + 1, len(self.samples) // self.batchSize)\n\n\n\tdef hasNext(self):\n\t\t\"iterator\"\n\t\treturn self.currIdx + self.batchSize <= len(self.samples)\n\t\t\n\t\t\n\tdef getNext(self):\n\t\t\"iterator\"\n\t\tbatchRange = range(self.currIdx, self.currIdx + self.batchSize)\n\t\tgtTexts = [self.samples[i].gtText for i in batchRange]\n\t\timgs = [preprocess(cv2.imread(self.samples[i].filePath, cv2.IMREAD_GRAYSCALE), self.imgSize, self.dataAugmentation) for i in batchRange]\n\t\tself.currIdx += self.batchSize\n\t\treturn Batch(gtTexts, imgs)\n\n\n"
] |
[
[
"numpy.stack"
]
] |
anlthms/neon
|
[
"cba318c9f0a2acf2ab8a3d7725b588b2a8b17cb9"
] |
[
"neon/data/image.py"
] |
[
"# ----------------------------------------------------------------------------\n# Copyright 2014-2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\"\"\"\nClass definitions for image data sets.\n\"\"\"\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases() # triggers E402, hence noqa below\nfrom builtins import str # noqa\n\nimport gzip # noqa\nimport logging # noqa\nimport numpy as np # noqa\nimport os # noqa\nimport tarfile # noqa\n\nfrom neon.util.compat import pickle # noqa\nfrom neon.util.compat import pickle_load # noqa\nfrom neon.data.datasets import Dataset # noqa\nfrom neon.data.dataiterator import ArrayIterator # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass MNIST(Dataset):\n \"\"\"\n MNIST data set from http://yann.lecun.com/exdb/mnist/\n\n Normalize defaults to true and scales the data 0 to 1.\n Size defaults to 28 for 28x28 pixels, specifying a smaller values allows\n cropping to a smaller size.\n \"\"\"\n def __init__(self, path='.', subset_pct=100, normalize=True, size=28):\n super(MNIST, self).__init__('mnist.pkl.gz',\n 'https://s3.amazonaws.com/img-datasets',\n 15296311,\n path=path,\n subset_pct=subset_pct)\n self.normalize = normalize\n self.size = size\n\n def load_data(self):\n \"\"\"\n Fetch the MNIST dataset and load it into memory.\n\n Arguments:\n path (str, optional): Local directory in which to cache the raw\n dataset. 
Defaults to current directory.\n normalize (bool, optional): Whether to scale values between 0 and 1.\n Defaults to True.\n\n Returns:\n tuple: Both training and test sets are returned.\n \"\"\"\n filepath = self._valid_path_append(self.path, self.filename)\n if not os.path.exists(filepath):\n self.fetch_dataset(self.url, self.filename, filepath, self.size)\n\n with gzip.open(filepath, 'rb') as mnist:\n (X_train, y_train), (X_test, y_test) = pickle_load(mnist)\n X_train = X_train[:, :self.size, :self.size]\n X_test = X_test[:, :self.size, :self.size]\n X_train = X_train.reshape(-1, self.size*self.size)\n X_test = X_test.reshape(-1, self.size*self.size)\n\n if self.normalize:\n X_train = X_train / 255.\n X_test = X_test / 255.\n\n return (X_train, y_train), (X_test, y_test), 10\n\n def gen_iterators(self):\n (X_train, y_train), (X_test, y_test), nclass = self.load_data()\n train = ArrayIterator(X_train,\n y_train,\n nclass=nclass,\n lshape=(1, self.size, self.size),\n name='train')\n val = ArrayIterator(X_test,\n y_test,\n nclass=nclass,\n lshape=(1, self.size, self.size),\n name='valid')\n self._data_dict = {'train': train,\n 'valid': val}\n return self._data_dict\n\n\nclass CIFAR10(Dataset):\n \"\"\"\n CIFAR10 data set from https://www.cs.toronto.edu/~kriz/cifar.html\n\n Arguments:\n path (str): Local path to copy data files.\n normalize (bool): Flag to normalize data.\n whiten (bool): Flag to apply whitening transform.\n pad_classes (bool): Flag to pad out class count to 16\n for compatibility with conv layers on GPU.\n \"\"\"\n def __init__(self, path='.', subset_pct=100, normalize=True,\n contrast_normalize=False, whiten=False, pad_classes=False):\n super(CIFAR10, self).__init__('cifar-10-python.tar.gz',\n 'http://www.cs.toronto.edu/~kriz',\n 170498071,\n path=path,\n subset_pct=subset_pct)\n # CIFAR10 load method specific options\n self.normalize = normalize\n self.contrast_normalize = contrast_normalize\n self.whiten = whiten\n self.pad_classes = pad_classes\n\n def load_data(self):\n \"\"\"\n Fetch the CIFAR-10 dataset and load it into memory.\n\n Arguments:\n path (str, optional): Local directory in which to cache the raw\n dataset. 
Defaults to current directory.\n normalize (bool, optional): Whether to scale values between 0 and 1.\n Defaults to True.\n\n Returns:\n tuple: Both training and test sets are returned.\n \"\"\"\n workdir, filepath = self._valid_path_append(self.path, '', self.filename)\n batchdir = os.path.join(workdir, 'cifar-10-batches-py')\n if not os.path.exists(os.path.join(batchdir, 'data_batch_1')):\n if not os.path.exists(filepath):\n self.fetch_dataset(self.url, self.filename, filepath, self.size)\n with tarfile.open(filepath, 'r:gz') as f:\n f.extractall(workdir)\n\n train_batches = [os.path.join(batchdir, 'data_batch_' + str(i)) for i in range(1, 6)]\n Xlist, ylist = [], []\n for batch in train_batches:\n with open(batch, 'rb') as f:\n d = pickle_load(f)\n Xlist.append(d['data'])\n ylist.append(d['labels'])\n\n X_train = np.vstack(Xlist)\n y_train = np.vstack(ylist)\n\n with open(os.path.join(batchdir, 'test_batch'), 'rb') as f:\n d = pickle_load(f)\n X_test, y_test = d['data'], d['labels']\n\n y_train = y_train.reshape(-1, 1)\n y_test = np.array(y_test).reshape(-1, 1)\n\n if self.contrast_normalize:\n norm_scale = 55.0 # Goodfellow\n X_train = self.global_contrast_normalize(X_train, scale=norm_scale)\n X_test = self.global_contrast_normalize(X_test, scale=norm_scale)\n\n if self.normalize:\n X_train = X_train / 255.\n X_test = X_test / 255.\n\n if self.whiten:\n zca_cache = os.path.join(workdir, 'cifar-10-zca-cache.pkl')\n X_train, X_test = self.zca_whiten(X_train, X_test, cache=zca_cache)\n\n return (X_train, y_train), (X_test, y_test), 10\n\n def gen_iterators(self):\n datasets = self.load_data()\n\n (X_train, y_train), (X_test, y_test), nclass = datasets\n if self.pad_classes:\n nclass = 16\n\n train = ArrayIterator(X_train,\n y_train,\n nclass=nclass,\n lshape=(3, 32, 32),\n name='train')\n test = ArrayIterator(X_test,\n y_test,\n nclass=nclass,\n lshape=(3, 32, 32),\n name='valid')\n self._data_dict = {'train': train,\n 'valid': test}\n return self._data_dict\n\n @staticmethod\n def _compute_zca_transform(imgs, filter_bias=0.1):\n \"\"\"\n Compute the zca whitening transform matrix.\n \"\"\"\n logger.info(\"Computing ZCA transform matrix\")\n meanX = np.mean(imgs, 0)\n\n covX = np.cov(imgs.T)\n D, E = np.linalg.eigh(covX + filter_bias * np.eye(covX.shape[0], covX.shape[1]))\n\n assert not np.isnan(D).any()\n assert not np.isnan(E).any()\n assert D.min() > 0\n\n D = D ** -.5\n\n W = np.dot(E, np.dot(np.diag(D), E.T))\n return meanX, W\n\n @staticmethod\n def zca_whiten(train, test, cache=None):\n \"\"\"\n Use train set statistics to apply the ZCA whitening transform to\n both train and test sets.\n \"\"\"\n if cache and os.path.isfile(cache):\n with open(cache, 'rb') as f:\n (meanX, W) = pickle_load(f)\n else:\n meanX, W = CIFAR10._compute_zca_transform(train)\n if cache:\n logger.info(\"Caching ZCA transform matrix\")\n with open(cache, 'wb') as f:\n pickle.dump((meanX, W), f, 2)\n\n logger.info(\"Applying ZCA whitening transform\")\n train_w = np.dot(train - meanX, W)\n test_w = np.dot(test - meanX, W)\n\n return train_w, test_w\n\n @staticmethod\n def global_contrast_normalize(X, scale=1., min_divisor=1e-8):\n \"\"\"\n Subtract mean and normalize by vector norm.\n \"\"\"\n\n X = X - X.mean(axis=1)[:, np.newaxis]\n\n normalizers = np.sqrt((X ** 2).sum(axis=1)) / scale\n normalizers[normalizers < min_divisor] = 1.\n\n X /= normalizers[:, np.newaxis]\n\n return X\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.isnan",
"numpy.eye",
"numpy.cov",
"numpy.mean",
"numpy.array",
"numpy.vstack"
]
] |
shawn233/BLASPoisoning
|
[
"3b54a1f2b83fe2f6a89f8065d519fd8312a3f7b4"
] |
[
"dev/model/imgutils.py"
] |
[
"'''\r\nAuthor: shawn233\r\nDate: 2021-01-15 17:54:21\r\nLastEditors: shawn233\r\nLastEditTime: 2021-01-17 18:37:08\r\nDescription: Image reading tools\r\n'''\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport os\r\nimport logging\r\n# import cv2\r\n# import matplotlib.pyplot as plt\r\n# from skimage import io\r\n\r\n\r\n\r\ndef _readImageFromFile(filepath, as_gray=False):\r\n '''\r\n Read an image from an **existing** file. Remark that this function\r\n does not consider invalid file path due to performance needs.\r\n\r\n Args:\r\n - filepath: str. A string representing a file path.\r\n - as_gray: Boolean. Set as True to convert the image to the gray scale.\r\n\r\n Returns: a numpy.ndarray object (H x W x C).\r\n '''\r\n\r\n with Image.open(filepath) as img:\r\n logging.info(f\"Reading image {filepath}\")\r\n if as_gray:\r\n img = img.convert(\"L\")\r\n arr = np.asarray(img) # (H x W)\r\n arr = arr.reshape((*arr.shape, 1)) # add a channel dimension\r\n else:\r\n arr = np.asarray(img) # (H x W x C)\r\n return arr\r\n\r\n\r\n\r\ndef _readImageFromFileOrDir(file_or_dir, as_gray=False):\r\n '''\r\n Read an image from either a file or a directory.\r\n We also assume that in the given directory and all of its sub-directories,\r\n all files are image files so that Image.open(<fp>) can be directly invoked.\r\n \r\n Args:\r\n - file_or_dir: str. A string representing a file path or a directory.\r\n - as_gray: Boolean. Set as True to convert all images to the gray scale.\r\n\r\n Returns: a list of numpy.ndarray object [(H x W x C)].\r\n\r\n Notes:\r\n - The function currently skips file links (e.g. soft-link in Unix systems)\r\n '''\r\n \r\n if os.path.isfile(file_or_dir):\r\n arr = _readImageFromFile(file_or_dir, as_gray)\r\n return [arr]\r\n elif os.path.isdir(file_or_dir):\r\n ret = []\r\n for root, _, files in os.walk(file_or_dir, topdown=True):\r\n for filename in files:\r\n fp = os.path.join(root, filename)\r\n if fp.endswith(\".lnk\"):\r\n logging.warning(f\"Skipping {fp} because it is a Windows shortcut.\")\r\n else:\r\n ret.append(_readImageFromFile(fp, as_gray))\r\n return ret\r\n else:\r\n logging.warning(f\"Skipping {file_or_dir} because it is neither a file path nor a directory.\")\r\n\r\n\r\n\r\n\r\ndef readImages(files_or_dirs, as_gray=False):\r\n '''\r\n Read images from one of the following sources:\r\n - a single file: `readImages(\"./pics/lenna.png\")`\r\n - a single directory: `readImages(\"./pics/folder\")`\r\n - a list of files and directories: `readImages([\"./pics/lenna.png\", \"./pics/folder\"])`\r\n \r\n Args:\r\n - files_or_dirs: str or list<str>, locations where images will be read.\r\n - as_gray: Boolean. Set as True to convert all images to the gray scale.\r\n\r\n Returns: (N x H x W x C) numpy.ndarray object.\r\n '''\r\n\r\n if type(files_or_dirs) == type([]):\r\n # input is a list\r\n arr = []\r\n for obj in files_or_dirs:\r\n arr.extend(_readImageFromFileOrDir(obj, as_gray))\r\n elif type(files_or_dirs) == type(''):\r\n # input is a string\r\n arr = _readImageFromFileOrDir(files_or_dirs, as_gray)\r\n else:\r\n raise TypeError(f\"Can not read from {files_or_dirs}, a {type(files_or_dirs)} object.\")\r\n \r\n return np.stack(arr, axis=0)\r\n\r\n\r\n\r\ndef main():\r\n logging.basicConfig(level=logging.INFO)\r\n arr = readImages([\"./pics/lenna.png\", \"./pics/folder\"])\r\n print(arr.shape, type(arr))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()"
] |
[
[
"numpy.asarray",
"numpy.stack"
]
] |
e-ucm/countrix-analysis
|
[
"9da9ef419f996e9ebd3709ecc7ad260cc1f72493"
] |
[
"analysis_countrix.py"
] |
[
"\"\"\"\r\nCreated on Fri May 06 11:18:38 2016\r\n\r\n@author: crisal24\r\n\r\nDeveloped as part of e-ucm development group.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\nimport json\r\nimport csv\r\nimport os\r\nimport unicodedata\r\nimport time\r\nimport datetime\r\n\r\n\r\nclass AnalysisCountrix:\r\n\r\n \"\"\" Class for analysis of xAPI statements generated by Countrix game\r\n Countrix available at https://github.com/e-ucm/countrix/\r\n\r\n Attributes\r\n ----------\r\n questions : dict\r\n Dictionary to store questions data\r\n countries : dict\r\n Dictionary to store information of countries\r\n\t\t\twith capitals and continents\r\n players : dict\r\n Dictionary to store players data\r\n verbs: dict\r\n Dictionary to store xAPI verbs used\r\n results: dict\r\n Dictionary to store results of players\r\n count_minutes : numpy.array\r\n 24x60 matrix to store users per minute\r\n\r\n Methods\r\n ----------\r\n load_data\r\n parsing\r\n parse_statement\r\n parse_question_statement\r\n parse_question_type\r\n update_peaks\r\n update_verbs\r\n update_score\r\n update_user_questions\r\n update_question\r\n update_results\r\n store_analysis_questions\r\n store_analysis_sessions\r\n store_analysis_results\r\n store_analysis_peaks\r\n store_analysis_xapi\r\n\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\" __init__ initializes class atributes questions, countries, players,\r\n verbs and results to be empty dictionaries. It also initializes the\r\n class atribute count_minutes to be a 24x60 matrix of zeros.\r\n It also calls load_data method to prepare the countries dictionary\r\n and parsing method to automatically start parsing.\r\n\r\n \"\"\"\r\n\r\n self.questions = dict()\r\n self.countries = dict()\r\n self.players = dict()\r\n self.verbs = dict()\r\n self.results = dict()\r\n self.count_minutes = np.zeros((24, 60), dtype=list)\r\n self.load_data()\r\n self.parsing()\r\n\r\n def load_data(self):\r\n \"\"\" load_data reads data from geography data file\r\n and stores the information in countries dictionary\r\n\r\n \"\"\"\r\n\r\n with open('data/geography.csv', 'rb') as csv_file:\r\n reader = csv.reader(csv_file)\r\n # parse geography file storing the information of each country\r\n for row in reader:\r\n r = [unicode(cell, 'utf-8') for cell in row]\r\n # name, capital, continent\r\n self.countries[r[0]] = (r[1], r[3], r[2])\r\n\r\n def parsing(self):\r\n \"\"\" parsing main method, parses the xAPI statements\r\n and stores the analysis in csv files\r\n\r\n \"\"\"\r\n\r\n with open('data/xapi_statements_countrix.json') as data_file:\r\n with open('analysis_players.csv', 'wb') as csv_players:\r\n # load data\r\n data = json.load(data_file)\r\n statements = data[\"statements\"]\r\n\r\n # prepares writer for players csv file\r\n players_fieldnames = ['timestamp', 'user', 'question', 'error']\r\n writerp = csv.DictWriter(csv_players,\r\n fieldnames=players_fieldnames)\r\n\r\n # statements parsing\r\n for i in range(len(statements)):\r\n statement = statements[i]\r\n self.parse_statement(statement, writerp)\r\n\r\n # stores results\r\n self.store_analysis_questions()\r\n self.store_analysis_results()\r\n self.store_analysis_sessions()\r\n self.store_analysis_peaks()\r\n self.store_analysis_xapi()\r\n\r\n def parse_statement(self, statement, writerp):\r\n \"\"\" parse_statement parses a single xAPI statement\r\n updating the corresponding dictionaries when necessary\r\n\r\n Parameters\r\n ----------\r\n statement : dict\r\n xAPI statement to parsse\r\n writerp : 
csv.DictWriter\r\n Writer to store information of player\r\n\r\n \"\"\"\r\n\r\n user = statement[\"actor\"][\"name\"]\r\n timestamp = statement[\"timestamp\"]\r\n\r\n if user not in self.players: # first statement of user\r\n beginning = timestamp\r\n self.players[user] = (beginning, beginning, 0, 0)\r\n else: # update last time of user\r\n beginning, oldtime, questions, score = self.players[user]\r\n if timestamp > oldtime:\r\n self.players[user] = (beginning, timestamp, questions, score)\r\n\r\n # adds user to peaks\r\n self.update_peaks(user, timestamp)\r\n\r\n # update xAPI verb times used\r\n complete_verb = statement[\"verb\"][\"id\"]\r\n verb = os.path.split(complete_verb)[-1]\r\n self.update_verbs(verb, timestamp)\r\n\r\n object_id = statement[\"object\"][\"id\"]\r\n question = os.path.split(object_id)[-1]\r\n\r\n if question == \"score\": # xAPI trace that updates/sets score value\r\n newscore = statement[\"result\"] [\"extensions\"]\\\r\n [\"http://rage-eu.com/xapi/extensions/value\"]\r\n self.update_score(user, newscore)\r\n\r\n if verb == \"preferred\": # xAPI trace of question answered\r\n self.parse_question_statement(statement, writerp)\r\n\r\n def parse_question_statement(self, statement, writerp):\r\n \"\"\" parse_question_statement parse a xAPI statement given\r\n that corresponds to a game question, updating the information for user,\r\n question and results after determining if the question was correctly\r\n answered or not\r\n\r\n Parameters\r\n ----------\r\n statement : dict\r\n xAPI statement corresponding to a question\r\n writerp : csv.DictWriter\r\n Writer to store information of player\r\n\r\n \"\"\"\r\n\r\n user = statement[\"actor\"][\"name\"]\r\n object_id = statement[\"object\"][\"id\"]\r\n question = os.path.split(object_id)[-1]\r\n answer = statement[\"result\"][\"response\"]\r\n timestamp = statement[\"timestamp\"]\r\n\r\n def parse_question_type(question):\r\n \"\"\" parse_question_type receives a question name and determines\r\n its type parsing the name, according to the 4 known types\r\n of questions: Flag, IsCapitalOf, CountryOfCapital and Continent\r\n\r\n Parameters\r\n ----------\r\n question : string\r\n Question name\r\n\r\n Returns\r\n -------\r\n country : string\r\n Country name\r\n question_type : string\r\n Type of question of the 4 known types: Flag, IsCapitalOf,\r\n CapitalOfCountry or Continent\r\n key : integer\r\n Key to access the country information, its value is 0, 1 or 2\r\n depending on if the information needed is country name,\r\n capital or continent, respectively\r\n results_index : integer\r\n Index in which to store the user results information, its value\r\n is 0, 1, 2 or 3 corresponding to the 4 types of questions\r\n\r\n \"\"\"\r\n\r\n # Question type 1: FLAG\r\n if question[-4:] == \"Flag\":\r\n country = question[:-4]\r\n key = 0 # country name\r\n question_type = \"Flag\"\r\n results_index = 0\r\n\r\n # Question type 2: ISCAPITALOF\r\n elif question[-11:] == \"IsCapitalOf\":\r\n country = question[:-11]\r\n key = 0 # country name\r\n question_type = \"CountryOfCapital\"\r\n results_index = 1\r\n\r\n # Question type 3: CAPITALOFCOUNTRY\r\n elif question[-7:] == \"Capital\":\r\n country = question[:-7]\r\n key = 1 # capital\r\n question_type = \"CapitalOfCountry\"\r\n results_index = 2\r\n\r\n # Question type 4: CONTINENT\r\n elif question[-9:] == \"Continent\":\r\n country = question[:-9]\r\n key = 2 # continent\r\n question_type = \"Continent\"\r\n results_index = 3\r\n else:\r\n country = \"\"\r\n question_type = 
\"\"\r\n key = 0\r\n results_index = 0\r\n\r\n return country, question_type, key, results_index\r\n\r\n self.update_user_questions(user) # update number of questions answered\r\n\r\n country, question_type, key, results_index = parse_question_type(\r\n question)\r\n\r\n if country in self.countries:\r\n correct = self.countries[country][key]\r\n else:\r\n correct = \"\"\r\n\r\n hit = (correct == answer)\r\n question = unicodedata.normalize('NFKD', question).encode(\r\n 'ascii', 'ignore')\r\n\r\n # updates question information\r\n self.update_question(question, question_type, hit, timestamp)\r\n # update results of user\r\n self.update_results(user, results_index, hit)\r\n\r\n # stores data of player\r\n writerp.writerow({'timestamp': timestamp, 'user': user,\r\n 'question': question, 'error': not hit})\r\n\r\n def update_peaks(self, user, timestamp):\r\n \"\"\" update_peaks updates the information of peaks of use in\r\n count_minutes array\r\n\r\n Parameters\r\n ----------\r\n user : string\r\n User identifier\r\n timestamp : string\r\n Timestamp at which the user interacted with the game\r\n\r\n \"\"\"\r\n\r\n # time parsing to obtain hour and minute\r\n hour = int(timestamp[11:-7])\r\n minute = int(timestamp[-6:-4])\r\n\r\n # adds user to the minute of game use\r\n if not self.count_minutes[hour][minute]:\r\n self.count_minutes[hour][minute] = [user]\r\n elif user not in self.count_minutes[hour][minute]:\r\n self.count_minutes[hour][minute].append(user)\r\n\r\n def update_verbs(self, verb, timestamp):\r\n \"\"\" update_verbs updates the verbs dictionary increasing by one\r\n the number of times the verb given as parameter has been used\r\n\r\n Parameters\r\n ----------\r\n verb : string\r\n xAPI verb used\r\n timestamp : string\r\n Timestamp at which the verb was user\r\n\r\n \"\"\"\r\n\r\n if verb in self.verbs: # the verb had already appeared\r\n firstuse, times = self.verbs[verb]\r\n self.verbs[verb] = (firstuse, times+1)\r\n else: # first time verb used\r\n self.verbs[verb] = (timestamp, 1)\r\n\r\n def update_score(self, user, newscore):\r\n \"\"\" update_score updates score of user in players dictionary\r\n\r\n Parameters\r\n ----------\r\n user : string\r\n User identifier\r\n newscore: integer\r\n Updated score of user\r\n\r\n \"\"\"\r\n\r\n beginning, oldtime, questions, _ = self.players[user]\r\n self.players[user] = (beginning, oldtime, questions, newscore)\r\n\r\n def update_user_questions(self, user):\r\n \"\"\" update_user_questions increases by 1 the number of questions\r\n answered by the user given as parameter in players dictionary\r\n\r\n Parameters\r\n ----------\r\n user : string\r\n User identifier\r\n\r\n \"\"\"\r\n\r\n beginning, oldtime, questions, score = self.players[user]\r\n self.players[user] = (beginning, oldtime, questions+1, score)\r\n\r\n def update_question(self, question, question_type, correct, timestamp):\r\n \"\"\" update_questions receives a question name and type\r\n and whether it has been answered correctly or wronlgy at time\r\n given by timestamp, and updates this information in questions\r\n dictionary\r\n\r\n Parameters\r\n ----------\r\n question : string\r\n Question name\r\n question_type : string\r\n Type of question of the 4 known types: Flag, IsCapitalOf,\r\n CapitalOfCountry or Continent\r\n correct : boolean\r\n True if and only if the question was correctly answered\r\n timestamp : string\r\n Time at which the question was answered\r\n\r\n \"\"\"\r\n\r\n # analysis of times question has been answered\r\n if question in 
self.questions:\r\n # question already store: increase times answered\r\n answers, wrong, question_type, timestamp = self.questions[question]\r\n answers += 1\r\n else:\r\n # question not previously stored in dictionary\r\n answers = 1\r\n wrong = 0\r\n\r\n if not correct: # increases number of wrong times answered\r\n wrong += 1\r\n\r\n # store/update data of question in dictionary\r\n self.questions[question] = (answers, wrong, question_type,\r\n timestamp)\r\n\r\n def update_results(self, user, results_index, correct):\r\n \"\"\" update_results updates the information of the user given as\r\n\t\tparameter in results dictionary for the question type specified\r\n\t\tby results_index. Whether it was correctly answered or not\r\n is determined by the value of correct parameter\r\n\r\n Parameters\r\n ----------\r\n user : string\r\n User identifier\r\n results_index : integer\r\n Index in which to store the user results information, its value\r\n is 0, 1, 2 or 3 corresponding to the 4 types of questions\r\n correct : boolean\r\n True if and only if the question was correctly answered\r\n\r\n \"\"\"\r\n\r\n # if new user, add empty results\r\n for k in range(4):\r\n if (user, k) not in self.results:\r\n self.results[(user, k)] = (0, 0)\r\n\r\n correct_type, answer_type = self.results[(user, results_index)]\r\n if correct: # increase number of correct answers\r\n correct_type += 1\r\n\r\n # store/update results in dictionary for the given question type\r\n self.results[(user, results_index)] = (correct_type, answer_type+1)\r\n\r\n def store_analysis_questions(self):\r\n \"\"\" store_analysis_questions stores the results of the analysis of\r\n questions determined in questions dictionary in a csv file\r\n\r\n \"\"\"\r\n\r\n with open('analysis_questions.csv', 'wb') as csv_questions:\r\n questions_fieldnames = ['timestamp', 'question', 'qtype',\r\n 'answered', 'errors', 'ratio']\r\n writerq = csv.DictWriter(csv_questions,\r\n fieldnames=questions_fieldnames)\r\n\r\n # for each question\r\n for k in self.questions.keys():\r\n answered, errors, question_type, timestamp = self.questions[k]\r\n # error ratio\r\n ratio = errors / answered\r\n\r\n # store data\r\n writerq.writerow({'timestamp': timestamp,\r\n 'question': k,\r\n 'qtype': question_type,\r\n 'answered': answered,\r\n 'errors': errors,\r\n 'ratio': ratio})\r\n\r\n def store_analysis_sessions(self):\r\n \"\"\" store_analysis_sessions stores the results of the analysis of\r\n sessions determined as part of players dictionary in a csv file\r\n\r\n \"\"\"\r\n\r\n with open('analysis_sessions.csv', 'wb') as csv_sessions:\r\n sessions_fieldnames = ['timestamp', 'user', 'time', 'questions']\r\n writers = csv.DictWriter(csv_sessions,\r\n fieldnames=sessions_fieldnames)\r\n\r\n # for each player\r\n for k in self.players.keys():\r\n beginning, end, questions, _ = self.players[k]\r\n\r\n # calculation of user's session time\r\n init_time = datetime.datetime.strptime(beginning,\r\n \"%Y-%m-%d\" + 'T' +\r\n \"%H:%M:%S\" + 'Z')\r\n init_time = time.mktime(init_time.timetuple()) * 1000\r\n end_time = datetime.datetime.strptime(end,\r\n \"%Y-%m-%d\" + 'T' +\r\n \"%H:%M:%S\" + 'Z')\r\n end_time = time.mktime(end_time.timetuple()) * 1000\r\n playtime = end_time - init_time\r\n if playtime > 0:\r\n # store data\r\n writers.writerow({'timestamp': beginning,\r\n 'user': k,\r\n 'time': playtime,\r\n 'questions': questions})\r\n\r\n def store_analysis_results(self):\r\n \"\"\" store_analysis_results stores the results of the analysis of\r\n users results in 
the four question types determined in results\r\n dictionary in a csv file\r\n\r\n \"\"\"\r\n\r\n with open('analysis_results.csv', 'wb') as csv_results:\r\n results_fieldnames = ['timestamp', 'user', 'flag', 'iscapitalof',\r\n 'capitalofcountry', 'continent', 'score']\r\n writerr = csv.DictWriter(csv_results,\r\n fieldnames=results_fieldnames)\r\n users = list(set(k[0] for k in self.results.keys()))\r\n\r\n # for each user\r\n for u in users:\r\n myresults = [\"\", \"\", \"\", \"\"]\r\n # for each question type\r\n for i in range(4):\r\n correct, answered = self.results[(u, i)]\r\n if answered < 2:\r\n myresults[i] = \"nodata\"\r\n elif correct / answered < 0.5:\r\n myresults[i] = \"failed\"\r\n else:\r\n myresults[i] = \"passed\"\r\n\r\n timestamp, _, _, score = self.players[u]\r\n\r\n # store data\r\n writerr.writerow({'timestamp': timestamp,\r\n 'user': u,\r\n 'flag': myresults[0],\r\n 'iscapitalof': myresults[1],\r\n 'capitalofcountry': myresults[2],\r\n 'continent': myresults[3],\r\n 'score': int(score)})\r\n\r\n def store_analysis_peaks(self):\r\n \"\"\" store_analysis_peaks stores the results of the analysis of\r\n users' use of the game per minute determined in count_minutes array\r\n in a csv file\r\n\r\n \"\"\"\r\n\r\n with open('analysis_peaks.csv', 'wb') as csv_peaks:\r\n peaks_fieldnames = ['timestamp', 'minute', 'users']\r\n writerpk = csv.DictWriter(csv_peaks, fieldnames=peaks_fieldnames)\r\n # as there it no timestamp, we choose a random one (the current)\r\n timestamp = int(time.time()*1000)\r\n\r\n # for each hour\r\n for h in range(24):\r\n # for each minute\r\n for m in range(60):\r\n users = self.count_minutes[h][m]\r\n if h < 10:\r\n hour = '0'+str(h)\r\n else:\r\n hour = str(h)\r\n if m < 10:\r\n minute = '0'+str(m)\r\n else:\r\n minute = str(m)\r\n if users:\r\n count = len(users)\r\n else:\r\n count = 0\r\n\r\n # store data\r\n writerpk.writerow({'timestamp': timestamp,\r\n 'minute': hour+minute,\r\n 'users': count})\r\n\r\n # next different random timestamp\r\n timestamp += 1\r\n\r\n def store_analysis_xapi(self):\r\n \"\"\" store_analysis_xapi stores the results of the analysis of\r\n use of xAPI verbs determined in verbs dictionary in a csv file\r\n\r\n \"\"\"\r\n\r\n with open('analysis_xapi.csv', 'wb') as csv_xapi:\r\n xapi_fieldnames = ['timestamp', 'verb', 'times']\r\n writerx = csv.DictWriter(csv_xapi, fieldnames=xapi_fieldnames)\r\n\r\n # for each xAPI verb\r\n for k in self.verbs.keys():\r\n firstuse, times = self.verbs[k]\r\n\r\n # store data\r\n writerx.writerow({'timestamp': firstuse,\r\n 'verb': k,\r\n 'times': times})\r\n"
] |
[
[
"numpy.zeros"
]
] |
BinWang28/AnomalyHop
|
[
"8e6b1c597e66d250cd6c154f3ea02a6a060a03a4"
] |
[
"src/display.py"
] |
[
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : display.py\n@Time : 2021/04/21 19:49:59\n@Author : bin.wang\n@Version : 1.0\n'''\n\n# here put the import lib\nimport os\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom skimage import morphology\nfrom skimage.segmentation import mark_boundaries\n\ndef plot_fig(test_img, scores, gts, threshold, save_dir, class_name):\n num = len(scores)\n vmax = scores.max() * 255.\n vmin = scores.min() * 255.\n for i in range(num):\n img = test_img[i]\n img = denormalization(img)\n gt = gts[i].transpose(1, 2, 0).squeeze()\n heat_map = scores[i] * 255\n mask = scores[i]\n mask[mask > threshold] = 1\n mask[mask <= threshold] = 0\n kernel = morphology.disk(4)\n mask = morphology.opening(mask, kernel)\n mask *= 255\n vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')\n fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))\n fig_img.subplots_adjust(right=0.9)\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n for ax_i in ax_img:\n ax_i.axes.xaxis.set_visible(False)\n ax_i.axes.yaxis.set_visible(False)\n ax_img[0].imshow(img)\n ax_img[0].title.set_text('Image')\n ax_img[1].imshow(gt, cmap='gray')\n ax_img[1].title.set_text('GroundTruth')\n ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)\n figa = ax_img[2].imshow(img, cmap='gray', interpolation='none')\n figb = ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')\n ax_img[2].title.set_text('Predicted heat map')\n ax_img[3].imshow(mask, cmap='gray')\n ax_img[3].title.set_text('Predicted mask')\n ax_img[4].imshow(vis_img)\n ax_img[4].title.set_text('Segmentation result')\n left = 0.92\n bottom = 0.15\n width = 0.015\n height = 1 - 2 * bottom\n rect = [left, bottom, width, height]\n cbar_ax = fig_img.add_axes(rect)\n cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)\n cb.ax.tick_params(labelsize=8)\n font = {\n 'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 8,\n }\n cb.set_label('Anomaly Score', fontdict=font)\n\n fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=1000)\n\n # image - 1\n plt.imsave(os.path.join(save_dir, class_name + '_image' + '_{}'.format(i)) + '.png', img)\n plt.imsave(os.path.join(save_dir, class_name + '_gt' + '_{}'.format(i)) + '.png', gt, cmap=\"gray\")\n plt.imsave(os.path.join(save_dir, class_name + '_heat_map' + '_{}'.format(i)) + '.png', heat_map, cmap=\"jet\")\n plt.imsave(os.path.join(save_dir, class_name + '_predict_mask' + '_{}'.format(i)) + '.png', mask, cmap=\"gray\")\n plt.imsave(os.path.join(save_dir, class_name + '_predict_mask_with_image' + '_{}'.format(i)) + '.png', vis_img, cmap=\"gray\")\n\n extent = ax_img[2].get_window_extent().transformed(fig_img.dpi_scale_trans.inverted())\n fig_img.savefig(os.path.join(save_dir, class_name + '_heat_mapk_with_image' + '_{}'.format(i)), bbox_inches=extent, dpi=1000)\n\n plt.close()\n plt.close()\n\n\ndef denormalization(x):\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n x = (((x.transpose(1, 2, 0) * std) + mean) * 255.).astype(np.uint8)\n \n return x"
] |
[
[
"matplotlib.pyplot.subplots",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"numpy.array"
]
] |
roks/deepsnap
|
[
"d5b349b9ee05c0e3d7215a226a436f325093edb1"
] |
[
"deepsnap/batch.py"
] |
[
"import torch\nfrom deepsnap.graph import Graph\nfrom typing import (\n Callable,\n Dict,\n List,\n)\n\n\nclass Batch(Graph):\n r\"\"\"\n A plain old python object modeling a batch of :class:`deepsnap.graph.Graph` objects\n as one big (disconnected) graph, with :class:`torch_geometric.data.Data` being the\n base class, all its methods can also be used here.\n In addition, single graphs can be reconstructed via the assignment vector\n :obj:`batch`, which maps each node to its respective graph identifier.\n \"\"\"\n def __init__(self, batch=None, **kwargs):\n super(Batch, self).__init__(**kwargs)\n\n self.batch = batch\n self.__data_class__ = Graph\n self.__slices__ = None\n\n @staticmethod\n def collate(follow_batch=[], transform=None, **kwargs):\n return lambda batch: Batch.from_data_list(\n batch, follow_batch, transform, **kwargs\n )\n\n @staticmethod\n def from_data_list(\n data_list: List[Graph],\n follow_batch: List = None,\n transform: Callable = None,\n **kwargs\n ):\n r\"\"\"\n Constructs A :class:`deepsnap.batch.Batch` object from a python list holding\n :class:`torch_geometric.data.Data` objects.\n The assignment vector :obj:`batch` is created on the fly.\n Additionally, creates assignment batch vectors for each key in\n :obj:`follow_batch`.\n\n Args:\n data_list (list): A list of `deepsnap.graph.Graph` objects.\n follow_batch (list, optional): Creates assignment batch vectors for each key.\n transform: If apply transform when batching.\n **kwargs: Other parameters.\n \"\"\"\n if follow_batch is None:\n follow_batch = []\n if transform is not None:\n data_list = [\n data.apply_transform(\n transform,\n deep_copy=True,\n **kwargs,\n )\n for data in data_list\n ]\n keys = [set(data.keys) for data in data_list]\n keys = list(set.union(*keys))\n assert \"batch\" not in keys\n\n batch, cumsum = Batch._init_batch_fields(keys, follow_batch)\n batch.__data_class__ = data_list[0].__class__\n batch.batch = []\n for i, data in enumerate(data_list):\n # Note: in heterogeneous graph, __inc__ logic is different\n Batch._collate_dict(\n data, cumsum,\n batch.__slices__, batch,\n data, follow_batch, i=i\n )\n num_nodes = data.num_nodes\n if num_nodes is not None:\n item = torch.full((num_nodes, ), i, dtype=torch.long)\n batch.batch.append(item)\n\n if num_nodes is None:\n batch.batch = None\n\n Batch._dict_list_to_tensor(batch, data_list[0])\n\n return batch.contiguous()\n\n @staticmethod\n def _init_batch_fields(keys, follow_batch):\n batch = Batch()\n batch.__slices__ = {key: [0] for key in keys}\n\n for key in keys:\n batch[key] = []\n\n for key in follow_batch:\n batch[f\"{key}_batch\"] = []\n\n cumsum = {key: 0 for key in keys}\n return batch, cumsum\n\n @staticmethod\n def _collate_dict(\n curr_dict,\n cumsum: Dict[str, int],\n slices,\n batched_dict,\n graph,\n follow_batch,\n i=None\n ):\n r\"\"\" Called in from_data_list to collate a dictionary.\n This can also be applied to Graph object, since it has support for\n keys and __getitem__().\n\n Args:\n curr_dict: current dictionary to be added to the collated dictionary.\n cumsum: cumulative sum to be used for indexing.\n slices: a dictionary of the same structure as batched_dict,\n slices[key] indicates the indices to slice batch[key] into\n tensors for all graphs in the batch.\n batched_dict: the batched dictionary of the same structure as curr_dict.\n But all graph data are batched together.\n \"\"\"\n if isinstance(curr_dict, dict):\n keys = curr_dict.keys()\n else:\n keys = curr_dict.keys\n for key in keys:\n item = curr_dict[key]\n 
if isinstance(item, dict):\n # recursively collate every key in the dictionary\n if isinstance(batched_dict[key], list):\n # nested dictionary not initialized yet\n assert len(batched_dict[key]) == 0\n # initialize the nested dictionary for batch\n cumsum[key] = {inner_key: 0 for inner_key in item.keys()}\n slices[key] = {inner_key: [0] for inner_key in item.keys()}\n batched_dict[key] = {}\n for inner_key in item.keys():\n batched_dict[key][inner_key] = []\n for inner_key in follow_batch:\n batched_dict[key][f\"{key}_batch\"] = []\n Batch._collate_dict(\n item, cumsum[key],\n slices[key], batched_dict[key],\n graph, follow_batch, i=i\n )\n continue\n if torch.is_tensor(item) and item.dtype != torch.bool:\n item = item + cumsum[key]\n if torch.is_tensor(item):\n size = item.size(graph.__cat_dim__(key, curr_dict[key]))\n else:\n size = 1\n slices[key].append(size + slices[key][-1])\n cumsum[key] = cumsum[key] + graph.__inc__(key, item)\n batched_dict[key].append(item)\n\n if key in follow_batch:\n item = torch.full((size, ), i, dtype=torch.long)\n batched_dict[f\"{key}_batch\"].append(item)\n\n @staticmethod\n def _dict_list_to_tensor(dict_of_list, graph):\n r\"\"\"Convert a dict/Graph with list as values to a dict/Graph with\n concatenated/stacked tensor as values.\n \"\"\"\n if isinstance(dict_of_list, dict):\n keys = dict_of_list.keys()\n else:\n keys = dict_of_list.keys\n for key in keys:\n if isinstance(dict_of_list[key], dict):\n # recursively convert the dictionary of list to dict of tensor\n Batch._dict_list_to_tensor(dict_of_list[key], graph)\n continue\n item = dict_of_list[key][0]\n if torch.is_tensor(item):\n if (\n Graph._is_graph_attribute(key)\n and item.ndim == 1\n and (not item.dtype == torch.long)\n and \"feature\" in key\n ):\n # special consideration: 1D tensor for graph attribute (classification)\n # named as: \"graph_xx_feature\"\n # batch by stacking the first dim\n dict_of_list[key] = torch.stack(\n dict_of_list[key],\n dim=0\n )\n else:\n # concat at the __cat_dim__\n dict_of_list[key] = torch.cat(\n dict_of_list[key],\n dim=graph.__cat_dim__(key, item)\n )\n elif isinstance(item, (float, int)):\n dict_of_list[key] = torch.tensor(dict_of_list[key])\n\n def to_data_list(self):\n r\"\"\"\n Reconstructs the list of :class:`torch_geometric.data.Data` objects\n from the batch object.\n The batch object must have been created via :meth:`from_data_list` in\n order to be able reconstruct the initial objects.\n \"\"\"\n if self.__slices__ is None:\n raise RuntimeError(\n \"Cannot reconstruct data list from batch because the \"\n \"batch object was not created using Batch.from_data_list()\"\n )\n\n keys = [key for key in self.keys if key[-5:] != \"batch\"]\n cumsum = {key: 0 for key in keys}\n data_list = []\n for i in range(len(self.__slices__[keys[0]]) - 1):\n # i: from 0 up to num graphs in the batch\n data = self.__data_class__()\n self._reconstruct_dict(\n i, keys, data, cumsum, self.__slices__, self, data\n )\n data_list.append(data)\n\n return data_list\n\n def _reconstruct_dict(\n self, graph_idx: int, keys, data_dict,\n cumsum: Dict[str, int], slices, batched_dict, graph):\n\n for key in keys:\n if isinstance(batched_dict[key], dict):\n # recursively unbatch the dict\n data_dict[key] = {}\n inner_keys = [\n inner_key\n for inner_key in batched_dict[key].keys()\n if inner_key[-5:] != \"batch\"\n ]\n inner_cumsum = {inner_key: 0 for inner_key in inner_keys}\n inner_slices = slices[key]\n self._reconstruct_dict(\n graph_idx, inner_keys,\n data_dict[key], inner_cumsum,\n 
inner_slices, batched_dict[key], graph\n )\n continue\n\n if torch.is_tensor(batched_dict[key]):\n data_dict[key] = batched_dict[key].narrow(\n graph.__cat_dim__(key, batched_dict[key]),\n slices[key][graph_idx],\n slices[key][graph_idx + 1] - slices[key][graph_idx]\n )\n if batched_dict[key].dtype != torch.bool:\n data_dict[key] = data_dict[key] - cumsum[key]\n else:\n data_dict[key] = (\n batched_dict[key][\n slices[key][graph_idx]:slices[key][graph_idx + 1]\n ]\n )\n cumsum[key] = cumsum[key] + graph.__inc__(key, data_dict[key])\n\n @property\n def num_graphs(self) -> int:\n r\"\"\"\n Returns the number of graphs in the batch.\n\n Returns:\n int: The number of graphs in the batch.\n \"\"\"\n return self.batch[-1].item() + 1\n\n def apply_transform(\n self,\n transform,\n update_tensor: bool = True,\n update_graph: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Applies a transformation to each graph object in parallel by first\n calling `to_data_list`, applying the transform, and then perform\n re-batching again to a `Batch`.\n A transform should edit the graph object,\n including changing the graph structure, and adding node/edge/graph attributes.\n The rest are automatically handled by the :class:`deepsnap.graph.Graph` object,\n including everything ended with index.\n\n Args:\n transform: Transformation function applied to each graph object.\n update_tensor: Whether use nx graph to update tensor attributes.\n update_graph: Whether use tensor attributes to update nx graphs.\n deep_copy: :obj:`True` if a new deep copy of batch is returned.\n This option allows modifying the batch of graphs without\n changing the graphs in the original dataset.\n kwargs: Parameters used in transform function in :class:`deepsnap.graph.Graph` objects.\n\n Returns:\n a batch object containing all transformed graph objects.\n\n \"\"\"\n # TODO: transductive setting, assert update_tensor == True\n return self.from_data_list(\n [\n Graph(graph).apply_transform(\n transform, update_tensor, update_graph, deep_copy, **kwargs\n )\n for graph in self.G\n ]\n )\n\n def apply_transform_multi(\n self,\n transform,\n update_tensors: bool = True,\n update_graphs: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Comparison to apply_transform, this allows multiple graph objects\n to be returned by the supplied transform function.\n\n Args:\n transform: (Multiple return value) tranformation function\n applied to each graph object. It needs to return a tuple of\n Graph objects or internal .G (NetworkX) objects.\n\n Returns:\n a tuple of batch objects. The i-th batch object contains the i-th\n return value of the transform function applied to all graphs\n in the batch.\n \"\"\"\n g_lists = (\n zip(\n *[\n Graph(graph).apply_transform_multi(\n transform, update_tensors, update_graphs,\n deep_copy, **kwargs,\n )\n for graph in self.G\n ]\n )\n )\n return (self.from_data_list(g_list) for g_list in g_lists)\n"
] |
[
[
"torch.stack",
"torch.is_tensor",
"torch.full",
"torch.tensor"
]
] |
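Illustrative usage sketch for the Batch class in the deepsnap row above, assuming deepsnap, networkx and torch are installed and that deepsnap.graph.Graph is built from a networkx graph carrying a "node_feature" tensor on each node (deepsnap's usual convention); the two path graphs and the 2-dimensional features are invented for illustration.

import networkx as nx
import torch
from deepsnap.graph import Graph
from deepsnap.batch import Batch

# Build two tiny graphs with a per-node feature tensor attached.
graphs = []
for n in (3, 4):
    G = nx.path_graph(n)
    for v in G.nodes:
        G.nodes[v]["node_feature"] = torch.ones(2)
    graphs.append(Graph(G))

# Collate into one big disconnected graph; batch.batch maps every node to its graph id.
batch = Batch.from_data_list(graphs)
print(batch.num_graphs)   # expected: 2
print(batch.batch)        # expected: tensor([0, 0, 0, 1, 1, 1, 1])

# Recover the individual graphs, e.g. after applying a transform.
singles = batch.to_data_list()

The same collation is what Batch.collate() hands to a torch DataLoader as its collate_fn, which is how these objects are normally batched during training.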
SebaDro/ST-DeepHydro
|
[
"98cf8c1fabd9098c34f486655cc608383db36eaa"
] |
[
"libs/plotutils.py"
] |
[
"import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport tensorflow as tf\nimport xarray as xr\n\n\ndef plot_loss(history: tf.keras.callbacks.History):\n \"\"\"\n Visualizes the progress of a trained model by plotting the loss per epoch\n\n Parameters\n ----------\n history: tf.keras.callbacks.History\n A Tensorflow history object that holds information about training progress\n\n \"\"\"\n plt.plot(history.history['loss'], label='Loss')\n plt.plot(history.history['val_loss'], label='Validation Loss')\n plt.xlabel('Epoche')\n plt.ylabel('RMSE')\n plt.legend()\n plt.grid(True)\n\n\ndef plot_predictions(ds: xr.Dataset, variable: str, basins: list = None):\n if basins is None:\n basins = ds.basin.values\n nr_basins = len(basins)\n if nr_basins == 1:\n plot_prediction_for_single_basin(ds, basins[0], variable)\n elif nr_basins > 1:\n fig, axis = plt.subplots(1, nr_basins, figsize=(16, 10))\n for ax, basin in zip(axis, basins):\n plot_prediction_for_single_basin(ds, basin, variable, ax)\n else:\n raise ValueError(\"There must be one basin for plotting, at least!\")\n\n\ndef plot_prediction_for_single_basin(ds: xr.Dataset, basin: str, variable: str, ax=None):\n if ax is None:\n fig, ax = plt.subplots(figsize=(16, 10))\n ds.sel(basin=basin)[f\"{variable}_pred\"].plot(ax=ax, label=\"prediction\", zorder=1)\n ds.sel(basin=basin)[f\"{variable}_obs\"].plot(ax=ax, label=\"observation\", zorder=0)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(variable)\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%Y-%b\"))\n ax.set_title(basin)\n ax.legend()"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
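A hedged usage sketch for the plotting helpers in the ST-DeepHydro row above: it fabricates a small xarray Dataset with the "<variable>_obs" / "<variable>_pred" naming and the basin/time coordinates that plot_prediction_for_single_basin expects. The variable name "streamflow", the basin ids, and the import path (taken from libs/plotutils.py) are assumptions.

import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from libs import plotutils  # assumes the repo's libs/ directory is importable

rng = np.random.default_rng(0)
times = pd.date_range("2000-01-01", periods=100)
basins = ["basin_a", "basin_b"]

ds = xr.Dataset(
    {
        "streamflow_obs": (("basin", "time"), rng.random((2, 100))),
        "streamflow_pred": (("basin", "time"), rng.random((2, 100))),
    },
    coords={"basin": basins, "time": times},
)

# One subplot per basin, prediction plotted over observation.
plotutils.plot_predictions(ds, "streamflow", basins=basins)
plt.show()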
ternaus/retinafacemask
|
[
"94e00317e9f7ecbb7b5134daaf10093f25694855"
] |
[
"retinafacemask/box_utils.py"
] |
[
"from typing import List\n\nimport numpy as np\nimport torch\n\n\ndef point_form(boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert prior_boxes to (x_min, y_min, x_max, y_max) representation for comparison to point form ground truth data.\n\n Args:\n boxes: center-size default boxes from priorbox layers.\n Return:\n boxes: Converted x_min, y_min, x_max, y_max form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:] / 2, boxes[:, :2] + boxes[:, 2:] / 2), 1)\n\n\ndef center_size(boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert prior_boxes to (cx, cy, w, h) representation for comparison to center-size form ground truth data.\n Args:\n boxes: point_form boxes\n Return:\n boxes: Converted x_min, y_min, x_max, y_max form of boxes.\n \"\"\"\n return torch.cat((boxes[:, 2:] + boxes[:, :2]) / 2, boxes[:, 2:] - boxes[:, :2], 1)\n\n\ndef intersect(box_a: torch.Tensor, box_b: torch.Tensor) -> torch.Tensor:\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A, 2] -> [A, 1, 2] -> [A, B, 2]\n [B, 2] -> [1, B, 2] -> [A, B, 2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: bounding boxes, Shape: [A, 4].\n box_b: bounding boxes, Shape: [B, 4].\n Return:\n intersection area, Shape: [A, B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a: torch.Tensor, box_b: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap is simply the intersection over\n union of two boxes. 
Here we operate on ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union\n\n\ndef matrix_iof(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"\n return iof of a and b, numpy version for data augmentation\n \"\"\"\n lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n return area_i / np.maximum(area_a[:, np.newaxis], 1)\n\n\ndef match(\n threshold: float,\n box_gt: torch.Tensor,\n priors: torch.Tensor,\n variances: List[float],\n labels_gt: torch.Tensor,\n landmarks_gt: torch.Tensor,\n box_t: torch.Tensor,\n label_t: torch.Tensor,\n landmarks_t: torch.Tensor,\n batch_id: int,\n) -> None:\n \"\"\"Match each prior box with the ground truth box of the highest jaccard overlap, encode the bounding\n boxes, then return the matched indices corresponding to both confidence and location preds.\n\n Args:\n threshold: The overlap threshold used when matching boxes.\n box_gt: Ground truth boxes, Shape: [num_obj, 4].\n priors: Prior boxes from priorbox layers, Shape: [n_priors, 4].\n variances: Variances corresponding to each prior coord, Shape: [num_priors, 4].\n labels_gt: All the class labels for the image, Shape: [num_obj, 2].\n landmarks_gt: Ground truth landms, Shape [num_obj, 10].\n box_t: Tensor to be filled w/ endcoded location targets.\n label_t: Tensor to be filled w/ matched indices for labels predictions.\n landmarks_t: Tensor to be filled w/ endcoded landmarks targets.\n batch_id: current batch index\n Return:\n The matched indices corresponding to 1)location 2)confidence 3)landmarks preds.\n \"\"\"\n # Compute iou between gt and priors\n overlaps = jaccard(box_gt, point_form(priors))\n # (Bipartite Matching)\n # [1, num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)\n\n # ignore hard gt\n valid_gt_idx = best_prior_overlap[:, 0] >= 0.2\n best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]\n if best_prior_idx_filter.shape[0] <= 0:\n box_t[batch_id] = 0\n label_t[batch_id] = 0\n return\n\n # [1, num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_idx_filter.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)): # 判别此anchor是预测哪一个boxes\n best_truth_idx[best_prior_idx[j]] = j\n\n matches = box_gt[best_truth_idx] # Shape: [num_priors, 4] 此处为每一个anchor对应的bbox取出来\n labels = labels_gt[best_truth_idx] # Shape: [num_priors] 此处为每一个anchor对应的label取出来\n labels[best_truth_overlap < threshold] = 0 # label as background overlap<0.35的全部作为负样本\n 
loc = encode(matches, priors, variances)\n\n matches_landm = landmarks_gt[best_truth_idx]\n landmarks_gt = encode_landm(matches_landm, priors, variances)\n box_t[batch_id] = loc # [num_priors, 4] encoded offsets to learn\n label_t[batch_id] = labels # [num_priors] top class label for each prior\n landmarks_t[batch_id] = landmarks_gt\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes (tensor), Shape: [num_priors, 4]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]\n # encode variance\n g_cxcy /= variances[0] * priors[:, 2:]\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\ndef encode_landm(matched: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes we have matched\n (based on jaccard overlap) with the prior boxes.\n Args:\n matched: Coords of ground truth for each prior in point-form\n Shape: [num_priors, 10].\n priors: Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: Variances of priorboxes\n Return:\n encoded landmarks, Shape: [num_priors, 10]\n \"\"\"\n\n # dist b/t match center and prior's center\n matched = torch.reshape(matched, (matched.size(0), 5, 2))\n priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)\n g_cxcy = matched[:, :, :2] - priors[:, :, :2]\n # encode variance\n g_cxcy /= variances[0] * priors[:, :, 2:]\n # g_cxcy /= priors[:, :, 2:]\n g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)\n # return target for smooth_l1_loss\n return g_cxcy\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:\n \"\"\"Decode locations from predictions using priors to undo the encoding we did for offset regression at train time.\n Args:\n loc: location predictions for loc layers,\n Shape: [num_priors, 4]\n priors: Prior boxes in center-offset form.\n Shape: [num_priors, 4].\n variances: Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat(\n (\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1]),\n ),\n 1,\n )\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef decode_landm(pre: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:\n \"\"\"Decode landmarks from predictions using priors to undo the encoding we did for offset regression at train time.\n Args:\n pre: landmark predictions for loc layers,\n Shape: [num_priors, 10]\n priors: Prior 
boxes in center-offset form.\n Shape: [num_priors, 4].\n variances: Variances of priorboxes\n Return:\n decoded landm predictions\n \"\"\"\n landms = torch.cat(\n (\n priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],\n ),\n dim=1,\n )\n return landms\n\n\ndef log_sum_exp(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Utility function for computing log_sum_exp while determining This will be used to determine unaveraged\n confidence loss across all examples in a batch.\n Args:\n x: conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"torch.cat",
"torch.exp",
"torch.log",
"numpy.prod",
"torch.clamp"
]
] |
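A quick, hand-checkable sketch of the IoU helper from the retinafacemask row above, using two ground-truth boxes and two priors in corner form; only torch and the module itself are needed, and the import path is inferred from the file path in the row. (As an aside, center_size in the same module passes its two result tensors to torch.cat positionally rather than as a tuple, so it would raise a TypeError if it were ever called.)

import torch
from retinafacemask.box_utils import jaccard  # import path inferred from the row above

# Boxes in (x_min, y_min, x_max, y_max) form.
gt = torch.tensor([[0.0, 0.0, 2.0, 2.0],
                   [1.0, 1.0, 3.0, 3.0]])
priors = torch.tensor([[0.0, 0.0, 2.0, 2.0],
                       [2.0, 2.0, 4.0, 4.0]])

print(jaccard(gt, priors))
# expected (approximately):
# tensor([[1.0000, 0.0000],
#         [0.1429, 0.1429]])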
j-friedrich/neuronalGPR
|
[
"0a05f0b0a30ac6ecbd5b3ff175d3cbfeb723479b"
] |
[
"table2.py"
] |
[
"import h5py\nimport GPy\nimport numpy as np\nimport os\nfrom GPnet import NN, KL\n\n\ndef KLs(data_directory):\n _DATA_DIRECTORY_PATH = \"DropoutUncertaintyExps-master/UCI_Datasets/\" + \\\n data_directory + \"/data/\"\n\n data = np.loadtxt(_DATA_DIRECTORY_PATH + \"data.txt\")\n index_features = np.loadtxt(_DATA_DIRECTORY_PATH + \"index_features.txt\")\n index_target = np.loadtxt(_DATA_DIRECTORY_PATH + \"index_target.txt\")\n X = data[:, [int(i) for i in index_features.tolist()]]\n y = data[:, int(index_target.tolist())]\n\n n_splits = int(np.loadtxt(_DATA_DIRECTORY_PATH + 'n_splits.txt'))\n n_hidden = int(np.loadtxt(_DATA_DIRECTORY_PATH + \"n_hidden.txt\"))\n\n def _get_index_train_test_path(split_num, train=True):\n \"\"\"\n Method to generate the path containing the training/test split for the given\n split number (generally from 1 to 20).\n @param split_num Split number for which the data has to be generated\n @param train Is true if the data is training data. Else false.\n @return path Path of the file containing the requried data\n \"\"\"\n if train:\n return _DATA_DIRECTORY_PATH + \"index_train_\" + str(split_num) + \".txt\"\n else:\n return _DATA_DIRECTORY_PATH + \"index_test_\" + str(split_num) + \".txt\"\n\n normalize = True\n KLpq = np.nan * np.zeros((n_splits, 3))\n KLqp = np.nan * np.zeros((n_splits, 3))\n\n for split in range(n_splits):\n index_train = np.loadtxt(_get_index_train_test_path(split, train=True))\n index_test = np.loadtxt(_get_index_train_test_path(split, train=False))\n\n X_train = X[[int(i) for i in index_train.tolist()]]\n y_train = y[[int(i) for i in index_train.tolist()]]\n X_test = X[[int(i) for i in index_test.tolist()]]\n\n if normalize:\n mean_X_train = np.mean(X_train, 0)\n std_X_train = np.std(X_train, 0)\n std_X_train[std_X_train == 0] = 1\n X_train_normalized = (X_train - mean_X_train) / std_X_train\n X_test_normalized = (X_test - mean_X_train) / std_X_train\n mean_y_train = np.mean(y_train)\n std_y_train = np.std(y_train)\n y_train_normalized = (y_train - mean_y_train) / std_y_train\n else:\n X_train_normalized = X_train\n y_train_normalized = y_train\n\n gp = GPy.models.GPRegression(X_train_normalized, y_train_normalized[:, None],\n GPy.kern.RBF(X_train.shape[1], ARD=True))\n gp[:] = h5py.File('results/GP/%s_split%g.hdf5' %\n (data_directory, split), 'r')['param_array']\n\n vfe = GPy.models.SparseGPRegression(X_train_normalized, y_train_normalized[:, None],\n GPy.kern.RBF(X_train.shape[1], ARD=True),\n num_inducing=n_hidden)\n vfe[:] = h5py.File('results/VFE/%s_split%g.hdf5' %\n (data_directory, split), 'r')['param_array']\n\n fitc = GPy.models.SparseGPRegression(X_train_normalized, y_train_normalized[:, None],\n GPy.kern.RBF(X_train.shape[1], ARD=True),\n num_inducing=n_hidden)\n fitc[:] = h5py.File('results/FITC/%s_split%g.hdf5' %\n (data_directory, split), 'r')['param_array']\n\n nn = NN(X_train_normalized, y_train_normalized[:, None],\n vfe.inducing_inputs, vfe.kern.lengthscale)\n\n KLpq[split] = [KL(gp, m, X_test_normalized) for m in (vfe, nn, fitc)]\n KLqp[split] = [KL(m, gp, X_test_normalized) for m in (vfe, nn, fitc)]\n\n return KLpq, KLqp\n\n\n_UCI_DIRECTORY_PATH = \"DropoutUncertaintyExps-master/UCI_Datasets\"\nsubfolders = [f.name for f in os.scandir(_UCI_DIRECTORY_PATH) if f.is_dir()]\nsubfolders.sort()\n\n\nKLpq = {}\nKLqp = {}\nfor f in subfolders:\n try:\n np.load('results/GP/%s.npy' % f)\n KLpq[f], KLqp[f] = KLs(f)\n except:\n continue\n\nfor j, kl in enumerate((KLpq, KLqp)):\n print(('KL(P|Q)', 'KL(Q|P)')[j])\n print(' ' * 26 + 
'VFE BioNN FITC')\n for f in kl.keys():\n print('%19s' % f, end=' ')\n try:\n perf = kl[f]\n n_splits = int(np.loadtxt(_UCI_DIRECTORY_PATH + '/' + f + '/data/n_splits.txt'))\n for k in range(3):\n print((('%9.2f+-%8.2f' if k == 2 else '%6.2f+-%5.2f') % tuple(map(\n lambda x: x if x < 1e50 else np.inf, (perf.mean(0)[k],\n perf.std(0)[k] / np.sqrt(n_splits - 1))))\n ).replace('nan', ' nan '), end=' ')\n except:\n print('ERROR', end=' ')\n print()\n"
] |
[
[
"numpy.sqrt",
"numpy.std",
"numpy.mean",
"numpy.load",
"numpy.zeros",
"numpy.loadtxt"
]
] |
xapple/cbm_runner
|
[
"ec532819e0a086077475bfd479836a378f187f6f"
] |
[
"cbmcfs3_runner/pump/pre_flight.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWritten by Lucas Sinclair and Paul Rougieux.\n\nJRC biomass Project.\nUnit D1 Bioeconomy.\n\"\"\"\n\n# Built-in modules #\n\n# Third party modules #\nimport pandas\n\n# First party modules #\n\n# Internal modules #\n\n###############################################################################\nclass PreFlight(object):\n \"\"\"\n This class will check the input data for inconsistencies.\n \"\"\"\n\n def __init__(self, parent):\n # Default attributes #\n self.parent = parent\n self.runner = parent\n\n def __call__(self):\n self.check_for_nan()\n\n def check_for_nan(self):\n \"\"\"This method will catch any 'NaN' presents in the input.\"\"\"\n # The object that will run next #\n create_xls = self.runner.default_sit.create_xls\n # Check there are CSVs #\n if create_xls.paths.csv_dir.empty:\n raise Exception(\"No CSVs present to generate the XLS.\")\n # Go over each file #\n for file_name in create_xls.file_name_to_sheet_name:\n assert create_xls.paths[file_name].exists\n df = pandas.read_csv(create_xls.paths[file_name])\n assert not df.isna().any().any()"
] |
[
[
"pandas.read_csv"
]
] |
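The heart of check_for_nan in the cbm_runner row above is a one-line pandas idiom; this standalone sketch (plain pandas, no runner objects) shows what df.isna().any().any() evaluates to on a frame with and without missing values.

import pandas as pd

df = pd.DataFrame({"area": [10.0, 12.5], "volume": [3.2, None]})

print(df.isna().any())        # per-column flags: area False, volume True
print(df.isna().any().any())  # True -> the assert in check_for_nan would fail

clean = df.fillna(0.0)
print(clean.isna().any().any())  # False -> the assert would pass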
TheGentlemanOctopus/oracle
|
[
"2857b9c1886548d9aefcb480ce6e77169ee9e7ef"
] |
[
"core/layouts/strip.py"
] |
[
"from core.devices.pixel import Pixel\nfrom layout import Layout\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nclass Strip(Layout):\n \"\"\"\n Represents a strip of Leds\n start/direction are two element arrays (x,y)\n spacing/num_pixels are scalars\n \"\"\"\n def __init__(self, start, direction, spacing, num_pixels):\n normed_direction = direction/norm(direction)\n\n self.pixels = []\n for i in range(num_pixels):\n self.pixels.append(Pixel(np.array(start) + i*spacing*normed_direction))"
] |
[
[
"numpy.array",
"numpy.linalg.norm"
]
] |
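A numpy-only sketch of the pixel-placement geometry used by Strip in the oracle row above (start + i * spacing * unit_direction); Pixel and Layout are left out since only the arithmetic is being illustrated, and the start/direction values are made up.

import numpy as np
from numpy.linalg import norm

start = np.array([0.0, 0.0])
direction = np.array([3.0, 4.0])
spacing = 0.5
num_pixels = 4

unit = direction / norm(direction)  # [0.6, 0.8]
positions = [start + i * spacing * unit for i in range(num_pixels)]
print(positions)
# expected (approximately): [0, 0], [0.3, 0.4], [0.6, 0.8], [0.9, 1.2]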
PABTester/quantitativeProcedures
|
[
"c8ee3eb0dc7e2b792de36283351facf79b505490"
] |
[
"job.py"
] |
[
"#-*- coding:utf-8 -*-\nimport jqdatasdk as jq\nfrom datetime import datetime, timedelta\nimport time\nimport numpy as np\nimport math\nfrom sendmail import mail\n# 本地化的 api申请权限地址:https://www.joinquant.com/default/index/sdk\n# 聚宽平台运行的代码:https://github.com/Jack-Cherish/quantitative/blob/main/lesson1/quantitive-etf-jq.py\n# api:\n# https://www.joinquant.com/help/api/help#api:API%E6%96%87%E6%A1%A3\n# https://www.joinquant.com/help/api/help#JQData:JQData\n\n\n\n# aa 为你自己的帐号, bb 为你自己的密码\njq.auth('aa','bb')\n\n# http://fund.eastmoney.com/ETFN_jzzzl.html\nstock_pool = [\n '159915.XSHE', # 易方达创业板ETF\n '510300.XSHG', # 华泰柏瑞沪深300ETF\n '510500.XSHG', # 南方中证500ETF\n]\n\n# 动量轮动参数\nstock_num = 1 # 买入评分最高的前 stock_num 只股票\nmomentum_day = 29 # 最新动量参考最近 momentum_day 的\n\nref_stock = '000300.XSHG' #用 ref_stock 做择时计算的基础数据\nN = 18 # 计算最新斜率 slope,拟合度 r2 参考最近 N 天\nM = 600 # 计算最新标准分 zscore,rsrs_score 参考最近 M 天\nscore_threshold = 0.7 # rsrs 标准分指标阈值\n# ma 择时参数\nmean_day = 20 # 计算结束 ma 收盘价,参考最近 mean_day\nmean_diff_day = 3 # 计算初始 ma 收盘价,参考(mean_day + mean_diff_day)天前,窗口为 mean_diff_day 的一段时间\n\n\nday = 1\n\nsend_info = []\n\n# 财务数据查询\ndef get_fundamentals_info(stock):\n q = jq.query(jq.valuation.turnover_ratio,\n jq.valuation.market_cap,\n jq.indicator.eps\n ).filter(jq.valuation.code.in_([stock]))\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n previous_date = current_dt - timedelta(days = day)\n df = jq.get_fundamentals_continuously(q, end_date = previous_date, count = 5, panel = False)\n print(df)\n\n# 根据股票名,获取股票 code\ndef get_stock_code(stock_name):\n securities = jq.get_all_securities()\n stock_code = securities[securities['display_name'] == stock_name].index[0]\n return stock_code\n\n# 根据市值,获取股票池\ndef market_cap():\n wholeA = jq.get_fundamentals(jq.query(\n jq.valuation.code).filter(\n jq.valuation.market_cap > 2000\n ))\n wholeAList = list(wholeA['code'])\n return wholeAList\n\n# 1-1 选股模块-动量因子轮动\n# 基于股票年化收益和判定系数打分,并按照分数从大到小排名\ndef get_rank(stock_pool):\n score_list = []\n global send_info\n for stock in stock_pool:\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n previous_date = current_dt - timedelta(days = day)\n data = jq.get_price(stock, end_date = previous_date, count = momentum_day, frequency='daily', fields=['close'])\n # 收盘价\n y = data['log'] = np.log(data.close)\n # 分析的数据个数(天)\n x = data['num'] = np.arange(data.log.size)\n # 拟合 1 次多项式\n # y = kx + b, slope 为斜率 k,intercept 为截距 b\n slope, intercept = np.polyfit(x, y, 1)\n # (e ^ slope) ^ 250 - 1\n annualized_returns = math.pow(math.exp(slope), 250) - 1\n r_squared = 1 - (sum((y - (slope * x + intercept))**2) / ((len(y) - 1) * np.var(y, ddof=1)))\n score = annualized_returns * r_squared\n score_list.append(score)\n stock_dict = dict(zip(stock_pool, score_list))\n sort_list = sorted(stock_dict.items(), key = lambda item:item[1], reverse = True)\n print(\"#\" * 30 + \"候选\" + \"#\" * 30)\n for stock in sort_list:\n stock_code = stock[0]\n stock_score = stock[1]\n security_info = jq.get_security_info(stock_code)\n stock_name = security_info.display_name\n send_info.append('{}({}):{}'.format(stock_name, stock_code, stock_score))\n print('{}({}):{}'.format(stock_name, stock_code, stock_score))\n print('#' * 64)\n code_list = []\n for i in range((len(stock_pool))):\n code_list.append(sort_list[i][0])\n rank_stock = code_list[0:stock_num]\n return rank_stock\n\n# 2-1 择时模块-计算线性回归统计值\n# 对输入的自变量每日最低价 x(series) 和因变量每日最高价 
y(series) 建立 OLS 回归模型,返回元组(截距,斜率,拟合度)\n# R2 统计学线性回归决定系数,也叫判定系数,拟合优度。\n# R2 范围 0 ~ 1,拟合优度越大,自变量对因变量的解释程度越高,越接近 1 越好。\n# 公式说明: https://blog.csdn.net/snowdroptulip/article/details/79022532\n# https://www.cnblogs.com/aviator999/p/10049646.html\ndef get_ols(x, y):\n slope, intercept = np.polyfit(x, y, 1)\n r2 = 1 - (sum((y - (slope * x + intercept))**2) / ((len(y) - 1) * np.var(y, ddof=1)))\n return (intercept, slope, r2)\n\n# 2-2 择时模块-设定初始斜率序列\n# 通过前 M 日最高最低价的线性回归计算初始的斜率,返回斜率的列表\ndef initial_slope_series():\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n previous_date = current_dt - timedelta(days = day)\n data = jq.get_price(ref_stock, end_date = previous_date, count = N + M, frequency='daily', fields=['high', 'low'])\n return [get_ols(data.low[i:i+N], data.high[i:i+N])[1] for i in range(M)]\n\n# 2-3 择时模块-计算标准分\n# 通过斜率列表计算并返回截至回测结束日的最新标准分\ndef get_zscore(slope_series):\n mean = np.mean(slope_series)\n std = np.std(slope_series)\n return (slope_series[-1] - mean) / std\n\n# 2-4 择时模块-计算综合信号\n# 1.获得 rsrs 与 MA 信号,rsrs 信号算法参考优化说明,MA 信号为一段时间两个端点的 MA 数值比较大小\n# 2.信号同时为 True 时返回买入信号,同为 False 时返回卖出信号,其余情况返回持仓不变信号\n# 解释:\n# MA 信号:MA 指标是英文(Moving average)的简写,叫移动平均线指标。\n# RSRS 择时信号:\n# https://www.joinquant.com/view/community/detail/32b60d05f16c7d719d7fb836687504d6?type=1\ndef get_timing_signal(stock):\n # 计算 MA 信号\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n previous_date = current_dt - timedelta(days = day)\n close_data = jq.get_price(ref_stock, end_date = previous_date, count = mean_day + mean_diff_day, frequency = 'daily', fields = ['close'])\n # 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1,23 天,要后 20 天\n today_MA = close_data.close[mean_diff_day:].mean()\n # 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0,23 天,要前 20 天\n before_MA = close_data.close[:-mean_diff_day].mean()\n # 计算 rsrs 信号\n high_low_data = jq.get_price(ref_stock, end_date = previous_date, count = N, frequency='daily', fields = ['high', 'low'])\n intercept, slope, r2 = get_ols(high_low_data.low, high_low_data.high)\n slope_series.append(slope)\n rsrs_score = get_zscore(slope_series[-M:]) * r2\n # 综合判断所有信号\n if rsrs_score > score_threshold and today_MA > before_MA:\n return \"BUY\"\n elif rsrs_score < -score_threshold and today_MA < before_MA:\n return \"SELL\"\n else:\n return \"KEEP\"\n\nslope_series = initial_slope_series()[:-1] # 除去回测第一天的 slope ,避免运行时重复加入\n\ndef test_100_days():\n for each_day in range(1, 100)[::-1]:\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n previous_date = current_dt - timedelta(days = each_day - 1)\n day = each_day\n print(each_day, previous_date)\n check_out_list = get_rank(stock_pool)\n for each_check_out in check_out_list:\n security_info = jq.get_security_info(each_check_out)\n stock_name = security_info.display_name\n stock_code = each_check_out\n print('今日自选股:{}({})'.format(stock_name, stock_code))\n #获取综合择时信号\n timing_signal = get_timing_signal(ref_stock)\n print('今日择时信号:{}'.format(timing_signal))\n print('*' * 100)\n\ndef run_today():\n current_dt = time.strftime(\"%Y-%m-%d\", time.localtime())\n current_dt = datetime.strptime(current_dt, '%Y-%m-%d')\n message = \"\"\n check_out_list = get_rank(stock_pool)\n for each_check_out in check_out_list:\n security_info = jq.get_security_info(each_check_out)\n stock_name = security_info.display_name\n stock_code = each_check_out\n # 
print('今日自选股:{}({})'.format(stock_name, stock_code))\n #获取综合择时信号\n timing_signal = get_timing_signal(ref_stock)\n if timing_signal == 'SELL':\n message = '清仓!卖卖卖!'\n # print('今日择时信号:{}'.format(timing_signal))\n else:\n message = \"今日自选股:{}({})\".format(stock_name, stock_code)\n # print('今日自选股:{}({})'.format(stock_name, stock_code))\n print(message)\n # print('*' * 100)\n message += \"\\r\\n\\r\\n\"\n message += \"*\" * 20 + \"备选股\" + \"*\" * 20\n message += \"\\r\\n\\r\\n\"\n message += \"\\r\\n\\r\\n\".join(send_info)\n ret = 0\n for _ in range(10):\n if ret:\n # 邮件发送成功推出\n break\n else:\n # 没有发送成功或失败继续\n ret = mail(message)\n time.sleep(1)\n\nif __name__ == \"__main__\":\n run_today()\n"
] |
[
[
"numpy.polyfit",
"numpy.log",
"numpy.arange",
"numpy.std",
"numpy.mean",
"numpy.var"
]
] |
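The momentum score computed inside get_rank in the quantitativeProcedures row above is a log-price regression slope, annualised and weighted by its R²; this standalone numpy sketch reproduces that exact formula on a synthetic 29-day price series, so it runs without a jqdatasdk account. The drift and noise parameters of the fake series are arbitrary.

import math
import numpy as np

# Synthetic 29 daily closes drifting upward with a little noise.
rng = np.random.default_rng(0)
close = 100 * np.exp(0.002 * np.arange(29) + 0.005 * rng.standard_normal(29))

y = np.log(close)
x = np.arange(y.size)
slope, intercept = np.polyfit(x, y, 1)

annualized_returns = math.pow(math.exp(slope), 250) - 1
r_squared = 1 - (sum((y - (slope * x + intercept)) ** 2)
                 / ((len(y) - 1) * np.var(y, ddof=1)))
score = annualized_returns * r_squared
print(slope, annualized_returns, r_squared, score)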
microsoft/flin-nl2web
|
[
"ec8ec0c477df83ae95dde21b0922b91d049ed902"
] |
[
"code/train_data_preprocessing/preprocess_util.py"
] |
[
"import nltk\nimport random\nimport spacy\nimport string\n\nimport numpy as np\nfrom nltk import ngrams\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom spacy.tokens import Doc\n\nnp.random.seed(1234)\nrandom.seed(1234)\n\nstopWords = set(stopwords.words('english'))\nlemmatizer = WordNetLemmatizer()\nps = PorterStemmer()\nnlp = spacy.load(\"en_core_web_sm\")\n\nchar_vocab_to_id = {'char_PAD': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11,\n 'l': 12, 'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22, 'w': 23,\n 'x': 24, 'y': 25, 'z': 26,\n '$': 27, '-': 28, ':': 29, '@': 30, '.': 31, '/': 32, '\\'': 33, '&': 44, '%': 45, '<': 46, '>': 47, '_': 48,\n '0': 34, '1': 35, '2': 36, '3': 37, '4': 38, '5': 39, '6': 40, '7': 41, '8': 42, '9': 43}\n\nent_vocab_to_id = {'ent_PAD': 0, 'GPE': 1, 'LOC': 2, 'DATE': 3, 'TIME': 4, 'MONEY': 5, 'ORDINAL':6, 'CARDINAL': 7}\n\n\ndef replace_punctuations(s, default_char=''):\n ''' punctuation removal '''\n\n for c in string.punctuation:\n if c == '-':\n s = s.replace(c, ' ')\n if c not in {':', '$', '@', '.', '/', '\\'', '&', '%', '<', '>'}:\n s = s.replace(c, default_char)\n return s\n\n\nclass WhitespaceTokenizer(object):\n def __init__(self, vocab):\n self.vocab = vocab\n\n def __call__(self, text):\n words = text.split()\n # All tokens 'own' a subsequent space character in this tokenizer\n spaces = [True] * len(words)\n return Doc(self.vocab, words=words, spaces=spaces)\n\n\nnlp = spacy.load('en_core_web_sm')\nnlp.tokenizer = WhitespaceTokenizer(nlp.vocab)\n\n\ndef get_vectorized_char_seq(phrase, char_vocab_to_id, q_len, q_wd_len):\n q_char_vec = []\n\n for wd in phrase.split():\n wd_vec = []\n for char in wd:\n if char in char_vocab_to_id:\n wd_vec.append(char_vocab_to_id[char])\n else:\n wd_vec.append(0)\n\n if len(wd_vec) >= q_wd_len:\n wd_vec = wd_vec[:q_wd_len]\n else:\n wd_vec = pad_arr_seq(wd_vec, q_wd_len, 0)\n q_char_vec.append(wd_vec)\n\n if len(q_char_vec) >= q_len:\n return q_char_vec[:q_len]\n else:\n return pad_arr_seq(q_char_vec, q_len, [0] * q_wd_len)\n\n\ndef get_gold_labels_tagger(q_phrase, para_val_sample, max_seq_len):\n # q_phrase = ' '.join(q_phrase.split())\n para_val_sample = ' '.join(para_val_sample.split())\n q_word_list = q_phrase.split()\n para_val_sample_word_list = para_val_sample.split()\n\n label_vec = [0] * len(q_phrase.split())\n index_list = []\n for wd_id, q_word in enumerate(q_word_list):\n if q_word == para_val_sample_word_list[0]:\n if ' '.join(q_word_list[wd_id:]).startswith(para_val_sample):\n for j in range(len(para_val_sample_word_list)):\n index_list.append(wd_id+j)\n\n for pos_id in index_list:\n label_vec[pos_id] = 1\n assert len(label_vec) == len(q_phrase.split())\n\n if len(label_vec) >= max_seq_len:\n return label_vec[:max_seq_len], len(q_word_list)\n else:\n return pad_arr_seq(label_vec, max_seq_len, 0), len(q_word_list)\n\n\ndef get_vectorized_entity_tags(phrase, ent_vocab_to_id, q_len):\n q_ent_tag_vec = []\n\n phrase = phrase.strip()\n doc = nlp(phrase)\n word_tags = []\n\n for i in range(len(doc)):\n word_tags.append((doc[i].text, doc[i].ent_iob_, doc[i].ent_type_))\n\n if doc[i].ent_type_ in ent_vocab_to_id:\n q_ent_tag_vec.append(ent_vocab_to_id[doc[i].ent_type_])\n else:\n q_ent_tag_vec.append(ent_vocab_to_id['ent_PAD'])\n\n if len(q_ent_tag_vec) >= q_len:\n return q_ent_tag_vec[:q_len]\n else:\n return pad_arr_seq(q_ent_tag_vec, q_len, 0)\n\n\ndef 
get_query_n_grams(q_phrase, max_n=3, min_n=1):\n q_words = q_phrase.lower().split()\n\n pos_tag_dict = {tup[0]:tup[1] for tup in nltk.pos_tag(q_words)}\n exclueded_pos_set = { 'VB', 'VBD', 'VBG', 'VBZ'}\n\n q_uni_bigram_phrases = set()\n for n_gr in range(min_n, max_n+1, 1):\n n_gram_list = list(ngrams(q_words, n_gr))\n\n for tup in n_gram_list:\n n_gram_phrase = ' '.join([wd for wd in list(tup) if wd not in stopWords\n and pos_tag_dict[wd] not in exclueded_pos_set])\n\n if n_gram_phrase != '':\n q_uni_bigram_phrases.add(n_gram_phrase.strip())\n return q_uni_bigram_phrases\n\n\ndef pad_arr_seq(curr_seq, max_len, padding_seq):\n\n for i in range(max_len-len(curr_seq)):\n curr_seq.append(padding_seq)\n assert len(curr_seq) == max_len\n return curr_seq\n\n\ndef get_activity_id(node_DB, activity_name):\n for activity_id in node_DB['activity']:\n if node_DB['activity'][activity_id]['ActivityName'] == activity_name:\n return activity_id\n return '-'\n\n\ndef preprocess_text(phrase):\n phrase = replace_punctuations(phrase)\n\n if len(phrase) < 3:\n return ''\n token_list = []\n\n for wd in phrase.split():\n # if wd in stopWords:\n # continue\n\n if not wd.isdigit():\n token_list.append(lemmatizer.lemmatize(wd))\n else:\n token_list.append(wd)\n\n return ' '.join(token_list)\n\n\ndef has_partial_match(wd, cand_wd_set):\n cand_wd = ' '.join(cand_wd_set)\n if cand_wd.startswith(wd) or cand_wd.endswith(wd):\n sim = (len(wd) * 1.0) / len(cand_wd)\n #print(wd, sim, cand_wd)\n if 0.5 > sim >= 0.12 and wd.isdigit():\n return 1, True\n if sim >= 0.5:\n return 2, True\n return 0, False\n\n\ndef get_match_vec(q_phrase, cand_phrase, max_q_len):\n '''\n\n :param q_phrase:\n :param cand_phrase:\n :param max_q_len:\n :return:\n '''\n q_match_vec = []\n\n cand_wd_set = cand_phrase.lower().split()\n\n for wd in q_phrase.lower().split():\n if wd in cand_wd_set:\n q_match_vec.append(3)\n else:\n match_id, is_match = has_partial_match(wd, cand_wd_set)\n q_match_vec.append(match_id)\n\n if len(q_match_vec) >= max_q_len:\n return q_match_vec[:max_q_len]\n else:\n return pad_arr_seq(q_match_vec, max_q_len, 0)\n\n\ndef get_vectorized_phrase(phrase, vocab_to_id, max_seq_len):\n phrase_vec = []\n\n for wd in phrase.split():\n if wd in vocab_to_id:\n phrase_vec.append(vocab_to_id[wd])\n else:\n phrase_vec.append(0)\n\n if len(phrase_vec) >= max_seq_len:\n return phrase_vec[:max_seq_len]\n else:\n return pad_arr_seq(phrase_vec, max_seq_len, 0)\n\n\ndef extract_noun_phrases(sentence):\n doc = nlp(sentence)\n noun_phrases = set()\n\n exclude_set = set()\n for token in doc:\n if token.pos_ in {'PRON'}:\n exclude_set.add(token.text)\n\n for chunk in doc.noun_chunks:\n noun_phrases.add(chunk.text)\n\n noun_phrases = noun_phrases.difference(exclude_set)\n return noun_phrases\n\n\ndef get_candidate_query_phrases(sentence):\n noun_P = extract_noun_phrases(sentence)\n q_n_grams = get_query_n_grams(sentence)\n return q_n_grams.union(noun_P)\n\n\nif __name__ == '__main__':\n print(get_gold_labels_tagger('new york hotels for for 10 people','for 10 people' , 10))"
] |
[
[
"numpy.random.seed"
]
] |
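An illustrative call to get_match_vec from the flin-nl2web row above. Importing preprocess_util loads spaCy's en_core_web_sm model and NLTK's stopwords corpus at module load, so those must be installed; the query/candidate strings are invented, and the module is assumed to sit directly on PYTHONPATH.

from preprocess_util import get_match_vec  # assumes the file above is importable as a top-level module

vec = get_match_vec("book hotels in new york", "new york hotels", 10)
print(vec)
# expected: [0, 3, 0, 3, 3, 0, 0, 0, 0, 0]
# 3 = exact word match, 2/1 = partial matches, 0 = no match; padded with 0 up to max_q_len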
Re3write/siamdw
|
[
"f5d7d4bda36cb8c14e93b460fbc77bb225aa8572"
] |
[
"lib/models/siamfc.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Houwen Peng and Zhipeng Zhang\n# Email: houwen.peng@microsoft.com\n# Main Results: see readme.md\n# ------------------------------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass SiamFC_(nn.Module):\n def __init__(self):\n super(SiamFC_, self).__init__()\n self.features = None\n self.connect_model = None\n self.zf = None # for online tracking\n self.criterion = nn.BCEWithLogitsLoss()\n\n def feature_extractor(self, x):\n return self.features(x)\n\n def connector(self, template_feature, search_feature):\n pred_score = self.connect_model(template_feature, search_feature)\n return pred_score\n\n def _cls_loss(self, pred, label, select):\n if len(select.size()) == 0: return 0\n pred = torch.index_select(pred, 0, select)\n label = torch.index_select(label, 0, select)\n return self.criterion(pred, label) # the same as tf version\n\n def _weighted_BCE(self, pred, label):\n pred = pred.view(-1)\n label = label.view(-1)\n pos = Variable(label.data.eq(1).nonzero().squeeze()).cuda()\n neg = Variable(label.data.eq(0).nonzero().squeeze()).cuda()\n\n loss_pos = self._cls_loss(pred, label, pos)\n loss_neg = self._cls_loss(pred, label, neg)\n return loss_pos * 0.5 + loss_neg * 0.5\n\n def template(self, z):\n self.zf = self.feature_extractor(z)\n\n def track(self, x):\n xf = self.feature_extractor(x)\n score = self.connector(self.zf, xf)\n return score\n\n def forward(self, template, search, label=None):\n zf = self.feature_extractor(template)\n xf = self.feature_extractor(search)\n score = self.connector(zf, xf)\n if self.training:\n return self._weighted_BCE(score, label)\n else:\n raise ValueError('forward is only used for training.')\n\n\n\n\n\n"
] |
[
[
"torch.nn.BCEWithLogitsLoss",
"torch.index_select"
]
] |
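A CPU-only sketch of the positive/negative split behind _weighted_BCE in the siamdw row above, written directly against torch because the method itself calls .cuda() on the index tensors; the toy score map and labels are invented.

import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()

pred = torch.tensor([2.0, -1.0, 0.5, -3.0])  # raw response-map scores
label = torch.tensor([1.0, 0.0, 1.0, 0.0])   # 1 = positive location, 0 = negative

pos = label.eq(1).nonzero().squeeze()
neg = label.eq(0).nonzero().squeeze()

loss_pos = criterion(torch.index_select(pred, 0, pos),
                     torch.index_select(label, 0, pos))
loss_neg = criterion(torch.index_select(pred, 0, neg),
                     torch.index_select(label, 0, neg))
print(0.5 * loss_pos + 0.5 * loss_neg)  # equally weighted positive/negative BCE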
giovana-morais/carat
|
[
"982af4dde63e8560ccc9120f3767f1420a545b67"
] |
[
"docs/source/examples/plot_feature_map_clusters.py"
] |
[
"# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n\"\"\"\n=========================\nPlot feature map clusters\n=========================\n\nThis example shows how to cluster rhythmic patterns from a feature map.\n\nThis is based on the rhythmic patterns analysis proposed in [CIM2014]_.\n\"\"\"\n\n# Code source: Martín Rocamora\n# License: MIT\n\n##############################################\n# Imports\n# - matplotlib for visualization\n#\nimport matplotlib.pyplot as plt\nfrom carat import util, audio, display, annotations, features, clustering\n\n##############################################\n# We group rhythmic patterns into clusters to aid the analysis\n# of their differences and similarities.\n#\n# First, we'll load one of the audio files included in `carat`.\naudio_path = util.example_audio_file(num_file=1)\n\ny, sr = audio.load(audio_path)\n\n##############################################\n# Next, we'll load the annotations provided for the example audio file.\nannotations_path = util.example_beats_file(num_file=1)\n\nbeats, beat_labs = annotations.load_beats(annotations_path)\ndownbeats, downbeat_labs = annotations.load_downbeats(annotations_path)\n\n##############################################\n# Then, we'll compute the accentuation feature.\n#\n# **Note:** This example is tailored towards the rhythmic patterns of the lowest\n# sounding of the three drum types taking part in the recording, so the analysis\n# focuses on the low frequencies (20 to 200 Hz).\nacce, times, _ = features.accentuation_feature(y, sr, minfreq=20, maxfreq=200)\n\n##############################################\n# Next, we'll compute the feature map.\nn_beats = int(round(beats.size/downbeats.size))\nn_tatums = 4\n\nmap_acce, _, _, _ = features.feature_map(acce, times, beats, downbeats, n_beats=n_beats,\n n_tatums=n_tatums)\n\n##############################################\n# Then, we'll group rhythmic patterns into clusters. This is done using the classical\n# K-means method with Euclidean distance (but other clustering methods and distance\n# measures can be used too).\n#\n# **Note:** The number of clusters n_clusters has to be specified as an input parameter.\nn_clusters = 4\n\ncluster_labs, centroids, _ = clustering.rhythmic_patterns(map_acce, n_clusters=n_clusters)\n\n##############################################\n# Finally we plot the feature map and the obtained clusters.\n\nplt.figure(figsize=(12, 6))\n# plot feature map\nax1 = plt.subplot(211)\ndisplay.map_show(map_acce, ax=ax1, n_tatums=n_tatums)\n# plot feature map with clusters in colors\nax2 = plt.subplot(212)\ndisplay.map_show(map_acce, ax=ax2, n_tatums=n_tatums, clusters=cluster_labs)\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
jmhessel/lxmert
|
[
"4ee7b1a1ec652eb163572e3ae8189ea8b93c957e"
] |
[
"src/param.py"
] |
[
"# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport argparse\nimport random\n\nimport numpy as np\nimport torch\n\n\ndef get_optimizer(optim):\n # Bind the optimizer\n if optim == 'rms':\n print(\"Optimizer: Using RMSProp\")\n optimizer = torch.optim.RMSprop\n elif optim == 'adam':\n print(\"Optimizer: Using Adam\")\n optimizer = torch.optim.Adam\n elif optim == 'adamax':\n print(\"Optimizer: Using Adamax\")\n optimizer = torch.optim.Adamax\n elif optim == 'sgd':\n print(\"Optimizer: sgd\")\n optimizer = torch.optim.SGD\n elif 'bert' in optim:\n optimizer = 'bert' # The bert optimizer will be bind later.\n else:\n assert False, \"Please add your optimizer %s in the list.\" % optim\n\n return optimizer\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n # Data Splits\n parser.add_argument(\"--train\", default='train')\n parser.add_argument(\"--valid\", default='valid')\n parser.add_argument(\"--test\", default=None)\n\n # Training Hyper-parameters\n parser.add_argument('--batchSize', dest='batch_size', type=int, default=256)\n parser.add_argument('--optim', default='bert')\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--seed', type=int, default=9595, help='random seed')\n\n # Debugging\n parser.add_argument('--output', type=str, default='snap/test')\n parser.add_argument(\"--fast\", action='store_const', default=False, const=True)\n parser.add_argument(\"--tiny\", action='store_const', default=False, const=True)\n parser.add_argument(\"--tqdm\", action='store_const', default=False, const=True)\n\n # Model Loading\n parser.add_argument('--load', type=str, default=None,\n help='Load the model (usually the fine-tuned model).')\n parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,\n help='Load the pre-trained LXMERT model.')\n parser.add_argument('--loadLXMERTQA', dest='load_lxmert_qa', type=str, default=None,\n help='Load the pre-trained LXMERT model with QA answer head.')\n parser.add_argument(\"--fromScratch\", dest='from_scratch', action='store_const', default=False, const=True,\n help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '\n 'the model would be trained from scratch. If --fromScratch is'\n ' not specified, the model would load BERT-pre-trained weights by'\n ' default. 
')\n\n # Optimization\n parser.add_argument(\"--mceLoss\", dest='mce_loss', action='store_const', default=False, const=True)\n\n # LXRT Model Config\n # Note: LXRT = L, X, R (three encoders), Transformer\n parser.add_argument(\"--llayers\", default=9, type=int, help='Number of Language layers')\n parser.add_argument(\"--xlayers\", default=5, type=int, help='Number of CROSS-modality layers.')\n parser.add_argument(\"--rlayers\", default=5, type=int, help='Number of object Relationship layers.')\n\n # LXMERT Pre-training Config\n parser.add_argument(\"--taskMatched\", dest='task_matched', action='store_const', default=False, const=True)\n parser.add_argument(\"--taskMaskLM\", dest='task_mask_lm', action='store_const', default=False, const=True)\n parser.add_argument(\"--taskObjPredict\", dest='task_obj_predict', action='store_const', default=False, const=True)\n parser.add_argument(\"--taskQA\", dest='task_qa', action='store_const', default=False, const=True)\n parser.add_argument(\"--visualLosses\", dest='visual_losses', default='obj,attr,feat', type=str)\n parser.add_argument(\"--qaSets\", dest='qa_sets', default=None, type=str)\n parser.add_argument(\"--wordMaskRate\", dest='word_mask_rate', default=0.15, type=float)\n parser.add_argument(\"--objMaskRate\", dest='obj_mask_rate', default=0.15, type=float)\n\n # Training configuration\n parser.add_argument(\"--multiGPU\", action='store_const', default=False, const=True)\n parser.add_argument(\"--numWorkers\", dest='num_workers', default=0)\n\n # Parse the arguments.\n args = parser.parse_args()\n\n # Bind optimizer class.\n args.optimizer = get_optimizer(args.optim)\n\n # Set seeds\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n return args\n\nargs = parse_args()\n"
] |
[
[
"torch.manual_seed",
"numpy.random.seed"
]
] |
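A small sketch of how the optimizer class returned by get_optimizer in the lxmert row above is typically bound to parameters; the nn.Linear model and learning rate are placeholders. Note that src/param.py calls parse_args() at import time, so this only runs cleanly when the interpreter is started without unrelated command-line arguments, and the 'bert' branch returns the string 'bert', which the project binds elsewhere.

import torch
import torch.nn as nn
from src.param import get_optimizer  # import path taken from the file path in the row above

model = nn.Linear(10, 2)

optim_cls = get_optimizer('adam')              # -> torch.optim.Adam
optimizer = optim_cls(model.parameters(), lr=1e-4)

loss = model(torch.randn(4, 10)).sum()
loss.backward()
optimizer.step()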
magicknight/event-model
|
[
"9f65b9d3c452a235781604827b8b32e6db355041"
] |
[
"event_model/tests/test_em.py"
] |
[
"import copy\nimport json\nimport event_model\nimport numpy\nimport pytest\n\n\ndef test_documents():\n dn = event_model.DocumentNames\n for k in ('stop', 'start', 'descriptor',\n 'event', 'bulk_events', 'datum',\n 'resource', 'bulk_datum', 'event_page', 'datum_page'):\n assert dn(k) == getattr(dn, k)\n\n\ndef test_len():\n assert 10 == len(event_model.DocumentNames)\n\n\ndef test_schemas():\n for k in event_model.DocumentNames:\n assert k in event_model.SCHEMA_NAMES\n assert event_model.schemas[k]\n\n\ndef test_schema_validators():\n for name in event_model.schemas.keys():\n assert name in event_model.schema_validators\n\n assert len(event_model.schema_validators) == len(event_model.schemas)\n\n\ndef test_compose_run():\n # Compose each kind of document type. These calls will trigger\n # jsonschema.validate and ensure that the document-generation code composes\n # valid documents.\n bundle = event_model.compose_run()\n start_doc, compose_descriptor, compose_resource, compose_stop = bundle\n assert bundle.start_doc is start_doc\n assert bundle.compose_descriptor is compose_descriptor\n assert bundle.compose_resource is compose_resource\n assert bundle.compose_stop is compose_stop\n bundle = compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n descriptor_doc, compose_event, compose_event_page = bundle\n assert bundle.descriptor_doc is descriptor_doc\n assert bundle.compose_event is compose_event\n assert bundle.compose_event_page is compose_event_page\n bundle = compose_resource(\n spec='TIFF', root='/tmp', resource_path='stack.tiff',\n resource_kwargs={})\n resource_doc, compose_datum, compose_datum_page = bundle\n assert bundle.resource_doc is resource_doc\n assert bundle.compose_datum is compose_datum\n assert bundle.compose_datum_page is compose_datum_page\n datum_doc = compose_datum(datum_kwargs={'slice': 5})\n event_doc = compose_event(\n data={'motor': 0, 'image': datum_doc['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False})\n datum_page = compose_datum_page(datum_kwargs={'slice': [10, 15]})\n event_page = compose_event_page(data={'motor': [1, 2], 'image':\n datum_page['datum_id']},\n timestamps={'motor': [0, 0],\n 'image': [0, 0]},\n filled={'image': [False, False]},\n seq_num=[1, 2])\n assert 'descriptor' in event_doc\n assert 'descriptor' in event_page\n assert event_doc['seq_num'] == 1\n stop_doc = compose_stop()\n assert 'primary' in stop_doc['num_events']\n assert stop_doc['num_events']['primary'] == 3\n\n\ndef test_round_trip_pagination():\n run_bundle = event_model.compose_run()\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n res_bundle = run_bundle.compose_resource(\n spec='TIFF', root='/tmp', resource_path='stack.tiff',\n resource_kwargs={})\n datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})\n datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})\n datum_doc3 = res_bundle.compose_datum(datum_kwargs={'slice': 15})\n event_doc1 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc1['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n event_doc2 = desc_bundle.compose_event(\n data={'motor': 1, 'image': 
datum_doc2['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n event_doc3 = desc_bundle.compose_event(\n data={'motor': 2, 'image': datum_doc3['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n\n # Round trip single event -> event_page -> event.\n expected = event_doc1\n actual, = event_model.unpack_event_page(\n event_model.pack_event_page(expected))\n assert actual == expected\n\n # Round trip two events -> event_page -> events.\n expected = [event_doc1, event_doc2]\n actual = list(event_model.unpack_event_page(\n event_model.pack_event_page(*expected)))\n assert actual == expected\n\n # Round trip three events -> event_page -> events.\n expected = [event_doc1, event_doc2, event_doc3]\n actual = list(event_model.unpack_event_page(\n event_model.pack_event_page(*expected)))\n assert actual == expected\n\n # Round trip on docs that don't have a filled key\n unfilled_doc1 = event_doc1\n unfilled_doc1.pop('filled')\n unfilled_doc2 = event_doc2\n unfilled_doc2.pop('filled')\n unfilled_doc3 = event_doc3\n unfilled_doc3.pop('filled')\n expected = [unfilled_doc1, unfilled_doc2, unfilled_doc3]\n actual = list(event_model.unpack_event_page(\n event_model.pack_event_page(*expected)))\n for doc in actual:\n doc.pop('filled')\n assert actual == expected\n\n # Round trip one datum -> datum_page -> datum.\n expected = datum_doc1\n actual, = event_model.unpack_datum_page(\n event_model.pack_datum_page(expected))\n assert actual == expected\n\n # Round trip two datum -> datum_page -> datum.\n expected = [datum_doc1, datum_doc2]\n actual = list(event_model.unpack_datum_page(\n event_model.pack_datum_page(*expected)))\n assert actual == expected\n\n # Round trip three datum -> datum_page -> datum.\n expected = [datum_doc1, datum_doc2, datum_doc3]\n actual = list(event_model.unpack_datum_page(\n event_model.pack_datum_page(*expected)))\n assert actual == expected\n\n # Check edge case where datum_kwargs are empty.\n datum_doc1 = res_bundle.compose_datum(datum_kwargs={})\n datum_doc2 = res_bundle.compose_datum(datum_kwargs={})\n datum_doc3 = res_bundle.compose_datum(datum_kwargs={})\n\n # Round trip one datum -> datum_page -> datum.\n expected = datum_doc1\n actual, = event_model.unpack_datum_page(\n event_model.pack_datum_page(expected))\n assert actual == expected\n\n # Round trip two datum -> datum_page -> datum.\n expected = [datum_doc1, datum_doc2]\n actual = list(event_model.unpack_datum_page(\n event_model.pack_datum_page(*expected)))\n assert actual == expected\n\n # Round trip three datum -> datum_page -> datum.\n expected = [datum_doc1, datum_doc2, datum_doc3]\n actual = list(event_model.unpack_datum_page(\n event_model.pack_datum_page(*expected)))\n assert actual == expected\n\n\ndef test_bulk_events_to_event_page(tmp_path):\n run_bundle = event_model.compose_run()\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n desc_bundle_baseline = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n\n path_root = str(tmp_path)\n\n res_bundle = run_bundle.compose_resource(\n spec='TIFF', root=path_root, resource_path='stack.tiff',\n resource_kwargs={})\n datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})\n datum_doc2 = 
res_bundle.compose_datum(datum_kwargs={'slice': 10})\n event1 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc1['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n event2 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc2['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=2)\n event3 = desc_bundle_baseline.compose_event(\n data={'motor': 0},\n timestamps={'motor': 0},\n seq_num=1)\n\n primary_event_page = event_model.pack_event_page(event1, event2)\n baseline_event_page = event_model.pack_event_page(event3)\n bulk_events = {'primary': [event1, event2], 'baseline': [event3]}\n pages = event_model.bulk_events_to_event_pages(bulk_events)\n assert tuple(pages) == (primary_event_page, baseline_event_page)\n\n\ndef test_sanitize_doc():\n run_bundle = event_model.compose_run()\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n desc_bundle_baseline = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n event1 = desc_bundle.compose_event(\n data={'motor': 0, 'image': numpy.ones((512, 512))},\n timestamps={'motor': 0, 'image': 0}, filled={'image': True},\n seq_num=1)\n event2 = desc_bundle.compose_event(\n data={'motor': 0, 'image': numpy.ones((512, 512))},\n timestamps={'motor': 0, 'image': 0}, filled={'image': True},\n seq_num=2)\n event3 = desc_bundle_baseline.compose_event(\n data={'motor': 0},\n timestamps={'motor': 0},\n seq_num=1)\n\n event_page = event_model.pack_event_page(event1, event2)\n bulk_events = {'primary': [event1, event2], 'baseline': [event3]}\n json.dumps(event_model.sanitize_doc(event_page))\n json.dumps(event_model.sanitize_doc(bulk_events))\n json.dumps(event_model.sanitize_doc(event1))\n\n\ndef test_bulk_datum_to_datum_page():\n run_bundle = event_model.compose_run()\n res_bundle = run_bundle.compose_resource(\n spec='TIFF', root='/tmp', resource_path='stack.tiff',\n resource_kwargs={})\n datum1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})\n datum2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})\n\n actual = event_model.pack_datum_page(datum1, datum2)\n bulk_datum = {'resource': res_bundle.resource_doc['uid'],\n 'datum_kwarg_list': [datum1['datum_kwargs'],\n datum2['datum_kwargs']],\n 'datum_ids': [datum1['datum_id'], datum2['datum_id']]}\n expected = event_model.bulk_datum_to_datum_page(bulk_datum)\n assert actual == expected\n\n\ndef test_document_router_smoke_test():\n dr = event_model.DocumentRouter()\n run_bundle = event_model.compose_run()\n dr('start', run_bundle.start_doc)\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n dr('descriptor', desc_bundle.descriptor_doc)\n desc_bundle_baseline = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n dr('descriptor', desc_bundle_baseline.descriptor_doc)\n res_bundle = run_bundle.compose_resource(\n spec='TIFF', root='/tmp', resource_path='stack.tiff',\n resource_kwargs={})\n dr('resource', res_bundle.resource_doc)\n datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})\n 
datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})\n dr('datum', datum_doc1)\n dr('datum', datum_doc2)\n event1 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc1['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n dr('event', event1)\n event2 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc2['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=2)\n dr('event', event2)\n event3 = desc_bundle_baseline.compose_event(\n data={'motor': 0},\n timestamps={'motor': 0},\n seq_num=1)\n dr('event', event3)\n dr('stop', run_bundle.compose_stop())\n\n\ndef test_document_router_():\n dr = event_model.DocumentRouter()\n run_bundle = event_model.compose_run()\n dr('start', run_bundle.start_doc, validate=True)\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n dr('descriptor', desc_bundle.descriptor_doc, validate=True)\n desc_bundle_baseline = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n dr('descriptor', desc_bundle_baseline.descriptor_doc, validate=True)\n res_bundle = run_bundle.compose_resource(\n spec='TIFF', root='/tmp', resource_path='stack.tiff',\n resource_kwargs={})\n dr('resource', res_bundle.resource_doc, validate=True)\n datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})\n datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})\n dr('datum', datum_doc1, validate=True)\n dr('datum', datum_doc2, validate=True)\n event1 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc1['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=1)\n dr('event', event1, validate=True)\n event2 = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc2['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n seq_num=2)\n dr('event', event2, validate=True)\n event3 = desc_bundle_baseline.compose_event(\n data={'motor': 0},\n timestamps={'motor': 0},\n seq_num=1)\n dr('event', event3, validate=True)\n dr('stop', run_bundle.compose_stop(), validate=True)\n\n\ndef test_filler(tmp_path):\n\n class DummyHandler:\n def __init__(self, resource_path, a, b):\n assert a == 1\n assert b == 2\n assert resource_path == str(tmp_path / \"stack.tiff\")\n\n def __call__(self, c, d):\n assert c == 3\n assert d == 4\n return numpy.ones((5, 5))\n\n path_root = str(tmp_path)\n\n reg = {'DUMMY': DummyHandler}\n filler = event_model.Filler(reg, inplace=True)\n run_bundle = event_model.compose_run()\n desc_bundle = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n desc_bundle_baseline = run_bundle.compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n res_bundle = run_bundle.compose_resource(\n spec='DUMMY', root=path_root, resource_path='stack.tiff',\n resource_kwargs={'a': 1, 'b': 2})\n datum_doc = res_bundle.compose_datum(datum_kwargs={'c': 3, 'd': 4})\n raw_event = desc_bundle.compose_event(\n data={'motor': 0, 'image': datum_doc['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False},\n 
seq_num=1)\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n assert isinstance(event['data']['image'], str)\n filler('event', event)\n stop_doc = run_bundle.compose_stop()\n filler('stop', stop_doc)\n assert event['data']['image'].shape == (5, 5)\n assert not filler._closed\n\n # Test NoFiller.\n filler = event_model.NoFiller(reg)\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n assert isinstance(event['data']['image'], str)\n filler('event', event)\n # Check that it *hasn't* been filled.\n assert isinstance(event['data']['image'], str)\n filler('stop', stop_doc)\n\n # Test get_handler() method.\n handler = filler.get_handler(res_bundle.resource_doc)\n # The method does not expose the internal cache of handlers, so it should\n # not return the same instance when called repeatedly.\n assert filler.get_handler(res_bundle.resource_doc) is not handler\n\n # Test closing.\n filler.close()\n with pytest.raises(event_model.EventModelRuntimeError):\n filler.get_handler(res_bundle.resource_doc)\n with pytest.raises(event_model.EventModelRuntimeError):\n filler('stop', stop_doc)\n\n # Test context manager with Event.\n with event_model.Filler(reg, inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n name, doc = filler('event', event)\n assert name == 'event'\n assert doc is event\n filler('stop', stop_doc)\n assert not filler._closed\n assert event['data']['image'].shape == (5, 5)\n assert filler._closed\n\n # Test context manager with EventPage.\n with event_model.Filler(reg, inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n name, doc = filler('event_page', event_page)\n assert name == 'event_page'\n assert doc is event_page\n filler('stop', stop_doc)\n assert not filler._closed\n assert event_page['data']['image'][0].shape == (5, 5)\n assert filler._closed\n\n # Test undefined handler spec\n with event_model.Filler({}, inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n assert isinstance(event['data']['image'], str)\n with pytest.raises(event_model.UndefinedAssetSpecification):\n filler('event', event)\n\n # Test exclude and include.\n with pytest.raises(ValueError):\n event_model.Filler({}, include=[], exclude=[], inplace=True)\n\n with pytest.warns(DeprecationWarning):\n with event_model.Filler(reg, exclude=['image'], inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n 
filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n assert isinstance(event['data']['image'], str)\n filler('event', event)\n filler('stop', stop_doc)\n\n with pytest.warns(DeprecationWarning):\n with event_model.Filler(reg, include=['image'], inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n filler('event', event)\n filler('stop', stop_doc)\n assert not filler._closed\n assert event['data']['image'].shape == (5, 5)\n\n with pytest.warns(DeprecationWarning):\n with event_model.Filler(reg, include=['image', 'EXTRA THING'],\n inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n filler('event', event)\n filler('stop', stop_doc)\n assert not filler._closed\n assert event['data']['image'].shape == (5, 5)\n\n class DummyHandlerRootMapTest:\n def __init__(self, resource_path, a, b):\n assert a == 1\n assert b == 2\n assert resource_path == str(tmp_path / \"moved\" / \"stack.tiff\")\n\n def __call__(self, c, d):\n assert c == 3\n assert d == 4\n return numpy.ones((5, 5))\n\n with event_model.Filler({'DUMMY': DummyHandlerRootMapTest},\n root_map={path_root: str(tmp_path / \"moved\")},\n inplace=True) as filler:\n\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n filler('event', event)\n filler('stop', stop_doc)\n assert not filler._closed\n assert event['data']['image'].shape == (5, 5)\n\n # Test verify_filled.\n with pytest.raises(event_model.UnfilledData):\n event_model.verify_filled(event_model.pack_event_page(raw_event))\n event_model.verify_filled(event_model.pack_event_page(event))\n\n # Test inplace.\n with event_model.Filler(reg, inplace=True) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n # Test event()\n event = copy.deepcopy(raw_event)\n name, filled_event = filler('event', event)\n assert filled_event is event\n event = copy.deepcopy(raw_event)\n # Test fill_event()\n filled_event = filler.fill_event(event)\n assert filled_event is event\n # Test event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n _, filled_event_page = filler('event_page', event_page)\n assert filled_event_page is event_page\n # Test fill_event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n filled_event_page = filler.fill_event_page(event_page)\n assert filled_event_page is event_page\n\n # Test fill_event and fill_event_page again with inplace=False.\n\n # Test fill_event()\n filled_event = filler.fill_event(event, inplace=False)\n assert filled_event is not event\n # Test fill_event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n filled_event_page = 
filler.fill_event_page(event_page, inplace=False)\n assert filled_event_page is not event_page\n\n with event_model.Filler(reg, inplace=False) as filler:\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n name, filled_event = filler('event', event)\n assert filled_event is not event\n assert isinstance(event['data']['image'], str)\n\n event = copy.deepcopy(raw_event)\n # Test fill_event()\n filled_event = filler.fill_event(event)\n assert filled_event is not event\n # Test event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n _, filled_event_page = filler('event_page', event_page)\n assert filled_event_page is not event_page\n # Test fill_event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n filled_event_page = filler.fill_event_page(event_page)\n assert filled_event_page is not event_page\n\n # Test fill_event and fill_event_page again with inplace=True.\n\n # Test fill_event()\n filled_event = filler.fill_event(event, inplace=True)\n assert filled_event is event\n # Test fill_event_page()\n event_page = event_model.pack_event_page(copy.deepcopy(raw_event))\n filled_event_page = filler.fill_event_page(event_page, inplace=True)\n assert filled_event_page is event_page\n\n with pytest.warns(UserWarning):\n filler = event_model.Filler(reg)\n\n class OtherDummyHandler:\n \"Same as DummyHandler, but a different object to test mutating reg\"\n def __init__(self, resource_path, a, b):\n assert a == 1\n assert b == 2\n assert resource_path == str(tmp_path / \"stack.tiff\")\n\n def __call__(self, c, d):\n assert c == 3\n assert d == 4\n return numpy.ones((5, 5))\n\n with event_model.Filler(reg, inplace=False) as filler:\n with pytest.raises(event_model.EventModelTypeError):\n # Updating an existing key fails.\n filler.handler_registry['DUMMY'] = OtherDummyHandler\n with pytest.raises(event_model.EventModelTypeError):\n # Setting a new key fails.\n filler.handler_registry['SOMETHING_ELSE'] = OtherDummyHandler\n with pytest.raises(event_model.EventModelTypeError):\n # Deleting a item fails.\n del filler.handler_registry['DUMMY']\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n name, filled_event = filler('event', event)\n assert filled_event is not event\n assert isinstance(event['data']['image'], str)\n # Now there should be a handler instance in the cache.\n assert filler._handler_cache # implementation detail\n with pytest.raises(event_model.DuplicateHandler):\n filler.register_handler('DUMMY', OtherDummyHandler)\n filler.register_handler('DUMMY', OtherDummyHandler, overwrite=True)\n assert filler.handler_registry['DUMMY'] is OtherDummyHandler\n # Replacing the handler for a given spec should clear the cache.\n assert not filler._handler_cache # implementation detail\n # Filling should work the same....\n filler('start', run_bundle.start_doc)\n filler('descriptor', desc_bundle.descriptor_doc)\n filler('descriptor', desc_bundle_baseline.descriptor_doc)\n filler('resource', res_bundle.resource_doc)\n filler('datum', datum_doc)\n event = copy.deepcopy(raw_event)\n name, filled_event = filler('event', event)\n assert 
filled_event is not event\n assert isinstance(event['data']['image'], str)\n filler.deregister_handler('DUMMY')\n assert not filler.handler_registry\n assert not filler._handler_cache # implementation detail\n\n\ndef test_rechunk_event_pages():\n\n def event_page_gen(page_size, num_pages):\n \"\"\"\n Generator event_pages for testing.\n \"\"\"\n data_keys = ['x', 'y', 'z']\n array_keys = ['seq_num', 'time', 'uid']\n for _ in range(num_pages):\n yield {'descriptor': 'DESCRIPTOR',\n **{key: list(range(page_size)) for key in array_keys},\n 'data': {key: list(range(page_size)) for key in data_keys},\n 'timestamps': {key: list(range(page_size)) for key in data_keys},\n 'filled': {key: list(range(page_size)) for key in data_keys}}\n\n # Get a list of event pages of size 13.\n event_pages = list(event_page_gen(13, 31))\n # Change the size of the event_pages to size 7.\n event_pages_7 = list(event_model.rechunk_event_pages(event_pages, 7))\n assert [7] * 57 + [4] == [len(page['uid']) for page in event_pages_7]\n # Change the size back to 13.\n event_pages_13 = event_model.rechunk_event_pages(event_pages_7, 13)\n # Check that it is equal to the original list of event_pages.\n assert event_pages == list(event_pages_13)\n\n\ndef test_rechunk_datum_pages():\n\n def datum_page_gen(page_size, num_pages):\n \"\"\"\n Generator datum_pages for testing.\n \"\"\"\n data_keys = ['x', 'y', 'z']\n array_keys = ['datum_id']\n for _ in range(num_pages):\n yield {'resource': 'RESOURCE',\n **{key: list(range(page_size)) for key in array_keys},\n 'datum_kwargs': {key: list(range(page_size))\n for key in data_keys}}\n\n # Get a list of datum pages of size 13.\n datum_pages = list(datum_page_gen(13, 31))\n # Change the size of the datum_pages to size 7.\n datum_pages_7 = list(event_model.rechunk_datum_pages(datum_pages, 7))\n assert [7] * 57 + [4] == [len(page['datum_id']) for page in datum_pages_7]\n # Change the size back to 13.\n datum_pages_13 = event_model.rechunk_datum_pages(datum_pages_7, 13)\n # Check that it is equal to the original list of datum_pages.\n assert datum_pages == list(datum_pages_13)\n\n\ndef test_run_router(tmp_path):\n bundle = event_model.compose_run()\n docs = []\n start_doc, compose_descriptor, compose_resource, compose_stop = bundle\n docs.append(('start', start_doc))\n bundle = compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'},\n 'image': {'shape': [512, 512], 'dtype': 'number',\n 'source': '...', 'external': 'FILESTORE:'}},\n name='primary')\n primary_descriptor_doc, compose_primary_event, compose_event_page = bundle\n docs.append(('descriptor', primary_descriptor_doc))\n bundle = compose_descriptor(\n data_keys={'motor': {'shape': [], 'dtype': 'number', 'source': '...'}},\n name='baseline')\n baseline_descriptor_doc, compose_baseline_event, compose_event_page = bundle\n docs.append(('descriptor', baseline_descriptor_doc))\n bundle = compose_resource(\n spec='TIFF', root=str(tmp_path), resource_path='stack.tiff',\n resource_kwargs={})\n resource_doc, compose_datum, compose_datum_page = bundle\n docs.append(('resource', resource_doc))\n datum_doc = compose_datum(datum_kwargs={'slice': 5})\n docs.append(('datum', datum_doc))\n primary_event_doc = compose_primary_event(\n data={'motor': 0, 'image': datum_doc['datum_id']},\n timestamps={'motor': 0, 'image': 0}, filled={'image': False})\n docs.append(('event', primary_event_doc))\n baseline_event_doc = compose_baseline_event(\n data={'motor': 0},\n timestamps={'motor': 0})\n docs.append(('event', 
baseline_event_doc))\n stop_doc = compose_stop()\n docs.append(('stop', stop_doc))\n\n # Empty list of factories. Just make sure nothing blows up.\n rr = event_model.RunRouter([])\n for name, doc in docs:\n rr(name, doc)\n\n # A factory that rejects all runs.\n def null_factory(name, doc):\n return [], []\n\n rr = event_model.RunRouter([null_factory])\n for name, doc in docs:\n rr(name, doc)\n\n # A factory that accepts all runs.\n collected = []\n\n def collector(name, doc):\n if name == 'event_page':\n name = 'event'\n doc, = event_model.unpack_event_page(doc)\n elif name == 'datum_page':\n name = 'datum'\n doc, = event_model.unpack_datum_page(doc)\n collected.append((name, doc))\n\n def all_factory(name, doc):\n collector(name, doc)\n return [collector], []\n\n rr = event_model.RunRouter([all_factory])\n for name, doc in docs:\n rr(name, doc)\n\n assert collected == docs\n collected.clear()\n\n # A factory that returns a subfactory interested in 'baseline' only.\n def subfactory(name, doc):\n if doc.get('name') == 'baseline':\n return [collector]\n return []\n\n def factory_with_subfactory_only(name, doc):\n return [], [subfactory]\n\n rr = event_model.RunRouter([factory_with_subfactory_only])\n for name, doc in docs:\n rr(name, doc)\n\n expected_item = ('event', baseline_event_doc)\n unexpected_item = ('event', primary_event_doc)\n assert expected_item in collected\n assert unexpected_item not in collected\n collected.clear()\n\n # Test RunRouter with handler_registry.\n\n class FakeTiffHandler:\n def __init__(self, resource_path):\n assert resource_path == str(tmp_path / \"stack.tiff\")\n\n def __call__(self, slice):\n return numpy.ones((5, 5))\n\n reg = {'TIFF': FakeTiffHandler}\n\n def check_filled(name, doc):\n if name == 'event_page':\n for is_filled in doc['filled'].values():\n assert all(is_filled)\n elif name == 'event':\n for is_filled in doc['filled'].values():\n assert is_filled\n\n def check_not_filled(name, doc):\n if name == 'event_page':\n for is_filled in doc['filled'].values():\n assert not any(is_filled)\n elif name == 'event':\n for is_filled in doc['filled'].values():\n assert not is_filled\n\n def check_filled_factory(name, doc):\n return [check_filled], []\n\n def check_not_filled_factory(name, doc):\n return [check_not_filled], []\n\n # If reg is missing our spec (or just not given) docs pass through\n # unfilled.\n rr = event_model.RunRouter([check_not_filled_factory])\n for name, doc in docs:\n rr(name, doc)\n\n # If fill_or_fail is set to True and reg is missing our spec (or just not\n # given) we raise.\n rr = event_model.RunRouter([check_not_filled_factory], fill_or_fail=True)\n with pytest.raises(event_model.UndefinedAssetSpecification):\n for name, doc in docs:\n rr(name, doc)\n\n # If spec is provided, docs are filled, regardless of fill_or_fail.\n rr = event_model.RunRouter([check_filled_factory], reg)\n for name, doc in docs:\n rr(name, doc)\n\n rr = event_model.RunRouter([check_filled_factory], reg, fill_or_fail=True)\n for name, doc in docs:\n rr(name, doc)\n"
] |
[
[
"numpy.ones"
]
] |
akirasosa/pre-training-mol
|
[
"2fd65a959eee50e2eea260719633042ae37bb92c"
] |
[
"src/run_train.py"
] |
[
"from collections import OrderedDict\nfrom typing import Dict, Optional\n\nimport torch\nfrom omegaconf import DictConfig\n\nfrom mol.dimenet.dimenet import DimeNet\nfrom mol.dimenet.loader import AtomsBatch\nfrom mol.logging import configure_logging\nfrom mol.loss import mae_loss\nfrom mol.params import Params\nfrom mol.train_base import PLBaseModule, train\n\n\nclass PLModule(PLBaseModule):\n def __init__(self, hparams: DictConfig):\n super().__init__()\n self.hparams = hparams\n self.model = dimenet(self.hparams.pretrained_ckpt_path)\n\n def step(self, batch, prefix: str, model=None) -> Dict:\n batch = AtomsBatch(**batch)\n y_true = batch.mu.unsqueeze(-1)\n\n if model is None:\n y_pred = self.forward(batch)[:, :1]\n else:\n y_pred = model(batch)[:, :1]\n\n assert y_pred.shape == y_true.shape, f'{y_pred.shape}, {y_true.shape}'\n\n mae = mae_loss(y_pred, y_true)\n lmae = torch.log(mae)\n size = len(y_true)\n\n return {\n f'{prefix}_loss': lmae,\n f'{prefix}_mae': mae,\n f'{prefix}_size': size,\n }\n\n\ndef dimenet(ckpt_path: Optional[str]) -> DimeNet:\n model = DimeNet(\n num_targets=3, # But use only first one.\n return_hidden_outputs=False,\n )\n if ckpt_path is None:\n return model\n\n print(f'Load {ckpt_path}...')\n\n ckpt = torch.load(ckpt_path)\n\n new_dict = OrderedDict()\n for k, v in ckpt['state_dict'].items():\n if not k.startswith('ema_model'):\n continue\n new_dict[k[10:]] = v\n\n model.load_state_dict(new_dict)\n\n return model\n\n\nif __name__ == '__main__':\n configure_logging()\n params = Params.load()\n train(PLModule, params)\n"
] |
[
[
"torch.log",
"torch.load"
]
] |
KenMighell/asdf
|
[
"aae8d9aeb5ff0bfe7248bfa595f256d4756ade18"
] |
[
"asdf/tests/test_reference.py"
] |
[
"import io\nimport os\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nimport asdf\nfrom asdf import reference, util\nfrom asdf.tags.core import ndarray\n\nfrom .helpers import assert_tree_match\n\n\ndef test_external_reference(tmpdir):\n exttree = {\n \"cool_stuff\": {\"a\": np.array([0, 1, 2], float), \"b\": np.array([3, 4, 5], float)},\n \"list_of_stuff\": [\"foobar\", 42, np.array([7, 8, 9], float)],\n }\n external_path = os.path.join(str(tmpdir), \"external.asdf\")\n ext = asdf.AsdfFile(exttree)\n # Since we're testing with small arrays, force all arrays to be stored\n # in internal blocks rather than letting some of them be automatically put\n # inline.\n ext.write_to(external_path, all_array_storage=\"internal\")\n\n external_path = os.path.join(str(tmpdir), \"external2.asdf\")\n ff = asdf.AsdfFile(exttree)\n ff.write_to(external_path, all_array_storage=\"internal\")\n\n tree = {\n # The special name \"data\" here must be an array. This is\n # included so that such validation can be ignored when we just\n # have a \"$ref\".\n \"data\": {\"$ref\": \"external.asdf#/cool_stuff/a\"},\n \"science_data\": {\"$ref\": \"external.asdf#/cool_stuff/a\"},\n \"science_data2\": {\"$ref\": \"external2.asdf#/cool_stuff/a\"},\n \"foobar\": {\n \"$ref\": \"external.asdf#/list_of_stuff/0\",\n },\n \"answer\": {\"$ref\": \"external.asdf#/list_of_stuff/1\"},\n \"array\": {\n \"$ref\": \"external.asdf#/list_of_stuff/2\",\n },\n \"whole_thing\": {\"$ref\": \"external.asdf#\"},\n \"myself\": {\n \"$ref\": \"#\",\n },\n \"internal\": {\"$ref\": \"#science_data\"},\n }\n\n def do_asserts(ff):\n assert \"unloaded\" in repr(ff.tree[\"science_data\"])\n assert \"unloaded\" in str(ff.tree[\"science_data\"])\n assert len(ff._external_asdf_by_uri) == 0\n\n assert_array_equal(ff.tree[\"science_data\"], exttree[\"cool_stuff\"][\"a\"])\n assert len(ff._external_asdf_by_uri) == 1\n with pytest.raises((ValueError, RuntimeError)):\n # Assignment destination is readonly\n ff.tree[\"science_data\"][0] = 42\n\n assert_array_equal(ff.tree[\"science_data2\"], exttree[\"cool_stuff\"][\"a\"])\n assert len(ff._external_asdf_by_uri) == 2\n\n assert ff.tree[\"foobar\"]() == \"foobar\"\n assert ff.tree[\"answer\"]() == 42\n assert_array_equal(ff.tree[\"array\"], exttree[\"list_of_stuff\"][2])\n\n assert_tree_match(ff.tree[\"whole_thing\"](), exttree)\n\n assert_array_equal(ff.tree[\"whole_thing\"][\"cool_stuff\"][\"a\"], exttree[\"cool_stuff\"][\"a\"])\n\n assert_array_equal(ff.tree[\"myself\"][\"science_data\"], exttree[\"cool_stuff\"][\"a\"])\n # Make sure that referencing oneself doesn't make another call\n # to disk.\n assert len(ff._external_asdf_by_uri) == 2\n\n assert_array_equal(ff.tree[\"internal\"], exttree[\"cool_stuff\"][\"a\"])\n\n with asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.join(str(tmpdir), \"main.asdf\"))) as ff:\n do_asserts(ff)\n\n internal_path = os.path.join(str(tmpdir), \"main.asdf\")\n ff.write_to(internal_path)\n\n with asdf.open(internal_path) as ff:\n do_asserts(ff)\n\n with asdf.open(internal_path) as ff:\n assert len(ff._external_asdf_by_uri) == 0\n ff.resolve_references()\n assert len(ff._external_asdf_by_uri) == 2\n\n assert isinstance(ff.tree[\"data\"], ndarray.NDArrayType)\n assert isinstance(ff.tree[\"science_data\"], ndarray.NDArrayType)\n\n assert_array_equal(ff.tree[\"science_data\"], exttree[\"cool_stuff\"][\"a\"])\n assert_array_equal(ff.tree[\"science_data2\"], exttree[\"cool_stuff\"][\"a\"])\n\n assert ff.tree[\"foobar\"] == \"foobar\"\n 
assert ff.tree[\"answer\"] == 42\n assert_array_equal(ff.tree[\"array\"], exttree[\"list_of_stuff\"][2])\n\n assert_tree_match(ff.tree[\"whole_thing\"], exttree)\n\n assert_array_equal(ff.tree[\"whole_thing\"][\"cool_stuff\"][\"a\"], exttree[\"cool_stuff\"][\"a\"])\n\n assert_array_equal(ff.tree[\"myself\"][\"science_data\"], exttree[\"cool_stuff\"][\"a\"])\n\n assert_array_equal(ff.tree[\"internal\"], exttree[\"cool_stuff\"][\"a\"])\n\n\n@pytest.mark.remote_data\ndef test_external_reference_invalid(tmpdir):\n tree = {\"foo\": {\"$ref\": \"fail.asdf\"}}\n\n ff = asdf.AsdfFile(tree)\n with pytest.raises(ValueError):\n ff.resolve_references()\n\n ff = asdf.AsdfFile(tree, uri=\"http://httpstat.us/404\")\n with pytest.raises(IOError):\n ff.resolve_references()\n\n ff = asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.join(str(tmpdir), \"main.asdf\")))\n with pytest.raises(IOError):\n ff.resolve_references()\n\n\ndef test_external_reference_invalid_fragment(tmpdir):\n exttree = {\"list_of_stuff\": [\"foobar\", 42, np.array([7, 8, 9], float)]}\n external_path = os.path.join(str(tmpdir), \"external.asdf\")\n ff = asdf.AsdfFile(exttree)\n ff.write_to(external_path)\n\n tree = {\"foo\": {\"$ref\": \"external.asdf#/list_of_stuff/a\"}}\n\n with asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.join(str(tmpdir), \"main.asdf\"))) as ff:\n with pytest.raises(ValueError):\n ff.resolve_references()\n\n tree = {\"foo\": {\"$ref\": \"external.asdf#/list_of_stuff/3\"}}\n\n with asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.join(str(tmpdir), \"main.asdf\"))) as ff:\n with pytest.raises(ValueError):\n ff.resolve_references()\n\n\ndef test_make_reference(tmpdir):\n exttree = {\n # Include some ~ and / in the name to make sure that escaping\n # is working correctly\n \"f~o~o/\": {\"a\": np.array([0, 1, 2], float), \"b\": np.array([3, 4, 5], float)}\n }\n external_path = os.path.join(str(tmpdir), \"external.asdf\")\n ext = asdf.AsdfFile(exttree)\n ext.write_to(external_path)\n\n with asdf.open(external_path) as ext:\n ff = asdf.AsdfFile()\n ff.tree[\"ref\"] = ext.make_reference([\"f~o~o/\", \"a\"])\n assert_array_equal(ff.tree[\"ref\"], ext.tree[\"f~o~o/\"][\"a\"])\n\n ff.write_to(os.path.join(str(tmpdir), \"source.asdf\"))\n\n with asdf.open(os.path.join(str(tmpdir), \"source.asdf\")) as ff:\n assert ff.tree[\"ref\"]._uri == \"external.asdf#f~0o~0o~1/a\"\n\n\ndef test_internal_reference(tmpdir):\n testfile = os.path.join(str(tmpdir), \"test.asdf\")\n\n tree = {\"foo\": 2, \"bar\": {\"$ref\": \"#\"}}\n\n ff = asdf.AsdfFile(tree)\n ff.find_references()\n assert isinstance(ff.tree[\"bar\"], reference.Reference)\n ff.resolve_references()\n assert ff.tree[\"bar\"][\"foo\"] == 2\n\n tree = {\"foo\": 2}\n ff = asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.abspath(testfile)))\n ff.tree[\"bar\"] = ff.make_reference([])\n buff = io.BytesIO()\n ff.write_to(buff)\n buff.seek(0)\n ff = asdf.AsdfFile()\n content = asdf.AsdfFile()._open_impl(ff, buff, _get_yaml_content=True)\n assert b\"{$ref: ''}\" in content\n\n\ndef test_implicit_internal_reference(tmpdir):\n target = {\"foo\": \"bar\"}\n nested_in_dict = {\"target\": target}\n nested_in_list = [target]\n tree = {\"target\": target, \"nested_in_dict\": nested_in_dict, \"nested_in_list\": nested_in_list}\n\n assert tree[\"target\"] is tree[\"nested_in_dict\"][\"target\"]\n assert tree[\"target\"] is tree[\"nested_in_list\"][0]\n\n af = asdf.AsdfFile(tree)\n\n assert af[\"target\"] is af[\"nested_in_dict\"][\"target\"]\n assert af[\"target\"] is 
af[\"nested_in_list\"][0]\n\n output_path = os.path.join(str(tmpdir), \"test.asdf\")\n af.write_to(output_path)\n with asdf.open(output_path) as af:\n assert af[\"target\"] is af[\"nested_in_dict\"][\"target\"]\n assert af[\"target\"] is af[\"nested_in_list\"][0]\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.array"
]
] |
SpyRefused/componist
|
[
"002cb8d8fef3d5406d1cb3be4a23fe23ec32b2f5"
] |
[
"src/tutorial/save_restore_models.py"
] |
[
"# 1) Save and restore models\n# Model progress can be saved during—and after—training. This means a model can resume where it left\n# off and avoid long training times. Saving also means you can share your model and others can recreate your work.\n# When publishing research models and techniques, most machine learning practitioners share:\n# - code to create the model, and\n# - the trained weights, or parameters, for the model Sharing this data helps\n# others understand how the model works and try it themselves with new data.\n\n# 1.1) Options\n# There are different ways to save TensorFlow models—depending on the API you're using.\n# This guide uses tf.keras, a high-level API to build and train models in TensorFlow. For other approaches,\n# see the TensorFlow Save and Restore guide or Saving in eager.\n\n\n# 2) Setup\n# 2.1) Installs and imports\n# Install and import TensorFlow and dependencies:\n\n# !pip install -q h5py pyyaml\n\n# 2.1) Get an example dataset\n# We'll use the MNIST dataset to train our model to demonstrate saving weights.\n# To speed up these demonstration runs, only use the first 1000 examples:\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\ntf.__version__\n\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\n\ntrain_labels = train_labels[:1000]\ntest_labels = test_labels[:1000]\n\ntrain_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0\ntest_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0\n\n\n# 2.2) Define a model\n# Let's build a simple model we'll use to demonstrate saving and loading weights.\n\n# Returns a short sequential model\ndef create_model():\n model = tf.keras.models.Sequential([\n keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(784,)),\n keras.layers.Dropout(0.2),\n keras.layers.Dense(10, activation=tf.nn.softmax)\n ])\n\n model.compile(optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.sparse_categorical_crossentropy,\n metrics=['accuracy'])\n\n return model\n\n\n# Create a basic model instance\nmodel = create_model()\nmodel.summary()\n\n# 3) Save checkpoints during training\n# The primary use case is to automatically save checkpoints during and at the end of training.\n# This way you can use a trained model without having to retrain it, or pick-up training where\n# you left of—in case the training process was interrupted.\n# tf.keras.callbacks.ModelCheckpoint is a callback that performs this task.\n# The callback takes a couple of arguments to configure checkpointing.\n\n# 3.1) Checkpoint callback usage\n# Train the model and pass it the ModelCheckpoint callback:\n\ncheckpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create checkpoint callback\ncp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\nmodel = create_model()\n\nmodel.fit(train_images, train_labels, epochs=10,\n validation_data=(test_images, test_labels),\n callbacks=[cp_callback]) # pass callback to training\n\n# This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch:\n# !ls {checkpoint_dir}\n\n# Create a new, untrained model. When restoring a model from only weights, you must have a model with the same\n# architecture as the original model. 
Since it's the same model architecture,\n# we can share weights despite that it's a different instance of the model.\n# Now rebuild a fresh, untrained model, and evaluate it on the test set.\n# An untrained model will perform at chance levels (~10% accuracy):\n\nmodel = create_model()\n\nloss, acc = model.evaluate(test_images, test_labels)\nprint(\"Untrained model, accuracy: {:5.2f}%\".format(100 * acc))\n\n# Then load the weights from the checkpoint, and re-evaluate:\n\nmodel.load_weights(checkpoint_path)\nloss, acc = model.evaluate(test_images, test_labels)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100 * acc))\n\n# 3.2) Checkpoint callback options\n# The callback provides several options to give the resulting checkpoints unique names,\n# and adjust the checkpointing frequency.\n#\n# Train a new model, and save uniquely named checkpoints once every 5-epochs:\n\n# include the epoch in the file name. (uses `str.format`)\ncheckpoint_path = \"training_2/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n checkpoint_path, verbose=1, save_weights_only=True,\n # Save weights, every 5-epochs.\n period=5)\n\nmodel = create_model()\nmodel.fit(train_images, train_labels,\n epochs=50, callbacks=[cp_callback],\n validation_data=(test_images, test_labels),\n verbose=0)\n\n# Now, have a look at the resulting checkpoints (sorting by modification date):\n\nimport pathlib\n\n# Sort the checkpoints by modification time.\ncheckpoints = pathlib.Path(checkpoint_dir).glob(\"*.index\")\ncheckpoints = sorted(checkpoints, key=lambda cp: cp.stat().st_mtime)\ncheckpoints = [cp.with_suffix('') for cp in checkpoints]\nlatest = str(checkpoints[-1])\ncheckpoints\n\n# To test, reset the model and load the latest checkpoint:\n\nmodel = create_model()\nmodel.load_weights(latest)\nloss, acc = model.evaluate(test_images, test_labels)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n# 4) What are these files?\n# The above code stores the weights to a collection of checkpoint-formatted files that contain only the trained\n# weights in a binary format. Checkpoints contain: * One or more shards that contain your model's weights.\n# * An index file that indicates which weights are stored in a which shard.\n#\n# If you are only training a model on a single machine, you'll have one shard with the suffix: .data-00000-of-00001\n\n# 5) Manually save weights\n# Above you saw how to load the weights into a model.\n#\n# Manually saving the weights is just as simple, use the Model.save_weights method.\n\n# Save the weights\nmodel.save_weights('./checkpoints/my_checkpoint')\n\n# Restore the weights\nmodel = create_model()\nmodel.load_weights('./checkpoints/my_checkpoint')\n\nloss,acc = model.evaluate(test_images, test_labels)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n# 6) Save the entire model\n# The entire model can be saved to a file that contains the weight values, the model's configuration,\n# and even the optimizer's configuration. 
This allows you to checkpoint a model and resume training later—from\n# the exact same state—without access to the original code.\n#\n# Saving a fully-functional model in Keras is very useful—you can load them in TensorFlow.js\n# and then train and run them in web browsers.\n#\n# Keras provides a basic save format using the HDF5 standard.\n# For our purposes, the saved model can be treated as a single binary blob.\n\nmodel = create_model()\n\nmodel.fit(train_images, train_labels, epochs=5)\n\n# Save entire model to a HDF5 file\nmodel.save('my_model.h5')\n\n# Now recreate the model from that file:\n\n# Recreate the exact same model, including weights and optimizer.\n\nnew_model = keras.models.load_model('my_model.h5')\nnew_model.summary()\n\n# Check its accuracy:\n\nloss, acc = new_model.evaluate(test_images, test_labels)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n# This technique saves everything:\n# - The weight values\n# - The model's configuration(architecture)\n# - The optimizer configuration\n#\n# Keras saves models by inspecting the architecture. Currently, it is not able to save TensorFlow optimizers\n# (from tf.train). When using those you will need to re-compile the model after loading,\n# and you will loose the state of the optimizer.\n\n"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout"
]
] |
nagrao/CascadeTabNet
|
[
"2ceb6371d4bef23d32a3be5f61cae0bea86fc469"
] |
[
"Table Structure Recognition/Functions/line_detection.py"
] |
[
"import cv2\r\nimport numpy as np\r\n\r\n# Input : Image\r\n# Output : hor,ver \r\ndef line_detection(image):\r\n\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, 1)\r\n bw = cv2.bitwise_not(bw)\r\n ## To visualize image after thresholding ##\r\n cv2.imshow(\"bw\",bw)\r\n cv2.waitKey(0)\r\n ###########################################\r\n horizontal = bw.copy()\r\n vertical = bw.copy()\r\n img = image.copy()\r\n # [horizontal lines]\r\n # Create structure element for extracting horizontal lines through morphology operations\r\n horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 1))\r\n\r\n # Apply morphology operations\r\n horizontal = cv2.erode(horizontal, horizontalStructure)\r\n horizontal = cv2.dilate(horizontal, horizontalStructure)\r\n\r\n horizontal = cv2.dilate(horizontal, (1,1), iterations=5)\r\n horizontal = cv2.erode(horizontal, (1,1), iterations=5)\r\n\r\n ## Uncomment to visualize highlighted Horizontal lines\r\n # cv2.imshow(\"horizontal\",horizontal)\r\n # cv2.waitKey(0)\r\n\r\n # HoughlinesP function to detect horizontal lines\r\n hor_lines = cv2.HoughLinesP(horizontal,rho=1,theta=np.pi/180,threshold=100,minLineLength=30,maxLineGap=3)\r\n if hor_lines is None:\r\n return None,None\r\n temp_line = []\r\n for line in hor_lines:\r\n for x1,y1,x2,y2 in line:\r\n temp_line.append([x1,y1-5,x2,y2-5])\r\n\r\n # Sorting the list of detected lines by Y1\r\n hor_lines = sorted(temp_line,key=lambda x: x[1])\r\n\r\n ## Uncomment this part to visualize the lines detected on the image ##\r\n # print(len(hor_lines))\r\n # for x1, y1, x2, y2 in hor_lines:\r\n # cv2.line(image, (x1,y1), (x2,y2), (0, 255, 0), 1)\r\n\r\n \r\n # print(image.shape)\r\n # cv2.imshow(\"image\",image)\r\n # cv2.waitKey(0)\r\n ####################################################################\r\n\r\n ## Selection of best lines from all the horizontal lines detected ##\r\n lasty1 = -111111\r\n lines_x1 = []\r\n lines_x2 = []\r\n hor = []\r\n i=0\r\n for x1,y1,x2,y2 in hor_lines:\r\n if y1 >= lasty1 and y1 <= lasty1 + 10:\r\n lines_x1.append(x1)\r\n lines_x2.append(x2)\r\n else:\r\n if (i != 0 and len(lines_x1) is not 0):\r\n hor.append([min(lines_x1),lasty1,max(lines_x2),lasty1])\r\n lasty1 = y1\r\n lines_x1 = []\r\n lines_x2 = []\r\n lines_x1.append(x1)\r\n lines_x2.append(x2)\r\n i+=1\r\n hor.append([min(lines_x1),lasty1,max(lines_x2),lasty1])\r\n #####################################################################\r\n\r\n\r\n # [vertical lines]\r\n # Create structure element for extracting vertical lines through morphology operations\r\n verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 15))\r\n\r\n # Apply morphology operations\r\n vertical = cv2.erode(vertical, verticalStructure)\r\n vertical = cv2.dilate(vertical, verticalStructure)\r\n\r\n vertical = cv2.dilate(vertical, (1,1), iterations=8)\r\n vertical = cv2.erode(vertical, (1,1), iterations=7)\r\n\r\n ######## Preprocessing Vertical Lines ###############\r\n # cv2.imshow(\"vertical\",vertical)\r\n # cv2.waitKey(0)\r\n #####################################################\r\n\r\n # HoughlinesP function to detect vertical lines\r\n # ver_lines = cv2.HoughLinesP(vertical,rho=1,theta=np.pi/180,threshold=20,minLineLength=20,maxLineGap=2)\r\n ver_lines = cv2.HoughLinesP(vertical, 1, np.pi/180, 20, np.array([]), 20, 2)\r\n if ver_lines is None:\r\n return None,None\r\n temp_line = []\r\n for line in ver_lines:\r\n for x1,y1,x2,y2 in 
line:\r\n temp_line.append([x1,y1,x2,y2])\r\n\r\n # Sorting the list of detected lines by X1\r\n ver_lines = sorted(temp_line,key=lambda x: x[0])\r\n\r\n ## Uncomment this part to visualize the lines detected on the image ##\r\n # print(len(ver_lines))\r\n # for x1, y1, x2, y2 in ver_lines:\r\n # cv2.line(image, (x1,y1-5), (x2,y2-5), (0, 255, 0), 1)\r\n\r\n \r\n # print(image.shape)\r\n # cv2.imshow(\"image\",image)\r\n # cv2.waitKey(0)\r\n ####################################################################\r\n\r\n ## Selection of best lines from all the vertical lines detected ##\r\n lastx1 = -111111\r\n lines_y1 = []\r\n lines_y2 = []\r\n ver = []\r\n count = 0\r\n lasty1 = -11111\r\n lasty2 = -11111\r\n for x1,y1,x2,y2 in ver_lines:\r\n if x1 >= lastx1 and x1 <= lastx1 + 15 and not (((min(y1,y2)<min(lasty1,lasty2)-20 or min(y1,y2)<min(lasty1,lasty2)+20)) and ((max(y1,y2)<max(lasty1,lasty2)-20 or max(y1,y2)<max(lasty1,lasty2)+20))):\r\n lines_y1.append(y1)\r\n lines_y2.append(y2)\r\n # lasty1 = y1\r\n # lasty2 = y2\r\n else:\r\n if (count != 0 and len(lines_y1) is not 0):\r\n ver.append([lastx1,min(lines_y2)-5,lastx1,max(lines_y1)-5])\r\n lastx1 = x1\r\n lines_y1 = []\r\n lines_y2 = []\r\n lines_y1.append(y1)\r\n lines_y2.append(y2)\r\n count += 1\r\n lasty1 = -11111\r\n lasty2 = -11111\r\n ver.append([lastx1,min(lines_y2)-5,lastx1,max(lines_y1)-5])\r\n #################################################################\r\n\r\n\r\n ############ Visualization of Lines After Post Processing ############\r\n # for x1, y1, x2, y2 in ver:\r\n # cv2.line(img, (x1,y1), (x2,y2), (0, 255, 0), 1)\r\n\r\n # for x1, y1, x2, y2 in hor:\r\n # cv2.line(img, (x1,y1), (x2,y2), (0, 255, 0), 1)\r\n \r\n # cv2.imshow(\"image\",img)\r\n # cv2.waitKey(0)\r\n #######################################################################\r\n\r\n return hor,ver\r\n\r\n# line_detection(cv2.imread('path to image'))"
] |
[
[
"numpy.array"
]
] |
ngocjr7/geneticpython
|
[
"4b4157523ce13b3da56cef61282cb0a984cd317b"
] |
[
"geneticpython/tools/math.py"
] |
[
"from geneticpython.utils.typing import SimpleSolution\nfrom geneticpython.utils.validation import check_simple_solution\n\nfrom copy import deepcopy\nimport math\nimport numpy as np\n\ndef is_dominated(a: SimpleSolution, b: SimpleSolution) -> bool:\n \"\"\"\n Return True if a dominate b, otherwise return False\n \"\"\"\n if len(a) != len(b):\n msg = 'The length of objective in two solutions is not the same:\\\n a has {} objectives, b has {} objectives'\n msg = msg.format(len(a), len(b))\n raise ValueError(msg)\n\n dominated = False\n for i in range(len(a)):\n if a[i] > b[i]:\n return False\n elif a[i] < b[i]:\n dominated = True\n\n return dominated\n\ndef euclidean_distance(a: SimpleSolution, b: SimpleSolution):\n \"\"\"euclidean_distance.\n\n Args:\n a (SimpleSolution): a\n b (SimpleSolution): b\n \"\"\"\n if len(a) != len(b):\n msg = 'The length of objective in two solutions is not the same:\\\n a has {} objectives, b has {} objectives'\n msg = msg.format(len(a), len(b))\n raise ValueError(msg)\n a, b = check_simple_solution(a, b)\n\n return np.linalg.norm(a - b)\n \n \ndef l1_distance(a: SimpleSolution, b: SimpleSolution):\n if len(a) != len(b):\n msg = 'The length of objective in two solutions is not the same:\\\n a has {} objectives, b has {} objectives'\n msg = msg.format(len(a), len(b))\n raise ValueError(msg)\n a, b = check_simple_solution(a, b)\n\n return np.linalg.norm(a - b, ord=1)\n"
] |
[
[
"numpy.linalg.norm"
]
] |
ikekilinc/dnnSuperBinoculars
|
[
"b0fc584b1d449961bdbab37cf9d72c0b466f197f"
] |
[
"serverPythonClient/SRNTT/SRNTT/model.py"
] |
[
"import tensorflow as tf\nfrom .tensorlayer import *\nfrom .tensorlayer.layers import *\nfrom os.path import join, exists, split, isfile\nfrom os import makedirs, environ\nfrom shutil import rmtree\nfrom .vgg19 import *\nfrom .swap import *\nfrom glob import glob\nfrom scipy.misc import imread, imresize, imsave, imrotate\nfrom .download_vgg19_model import *\nfrom bicubic_kernel import back_projection_loss\nimport logging\nfrom scipy.io import savemat\n\n# set logging level for TensorFlow\nenviron['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n# set logging\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',\n filename='SRNTT.log'\n)\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\n\n# some global variables\nMODEL_FOLDER = 'model'\nSAMPLE_FOLDER = 'sample'\nSRNTT_MODEL_NAMES = {\n 'init': 'srntt_init.npz',\n 'conditional_texture_transfer': 'srntt.npz',\n 'content_extractor': 'upscale.npz',\n 'discriminator': 'discrim.npz',\n 'weighted': 'srntt_weighted.npz'\n}\n\n\nclass SRNTT(object):\n\n MAX_IMAGE_SIZE = 2046 ** 2\n\n def __init__(\n self,\n srntt_model_path='models/SRNTT',\n vgg19_model_path='models/VGG19/imagenet-vgg-verydeep-19.mat',\n save_dir=None,\n num_res_blocks=16,\n ):\n self.srntt_model_path = srntt_model_path\n self.vgg19_model_path = vgg19_model_path\n self.save_dir = save_dir\n self.num_res_blocks = int(num_res_blocks)\n self.is_model_built = False\n download_vgg19(self.vgg19_model_path)\n\n def model(\n self,\n inputs, # LR images, in range of [-1, 1]\n maps=None, # texture feature maps after texture swapping\n weights=None, # weights of each pixel on the maps\n is_train=True,\n reuse=False,\n concat=False # concatenate weights to feature\n ):\n\n # ********************************************************************************\n # *** content extractor\n # ********************************************************************************\n # print('\\tcontent extractor')\n w_init = tf.random_normal_initializer(stddev=0.02)\n b_init = None\n g_init = tf.random_normal_initializer(1., 0.02)\n with tf.variable_scope(\"content_extractor\", reuse=reuse):\n layers.set_name_reuse(reuse)\n net = InputLayer(inputs=inputs, name='input')\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,\n padding='SAME', W_init=w_init, name='n64s1/c')\n temp = net\n for i in range(16): # residual blocks\n net_ = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c1/%s' % i)\n net_ = BatchNormLayer(layer=net_, act=tf.nn.relu, is_train=is_train,\n gamma_init=g_init, name='n64s1/b1/%s' % i)\n net_ = Conv2d(net=net_, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c2/%s' % i)\n net_ = BatchNormLayer(layer=net_, is_train=is_train,\n gamma_init=g_init, name='n64s1/b2/%s' % i)\n net_ = ElementwiseLayer(layer=[net, net_], combine_fn=tf.add, name='b_residual_add/%s' % i)\n net = net_\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c/m')\n net = BatchNormLayer(layer=net, is_train=is_train, gamma_init=g_init, name='n64s1/b/m')\n content_feature = 
ElementwiseLayer(layer=[net, temp], combine_fn=tf.add, name='add3')\n\n # upscaling (4x) for texture extractor\n net = Conv2d(net=content_feature, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, name='n256s1/1')\n net = SubpixelConv2d(net=net, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/1')\n net = Conv2d(net=net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, name='n256s1/2')\n net = SubpixelConv2d(net=net, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')\n\n # output value range is [-1, 1]\n net_upscale = Conv2d(net=net, n_filter=3, filter_size=(1, 1), strides=(1, 1), act=tf.nn.tanh,\n padding='SAME', W_init=w_init, name='out')\n if maps is None:\n return net_upscale, None\n\n # ********************************************************************************\n # *** conditional texture transfer\n # ********************************************************************************\n with tf.variable_scope(\"texture_transfer\", reuse=reuse):\n layers.set_name_reuse(reuse)\n assert isinstance(maps, (list, tuple))\n # fusion content and texture maps at the smallest scale\n # print('\\tfusion content and texture maps at SMALL scale')\n map_in = InputLayer(inputs=content_feature.outputs, name='content_feature_maps')\n if weights is not None and concat:\n self.a1 = tf.get_variable(dtype=tf.float32, name='small/a', initializer=1.)\n self.b1 = tf.get_variable(dtype=tf.float32, name='small/b', initializer=0.)\n map_ref = maps[0] * tf.nn.sigmoid(self.a1 * weights + self.b1)\n else:\n map_ref = maps[0]\n map_ref = InputLayer(inputs=map_ref, name='reference_feature_maps1')\n net = ConcatLayer(layer=[map_in, map_ref], concat_dim=-1, name='concatenation1')\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,\n padding='SAME', W_init=w_init, name='small/conv1')\n for i in range(self.num_res_blocks): # residual blocks\n net_ = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='small/resblock_%d/conv1' % i)\n net_ = BatchNormLayer(layer=net_, act=tf.nn.relu, is_train=is_train,\n gamma_init=g_init, name='small/resblock_%d/bn1' % i)\n net_ = Conv2d(net=net_, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='small/resblock_%d/conv2' % i)\n net_ = BatchNormLayer(layer=net_, is_train=is_train,\n gamma_init=g_init, name='small/resblock_%d/bn2' % i)\n net_ = ElementwiseLayer(layer=[net, net_], combine_fn=tf.add, name='small/resblock_%d/add' % i)\n net = net_\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='small/conv2')\n net = BatchNormLayer(layer=net, is_train=is_train, gamma_init=g_init, name='small/bn2')\n net = ElementwiseLayer(layer=[net, map_in], combine_fn=tf.add, name='small/add2')\n # upscaling (2x)\n net = Conv2d(net=net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, name='small/conv3')\n net = SubpixelConv2d(net=net, scale=2, n_out_channel=None, act=tf.nn.relu, name='small/subpixel')\n\n # fusion content and texture maps at the medium scale\n # print('\\tfusion content and texture maps at MEDIUM scale')\n map_in = net\n if weights is not None and concat:\n self.a2 = tf.get_variable(dtype=tf.float32, name='medium/a', initializer=1.)\n self.b2 = 
tf.get_variable(dtype=tf.float32, name='medium/b', initializer=0.)\n map_ref = maps[1] * tf.nn.sigmoid(self.a2 * tf.image.resize_bicubic(\n weights, [weights.get_shape()[1] * 2, weights.get_shape()[2] * 2]) + self.b2)\n else:\n map_ref = maps[1]\n map_ref = InputLayer(inputs=map_ref, name='reference_feature_maps2')\n net = ConcatLayer(layer=[map_in, map_ref], concat_dim=-1, name='concatenation2')\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,\n padding='SAME', W_init=w_init, name='medium/conv1')\n for i in range(int(self.num_res_blocks / 2)): # residual blocks\n net_ = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='medium/resblock_%d/conv1' % i)\n net_ = BatchNormLayer(layer=net_, act=tf.nn.relu, is_train=is_train,\n gamma_init=g_init, name='medium/resblock_%d/bn1' % i)\n net_ = Conv2d(net=net_, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='medium/resblock_%d/conv2' % i)\n net_ = BatchNormLayer(layer=net_, is_train=is_train,\n gamma_init=g_init, name='medium/resblock_%d/bn2' % i)\n net_ = ElementwiseLayer(layer=[net, net_], combine_fn=tf.add, name='medium/resblock_%d/add' % i)\n net = net_\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='medium/conv2')\n net = BatchNormLayer(layer=net, is_train=is_train, gamma_init=g_init, name='medium/bn2')\n net = ElementwiseLayer(layer=[net, map_in], combine_fn=tf.add, name='medium/add2')\n # upscaling (2x)\n net = Conv2d(net=net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, name='medium/conv3')\n net = SubpixelConv2d(net=net, scale=2, n_out_channel=None, act=tf.nn.relu, name='medium/subpixel')\n\n # fusion content and texture maps at the large scale\n # print('\\tfusion content and texture maps at LARGE scale')\n map_in = net\n if weights is not None and concat:\n self.a3 = tf.get_variable(dtype=tf.float32, name='large/a', initializer=1.)\n self.b3 = tf.get_variable(dtype=tf.float32, name='large/b', initializer=0.)\n map_ref = maps[2] * tf.nn.sigmoid(self.a3 * tf.image.resize_bicubic(\n weights, [weights.get_shape()[1] * 4, weights.get_shape()[2] * 4]) + self.b3)\n else:\n map_ref = maps[2]\n map_ref = InputLayer(inputs=map_ref, name='reference_feature_maps3')\n net = ConcatLayer(layer=[map_in, map_ref], concat_dim=-1, name='concatenation3')\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,\n padding='SAME', W_init=w_init, name='large/conv1')\n for i in range(int(self.num_res_blocks / 4)): # residual blocks\n net_ = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='large/resblock_%d/conv1' % i)\n net_ = BatchNormLayer(layer=net_, act=tf.nn.relu, is_train=is_train,\n gamma_init=g_init, name='large/resblock_%d/bn1' % i)\n net_ = Conv2d(net=net_, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='large/resblock_%d/conv2' % i)\n net_ = BatchNormLayer(layer=net_, is_train=is_train,\n gamma_init=g_init, name='large/resblock_%d/bn2' % i)\n net_ = ElementwiseLayer(layer=[net, net_], combine_fn=tf.add, name='large/resblock_%d/add' % i)\n net = net_\n net = Conv2d(net=net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, 
b_init=b_init, name='large/conv2')\n net = BatchNormLayer(layer=net, is_train=is_train, gamma_init=g_init, name='large/bn2')\n net = ElementwiseLayer(layer=[net, map_in], combine_fn=tf.add, name='large/add2')\n net = Conv2d(net=net, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, name='large/conv3')\n # net = BatchNormLayer(layer=net, is_train=is_train, gamma_init=g_init, name='large/bn2')\n\n # output of SRNTT, range [-1, 1]\n net_srntt = Conv2d(net=net, n_filter=3, filter_size=(1, 1), strides=(1, 1), act=tf.nn.tanh,\n padding='SAME', W_init=w_init, name='out')\n\n return net_upscale, net_srntt\n\n def discriminator(self, input_image, is_train=True, reuse=False):\n w_init = tf.random_normal_initializer(stddev=0.02)\n b_init = None\n g_init = tf.random_normal_initializer(1., 0.02)\n lrelu = lambda x: act.lrelu(x, 0.2)\n df_dim = 32\n with tf.variable_scope('discriminator', reuse=reuse):\n layers.set_name_reuse(reuse)\n net = InputLayer(inputs=input_image, name='input')\n for i in range(5):\n n_channels = df_dim * 2 ** i\n net = Conv2d(net=net, n_filter=n_channels, filter_size=(3, 3), strides=(1, 1), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='n%ds1/c' % n_channels)\n net = BatchNormLayer(layer=net, act=lrelu, is_train=is_train, gamma_init=g_init, name='n%ds1/b' % n_channels)\n net = Conv2d(net=net, n_filter=n_channels, filter_size=(3, 3), strides=(2, 2), act=None,\n padding='SAME', W_init=w_init, b_init=b_init, name='n%ds2/c' % n_channels)\n net = BatchNormLayer(layer=net, act=lrelu, is_train=is_train, gamma_init=g_init,\n name='n%ds2/b' % n_channels)\n net = FlattenLayer(layer=net, name='flatten')\n net = DenseLayer(layer=net, n_units=1024, act=lrelu, name='fc2014')\n net = DenseLayer(net, n_units=1, name='output')\n logits = net.outputs\n net.outputs = tf.nn.sigmoid(net.outputs)\n\n return net, logits\n\n def tf_gram_matrix(self, x):\n x = tf.reshape(x, tf.stack([-1, tf.reduce_prod(x.get_shape()[1:-1]), x.get_shape()[-1]]))\n return tf.matmul(x, x, transpose_a=True)\n\n def eta(self, time_per_iter, n_iter_remain, current_eta=None, alpha=.8):\n eta_ = time_per_iter * n_iter_remain\n if current_eta is not None:\n eta_ = (current_eta - time_per_iter) * alpha + eta_ * (1 - alpha)\n new_eta = eta_\n\n days = eta_ // (3600 * 24)\n eta_ -= days * (3600 * 24)\n\n hours = eta_ // 3600\n eta_ -= hours * 3600\n\n minutes = eta_ // 60\n eta_ -= minutes * 60\n\n seconds = eta_\n\n if days > 0:\n if days > 1:\n time_str = '%2d days %2d hr' % (days, hours)\n else:\n time_str = '%2d day %2d hr' % (days, hours)\n elif hours > 0 or minutes > 0:\n time_str = '%02d:%02d' % (hours, minutes)\n else:\n time_str = '%02d sec' % seconds\n\n return time_str, new_eta\n\n def train(\n self,\n input_dir='data/train/input', # original images\n ref_dir='data/train/ref', # reference images\n map_dir='data/train/map_321', # texture maps after texture swapping\n batch_size=9,\n num_init_epochs=5,\n num_epochs=100,\n learning_rate=1e-4,\n beta1=0.9,\n use_pretrained_model=True,\n use_init_model_only=False, # the init model is trained only with the reconstruction loss\n weights=(1e-4, 1e-4, 1e-6, 1., 1.), # (perceptual loss, texture loss, adversarial loss, back projection loss, reconstruction_loss)\n vgg_perceptual_loss_layer='relu5_1', # the layer name to compute perceptrual loss\n is_WGAN_GP=True,\n is_L1_loss=True,\n param_WGAN_GP=10,\n input_size=40,\n use_weight_map=False,\n use_lower_layers_in_per_loss=False\n ):\n if np.sqrt(batch_size) != 
int(np.sqrt(batch_size)):\n logging.error('The batch size must be the power of an integer.')\n exit(0)\n\n # detect existing model if not use_pretrained_model\n if self.save_dir is None:\n self.save_dir = 'default_save_dir'\n if not use_pretrained_model and exists(join(self.save_dir, MODEL_FOLDER)):\n logging.warning('The existing model dir %s is removed!' % join(self.save_dir, MODEL_FOLDER))\n rmtree(join(self.save_dir, MODEL_FOLDER))\n\n # create save folders\n for folder in [MODEL_FOLDER, SAMPLE_FOLDER]:\n if not exists(join(self.save_dir, folder)):\n makedirs(join(self.save_dir, folder))\n\n # check input dir\n files_input = sorted(glob(join(input_dir, '*.png')))\n files_map = sorted(glob(join(map_dir, '*.npz')))\n files_ref = sorted(glob(join(ref_dir, '*.png')))\n num_files = len(files_input)\n\n assert num_files == len(files_ref) == len(files_map)\n\n # ********************************************************************************\n # *** build graph\n # ********************************************************************************\n logging.info('Building graph ...')\n # input LR images, range [-1, 1]\n self.input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_size, input_size, 3])\n\n # original images, range [-1, 1]\n self.ground_truth = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_size * 4, input_size * 4, 3])\n\n # texture feature maps, range [0, ?]\n self.maps = tuple([tf.placeholder(dtype=tf.float32, shape=[batch_size, m.shape[0], m.shape[1], m.shape[2]])\n for m in np.load(files_map[0], allow_pickle=True)['target_map']])\n\n\n # weight maps\n self.weights = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_size, input_size])\n\n\n # reference images, ranges[-1, 1]\n self.ref = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_size, input_size, 3])\n\n # SRNTT network\n if use_weight_map:\n self.net_upscale, self.net_srntt = self.model(self.input, self.maps, weights=tf.expand_dims(self.weights, axis=-1))\n else:\n self.net_upscale, self.net_srntt = self.model(self.input, self.maps)\n\n # VGG19 network, input range [0, 255]\n self.net_vgg_sr = VGG19((self.net_srntt.outputs + 1) * 127.5, model_path=self.vgg19_model_path)\n self.net_vgg_hr = VGG19((self.ground_truth + 1) * 127.5, model_path=self.vgg19_model_path)\n \n # discriminator network\n self.net_d, d_real_logits = self.discriminator(self.ground_truth)\n _, d_fake_logits = self.discriminator(self.net_srntt.outputs, reuse=True)\n\n # ********************************************************************************\n # *** objectives\n # ********************************************************************************\n # reconstruction loss\n if is_L1_loss:\n loss_reconst = tf.reduce_mean(tf.abs(self.net_srntt.outputs - self.ground_truth))\n else:\n loss_reconst = cost.mean_squared_error(self.net_srntt.outputs, self.ground_truth, is_mean=True)\n\n # perceptual loss\n loss_percep = cost.mean_squared_error(\n self.net_vgg_sr.layers[vgg_perceptual_loss_layer], \n self.net_vgg_hr.layers[vgg_perceptual_loss_layer],\n is_mean=True)\n try:\n available_layers = ['relu2_1', 'relu3_1', 'relu4_1', 'relu5_1']\n available_layers = available_layers[:available_layers.index(vgg_perceptual_loss_layer)]\n loss_percep_lower_layers = [cost.mean_squared_error(\n self.net_vgg_sr.layers[l],\n self.net_vgg_hr.layers[l],\n is_mean=True) for l in available_layers]\n if use_lower_layers_in_per_loss:\n loss_percep = tf.reduce_mean([loss_percep] + loss_percep_lower_layers)\n except Exception:\n 
logging.warning('Failed to use lower layers in perceptual loss!')\n\n # texture loss\n if use_weight_map:\n self.a1, self.a2, self.a3 = -20., -20, -20\n self.b1, self.b2, self.b3 = .65, .65, .65\n loss_texture = tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(self.maps[0] * tf.nn.sigmoid(tf.expand_dims(self.weights, axis=-1) * self.a1 + self.b1)),\n self.tf_gram_matrix(self.net_vgg_sr.layers['relu3_1'] * tf.nn.sigmoid(tf.expand_dims(self.weights, axis=-1) * self.a1 + self.b1))\n ) / 4. / (input_size * input_size * 256) ** 2) + tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(\n self.maps[1] * tf.nn.sigmoid(tf.image.resize_bicubic(tf.expand_dims(self.weights, axis=-1), [input_size * 2] * 2) * self.a2 + self.b2)),\n self.tf_gram_matrix(\n self.net_vgg_sr.layers['relu2_1'] * tf.nn.sigmoid(tf.image.resize_bicubic(tf.expand_dims(self.weights, axis=-1), [input_size * 2] * 2) * self.a2 + self.b2))\n ) / 4. / (input_size * input_size * 512) ** 2) + tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(\n self.maps[2] * tf.nn.sigmoid(tf.image.resize_bicubic(tf.expand_dims(self.weights, axis=-1), [input_size * 4] * 2) * self.a3 + self.b3)),\n self.tf_gram_matrix(self.net_vgg_sr.layers['relu1_1'] * tf.nn.sigmoid(tf.image.resize_bicubic(tf.expand_dims(self.weights, axis=-1), [input_size * 4] * 2) * self.a3 + self.b3))\n ) / 4. / (input_size * input_size * 1024) ** 2)\n loss_texture /= 3.\n else:\n loss_texture = tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(self.maps[0]),\n self.tf_gram_matrix(self.net_vgg_sr.layers['relu3_1'])\n ) / 4. / (input_size * input_size * 256) ** 2) + tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(self.maps[1]),\n self.tf_gram_matrix(self.net_vgg_sr.layers['relu2_1'])\n ) / 4. / (input_size * input_size * 512) ** 2) + tf.reduce_mean(tf.squared_difference(\n self.tf_gram_matrix(self.maps[2]),\n self.tf_gram_matrix(self.net_vgg_sr.layers['relu1_1'])\n ) / 4. 
/ (input_size * input_size * 1024) ** 2)\n loss_texture /= 3.\n\n # adversarial loss\n if is_WGAN_GP:\n # WGAN losses\n loss_d = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits)\n loss_g = -tf.reduce_mean(d_fake_logits)\n # GP: gradient penalty\n alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1], minval=0., maxval=1.)\n interpolates = alpha * self.ground_truth + ((1 - alpha) * self.net_srntt.outputs)\n _, disc_interpolates = self.discriminator(interpolates, reuse=True)\n gradients = tf.gradients(disc_interpolates, [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=-1))\n gradient_penalty = tf.reduce_mean((slopes - 1) ** 2)\n loss_d += param_WGAN_GP * gradient_penalty\n else:\n loss_g = cost.sigmoid_cross_entropy(d_fake_logits, tf.ones_like(d_fake_logits))\n loss_d_fake = cost.sigmoid_cross_entropy(d_fake_logits, tf.zeros_like(d_fake_logits))\n loss_d_real = cost.sigmoid_cross_entropy(d_real_logits, tf.ones_like(d_real_logits))\n loss_d = loss_d_fake + loss_d_real\n\n # back projection loss\n loss_bp = back_projection_loss(tf_input=self.input, tf_output=self.net_srntt.outputs)\n \n # total loss\n loss_init = weights[4] * loss_reconst + weights[3] * loss_bp\n loss = weights[4] * loss_reconst + weights[3] * loss_bp + \\\n weights[2] * loss_g + \\\n weights[1] * loss_texture + \\\n weights[0] * loss_percep\n\n # ********************************************************************************\n # *** optimizers\n # ********************************************************************************\n # trainable variables\n trainable_vars = tf.trainable_variables()\n var_g = [v for v in trainable_vars if 'texture_transfer' in v.name]\n var_d = [v for v in trainable_vars if 'discriminator' in v.name]\n\n # learning rate decay\n global_step = tf.Variable(0, trainable=False, name='global_step')\n num_batches = int(num_files / batch_size)\n decayed_learning_rate = tf.train.exponential_decay(\n learning_rate=learning_rate,\n global_step=global_step,\n decay_steps=max(num_epochs * num_batches / 2, 1),\n decay_rate=.1,\n staircase=True\n )\n\n # optimizer\n optimizer_init = tf.train.AdamOptimizer(\n learning_rate=learning_rate, beta1=beta1).minimize(loss_init, var_list=var_g)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=decayed_learning_rate, beta1=beta1).minimize(loss, var_list=var_g, global_step=global_step)\n optimizer_d = tf.train.AdamOptimizer(\n learning_rate=decayed_learning_rate, beta1=beta1).minimize(loss_d, var_list=var_d, global_step=global_step)\n\n # ********************************************************************************\n # *** samples for monitoring the training process\n # ********************************************************************************\n np.random.seed(2019)\n idx = np.random.choice(np.arange(num_files), batch_size, replace=False)\n samples_in = [imread(files_input[i], mode='RGB') for i in idx]\n samples_ref = [imresize(imread(files_ref[i], mode='RGB'), (input_size * 4, input_size * 4), interp='bicubic')\n for i in idx]\n samples_input = [imresize(img, (input_size, input_size), interp='bicubic').astype(np.float32) / 127.5 - 1\n for img in samples_in]\n samples_texture_map_tmp = [np.load(files_map[i], allow_pickle=True)['target_map'] for i in idx]\n samples_texture_map = [[] for _ in range(len(samples_texture_map_tmp[0]))]\n for s in samples_texture_map_tmp:\n for i, item in enumerate(samples_texture_map):\n item.append(s[i])\n samples_texture_map = [np.array(b) for b in samples_texture_map]\n if 
use_weight_map:\n samples_weight_map = [np.pad(np.load(files_map[i], allow_pickle=True)['weights'], ((1, 1), (1, 1)), 'edge') for i in idx]\n else:\n samples_weight_map = np.zeros(shape=(batch_size, input_size, input_size))\n frame_size = int(np.sqrt(batch_size))\n vis.save_images(np.array(samples_in), [frame_size, frame_size], join(self.save_dir, SAMPLE_FOLDER, 'HR.png'))\n vis.save_images(np.round((np.array(samples_input) + 1) * 127.5).astype(np.uint8), [frame_size, frame_size],\n join(self.save_dir, SAMPLE_FOLDER, 'LR.png'))\n vis.save_images(np.array(samples_ref), [frame_size, frame_size], join(self.save_dir, SAMPLE_FOLDER, 'Ref.png'))\n\n # ********************************************************************************\n # *** load models and training\n # ********************************************************************************\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n logging.info('Loading models ...')\n tf.global_variables_initializer().run()\n\n # load pre-trained upscaling.\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['content_extractor'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_upscale) is False:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n vis.save_images(\n np.round((self.net_upscale.outputs.eval({self.input: samples_input}) + 1) * 127.5).astype(np.uint8),\n [frame_size, frame_size], join(self.save_dir, SAMPLE_FOLDER, 'Upscale.png'))\n\n # load the specific texture transfer model, specified by save_dir\n is_load_success = False\n if use_init_model_only:\n model_path = join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_srntt):\n num_init_epochs = 0\n is_load_success = True\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.warning('FAILED load %s' % model_path)\n elif use_pretrained_model:\n model_path = join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_srntt):\n num_init_epochs = 0\n is_load_success = True\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.warning('FAILED load %s' % model_path)\n\n model_path = join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['discriminator'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_d):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.warning('FAILED load %s' % model_path)\n\n # load pre-trained conditional texture transfer\n if not is_load_success:\n use_weight_map = False\n if use_init_model_only:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_srntt):\n num_init_epochs = 0\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n elif use_pretrained_model:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=sess,\n name=model_path,\n network=self.net_srntt):\n num_init_epochs = 0\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n\n logging.info('**********'\n ' Start training '\n '**********')\n # pre-train with only reconstruction loss\n current_eta = None\n idx = np.arange(num_files)\n for epoch 
in xrange(num_init_epochs):\n np.random.shuffle(idx)\n for n_batch in xrange(num_batches):\n step_time = time.time()\n sub_idx = idx[n_batch * batch_size:n_batch * batch_size + batch_size]\n batch_imgs = [imread(files_input[i], mode='RGB') for i in sub_idx]\n batch_truth = [img.astype(np.float32) / 127.5 - 1 for img in batch_imgs]\n batch_input = [imresize(img, .25, interp='bicubic').astype(np.float32)/127.5-1 for img in batch_imgs]\n batch_maps_tmp = [np.load(files_map[i], allow_pickle=True)['target_map'] for i in sub_idx]\n batch_maps = [[] for _ in range(len(batch_maps_tmp[0]))]\n for s in batch_maps_tmp:\n for i, item in enumerate(batch_maps):\n item.append(s[i])\n batch_maps = [np.array(b) for b in batch_maps]\n\n if use_weight_map:\n batch_weights = [np.pad(np.load(files_map[i], allow_pickle=True)['weights'], ((1, 1), (1, 1)), 'edge')\n for i in sub_idx]\n\n else:\n batch_weights = np.zeros(shape=(batch_size, input_size, input_size))\n # train with reference\n _, l_reconst, l_bp, map_hr_3, map_hr_2, map_hr_1 = sess.run(\n fetches=[optimizer_init, loss_reconst, loss_bp,\n self.net_vgg_hr.layers['relu3_1'],\n self.net_vgg_hr.layers['relu2_1'],\n self.net_vgg_hr.layers['relu1_1']],\n feed_dict={\n self.input: batch_input,\n self.maps: batch_maps,\n self.ground_truth: batch_truth,\n self.weights: batch_weights\n }\n )\n\n # train with truth\n _, l_reconst, l_bp = sess.run(\n fetches=[optimizer_init, loss_reconst, loss_bp],\n feed_dict={\n self.input: batch_input,\n self.maps: [map_hr_3, map_hr_2, map_hr_1],\n self.ground_truth: batch_truth,\n self.weights: np.ones_like(np.array(batch_weights))\n }\n )\n\n # print\n time_per_iter = time.time() - step_time\n n_iter_remain = (num_init_epochs - epoch - 1) * num_batches + num_batches - n_batch\n eta_str, eta_ = self.eta(time_per_iter, n_iter_remain, current_eta)\n current_eta = eta_\n logging.info('Pre-train: Epoch [%02d/%02d] Batch [%03d/%03d]\\tETA: %s\\n'\n '\\tl_rec = %.4f \\t l_bp = %.4f' %\n (epoch + 1, num_init_epochs, n_batch + 1, num_batches, eta_str,\n weights[4] * l_reconst, weights[3] * l_bp))\n\n # save intermediate results\n vis.save_images(\n np.round((self.net_srntt.outputs.eval({\n self.input: samples_input, self.maps: samples_texture_map,\n self.weights: samples_weight_map}) + 1) * 127.5).astype(np.uint8),\n [frame_size, frame_size],\n join(self.save_dir, SAMPLE_FOLDER, 'init_E%03d.png' % (epoch+1)))\n\n # save model for each epoch\n files.save_npz(\n save_list=self.net_srntt.all_params,\n name=join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['init']),\n sess=sess)\n\n # train with all losses\n current_eta = None\n for epoch in xrange(num_epochs):\n np.random.shuffle(idx)\n for n_batch in xrange(num_batches):\n step_time = time.time()\n sub_idx = idx[n_batch * batch_size:n_batch * batch_size + batch_size]\n batch_imgs = [imread(files_input[i], mode='RGB') for i in sub_idx]\n batch_truth = [img.astype(np.float32) / 127.5 - 1 for img in batch_imgs]\n batch_input = [imresize(img, .25, interp='bicubic').astype(np.float32)/127.5-1 for img in batch_imgs]\n batch_maps_tmp = [np.load(files_map[i], allow_pickle=True)['target_map'] for i in sub_idx]\n batch_maps = [[] for _ in range(len(batch_maps_tmp[0]))]\n for s in batch_maps_tmp:\n for i, item in enumerate(batch_maps):\n item.append(s[i])\n batch_maps = [np.array(b) for b in batch_maps]\n if use_weight_map:\n batch_weights = [np.pad(np.load(files_map[i], allow_pickle=True)['weights'], ((1, 1), (1, 1)), 'edge')\n for i in sub_idx]\n else:\n batch_weights = 
np.zeros(shape=(batch_size, input_size, input_size))\n\n # train with reference\n for _ in xrange(2):\n _ = sess.run(\n fetches=[optimizer_d],\n feed_dict={\n self.input: batch_input,\n self.maps: batch_maps,\n self.ground_truth: batch_truth,\n self.weights: batch_weights\n }\n )\n _, _, l_rec, l_per, l_tex, l_adv, l_dis, l_bp, map_hr_3, map_hr_2, map_hr_1 = sess.run(\n fetches=[optimizer, optimizer_d, loss_reconst, loss_percep, loss_texture, loss_g, loss_d, loss_bp,\n self.net_vgg_hr.layers['relu3_1'],\n self.net_vgg_hr.layers['relu2_1'],\n self.net_vgg_hr.layers['relu1_1'],\n ],\n feed_dict={\n self.input: batch_input,\n self.maps: batch_maps,\n self.ground_truth: batch_truth,\n self.weights: batch_weights\n }\n )\n\n # train with truth\n _, _, l_rec, l_per, l_tex, l_adv, l_dis, l_bp = sess.run(\n fetches=[optimizer, optimizer_d, loss_reconst, loss_percep, loss_texture, loss_g, loss_d, loss_bp],\n feed_dict={\n self.input: batch_input,\n self.maps: [map_hr_3, map_hr_2, map_hr_1],\n self.ground_truth: batch_truth,\n self.weights: np.ones_like(np.array(batch_weights))\n }\n )\n\n # print\n time_per_iter = time.time() - step_time\n n_iter_remain = (num_epochs - epoch - 1) * num_batches + num_batches - n_batch\n eta_str, eta_ = self.eta(time_per_iter, n_iter_remain, current_eta)\n current_eta = eta_\n logging.info('Epoch [%02d/%02d] Batch [%03d/%03d]\\tETA: %s\\n'\n '\\tl_rec = %.4f\\tl_bp = %.4f\\n'\n '\\tl_per = %.4f\\tl_tex = %.4f\\n'\n '\\tl_adv = %.4f\\tl_dis = %.4f' %\n (epoch + 1, num_epochs, n_batch + 1, num_batches, eta_str,\n weights[4] * l_rec, weights[3] * l_bp,\n weights[0] * l_per, weights[1] * l_tex,\n weights[2] * l_adv, l_dis))\n\n # save intermediate results\n vis.save_images(\n np.round((self.net_srntt.outputs.eval({\n self.input: samples_input, self.maps: samples_texture_map,\n self.weights: samples_weight_map}) + 1) * 127.5).astype(np.uint8),\n [frame_size, frame_size],\n join(self.save_dir, SAMPLE_FOLDER, 'E%03d.png' % (epoch + 1)))\n\n # save models for each epoch\n files.save_npz(\n save_list=self.net_srntt.all_params,\n name=join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['conditional_texture_transfer']),\n sess=sess)\n files.save_npz(\n save_list=self.net_d.all_params,\n name=join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['discriminator']),\n sess=sess)\n\n def test(\n self,\n input_dir, # original image\n ref_dir=None, # reference images\n use_pretrained_model=True,\n use_init_model_only=False, # the init model is trained only with the reconstruction loss\n use_weight_map=False,\n result_dir=None,\n ref_scale=1.0,\n is_original_image=True,\n max_batch_size=16,\n save_ref=True\n ):\n logging.info('Testing mode')\n\n if ref_dir is None:\n return self.test_without_ref(\n input_dir=input_dir,\n use_pretrained_model=use_pretrained_model,\n use_init_model_only=use_init_model_only,\n use_weight_map=use_weight_map,\n result_dir=result_dir,\n ref_scale=ref_scale,\n is_original_image=is_original_image,\n max_batch_size=max_batch_size,\n save_ref=save_ref\n )\n\n # ********************************************************************************\n # *** check input and reference images\n # ********************************************************************************\n # check input_dir\n img_input, img_hr = None, None\n if isinstance(input_dir, np.ndarray):\n assert len(input_dir.shape) == 3\n img_input = np.copy(input_dir)\n elif isfile(input_dir):\n img_input = imread(input_dir, mode='RGB')\n else:\n logging.error('Unrecognized input_dir %s' % input_dir)\n 
exit(0)\n\n h, w, _ = img_input.shape\n if is_original_image:\n # ensure that the size of img_input can be divided by 4 with no remainder\n h = int(h // 4 * 4)\n w = int(w // 4 * 4)\n img_hr = img_input[0:h, 0:w, ::]\n img_input = imresize(img_hr, .25, interp='bicubic')\n h, w, _ = img_input.shape\n img_input_copy = np.copy(img_input)\n\n if h * w * 16 > SRNTT.MAX_IMAGE_SIZE: # avoid OOM\n # split img_input into patches\n patches = []\n grids = []\n patch_size = 128\n stride = 100\n for ind_row in range(0, h - (patch_size - stride), stride):\n for ind_col in range(0, w - (patch_size - stride), stride):\n patch = img_input[ind_row:ind_row + patch_size, ind_col:ind_col + patch_size, :]\n if patch.shape != (patch_size, patch_size, 3):\n patch = np.pad(patch,\n ((0, patch_size - patch.shape[0]), (0, patch_size - patch.shape[1]), (0, 0)),\n 'reflect')\n patches.append(patch)\n grids.append((ind_row * 4, ind_col * 4, patch_size * 4))\n grids = np.stack(grids, axis=0)\n img_input = np.stack(patches, axis=0)\n else:\n grids = None\n img_input = np.expand_dims(img_input, axis=0)\n\n # check ref_dir\n img_ref = []\n if not isinstance(ref_dir, (list, tuple)):\n ref_dir = [ref_dir]\n\n for ref in ref_dir:\n if isinstance(ref, np.ndarray):\n assert len(ref.shape) == 3\n img_ref.append(np.copy(ref))\n elif isfile(ref):\n img_ref.append(imread(ref, mode='RGB'))\n else:\n logging.error('Unrecognized ref_dir type!')\n exit(0)\n\n if ref_scale <= 0: # keep the same scale as HR image\n img_ref = [imresize(img, (h * 4, w * 4), interp='bicubic') for img in img_ref]\n elif ref_scale != 1:\n img_ref = [imresize(img, float(ref_scale), interp='bicubic') for img in img_ref]\n\n for i in xrange(len(img_ref)):\n h2, w2, _ = img_ref[i].shape\n h2 = int(h2 // 4 * 4)\n w2 = int(w2 // 4 * 4)\n img_ref[i] = img_ref[i][0:h2, 0:w2, ::]\n\n # create result folder\n if result_dir is None:\n result_dir = join(self.save_dir, 'test')\n if not exists(result_dir):\n makedirs(result_dir)\n if not exists(join(result_dir, 'tmp')):\n makedirs(join(result_dir, 'tmp'))\n\n # ********************************************************************************\n # *** build graph\n # ********************************************************************************\n if not self.is_model_built:\n self.is_model_built = True\n logging.info('Building graph ...')\n # input image, range [-1, 1]\n self.input_srntt = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)\n\n # reference images, range [0, 255]\n self.input_vgg19 = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)\n\n\n # swapped feature map and weights\n self.maps = (\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 256)),\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 128)),\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 64))\n )\n\n self.weights = tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None))\n\n # SRNTT network\n logging.info('Build SRNTT model')\n if use_weight_map:\n self.net_upscale, self.net_srntt = self.model(\n self.input_srntt, self.maps, weights=tf.expand_dims(self.weights, axis=-1), is_train=False)\n else:\n self.net_upscale, self.net_srntt = self.model(self.input_srntt, self.maps, is_train=False)\n\n # VGG19 network, input range [0, 255]\n logging.info('Build VGG19 model')\n self.net_vgg19 = VGG19(\n input_image=self.input_vgg19,\n model_path=self.vgg19_model_path,\n final_layer='relu3_1'\n )\n\n # ********************************************************************************\n # *** load models\n # 
********************************************************************************\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = False\n self.sess = tf.Session(config=config)\n\n # instant of Swap()\n logging.info('Initialize the swapper')\n self.swaper = Swap(sess=self.sess)\n\n logging.info('Loading models ...')\n self.sess.run(tf.global_variables_initializer())\n\n # load pre-trained content extractor, including upscaling.\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['content_extractor'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_upscale) is False:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n\n # load the specific conditional texture transfer model, specified by save_dir\n if self.save_dir is None:\n if use_init_model_only:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n if use_init_model_only:\n model_path = join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n model_path = join(self.save_dir, MODEL_FOLDER,\n SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n\n logging.info('**********'\n ' Start testing '\n '**********')\n\n matching_layer = ['relu3_1', 'relu2_1', 'relu1_1']\n\n logging.info('Get VGG19 Feature Maps')\n\n logging.info('\\t[1/2] Getting feature map of Ref image ...')\n t_start = time.time()\n map_ref = []\n for i in img_ref:\n map_ref.append(\n self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer,\n feed_image=i)\n )\n styles = [[] for _ in xrange(len(matching_layer))]\n for i in map_ref:\n for j in xrange(len(styles)):\n styles[j].append(i[j])\n\n logging.info('\\t[2/2] Getting feature map of LR->SR Ref image ...')\n map_ref_sr = []\n for i in img_ref:\n img_ref_downscale = imresize(i, .25, interp='bicubic')\n img_ref_upscale = imresize(img_ref_downscale, 4., interp='bicubic')\n map_ref_sr.append(\n self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer[0],\n feed_image=img_ref_upscale)\n )\n\n # swap ref to in\n logging.info('Patch-Wise Matching and Swapping')\n for idx, patch in enumerate(img_input):\n logging.info('\\tPatch %03d/%03d' % (idx + 1, img_input.shape[0]))\n\n # skip if the results exists\n if exists(join(result_dir, 'tmp', 'srntt_%05d.png' % idx)):\n continue\n\n logging.info('\\tGetting feature map of input LR image ...')\n img_input_upscale = imresize(patch, 4., interp='bicubic')\n map_sr = self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer[0], feed_image=img_input_upscale)\n\n logging.info('\\tMatching and swapping features 
...')\n map_target, weight, _ = self.swaper.conditional_swap_multi_layer(\n content=map_sr,\n style=styles[0],\n condition=map_ref_sr,\n other_styles=styles[1:],\n is_weight=use_weight_map\n )\n\n logging.info('Obtain SR patches')\n if use_weight_map:\n weight = np.pad(weight, ((1, 1), (1, 1)), 'edge')\n out_srntt, out_upscale = self.sess.run(\n fetches=[self.net_srntt.outputs, self.net_upscale.outputs],\n feed_dict={\n self.input_srntt: [patch / 127.5 - 1],\n self.maps: [np.expand_dims(m, axis=0) for m in map_target],\n self.weights: [weight]\n }\n )\n else:\n time_step_1 = time.time()\n out_srntt, out_upscale = self.sess.run(\n fetches=[self.net_srntt.outputs, self.net_upscale.outputs],\n feed_dict={\n self.input_srntt: [patch / 127.5 - 1],\n self.maps: [np.expand_dims(m, axis=0) for m in map_target],\n }\n )\n time_step_2 = time.time()\n\n logging.info('Time elapsed: PM: %.3f sec, SR: %.3f sec' %\n ((time_step_1 - t_start), (time_step_2 - time_step_1)))\n\n\n imsave(join(result_dir, 'tmp', 'srntt_%05d.png' % idx),\n np.round((out_srntt.squeeze() + 1) * 127.5).astype(np.uint8))\n imsave(join(result_dir, 'tmp', 'upscale_%05d.png' % idx),\n np.round((out_upscale.squeeze() + 1) * 127.5).astype(np.uint8))\n logging.info('Saved to %s' % join(result_dir, 'tmp', 'srntt_%05d.png' % idx))\n t_end = time.time()\n logging.info('Reconstruct SR image')\n out_srntt_files = sorted(glob(join(result_dir, 'tmp', 'srntt_*.png')))\n out_upscale_files = sorted(glob(join(result_dir, 'tmp', 'upscale_*.png')))\n\n if grids is not None:\n patch_size = grids[0, 2]\n h_l, w_l = grids[-1, 0] + patch_size, grids[-1, 1] + patch_size\n out_upscale_large = np.zeros((h_l, w_l, 3), dtype=np.float32)\n out_srntt_large = np.copy(out_upscale_large)\n counter = np.zeros_like(out_srntt_large, dtype=np.float32)\n for idx in xrange(len(grids)):\n out_upscale_large[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += imread(out_upscale_files[idx], mode='RGB').astype(np.float32)\n\n out_srntt_large[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += imread(out_srntt_files[idx], mode='RGB').astype(np.float32)\n\n counter[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += 1\n\n out_upscale_large /= counter\n out_srntt_large /= counter\n out_upscale = out_upscale_large[:h * 4, :w * 4, :]\n out_srntt = out_srntt_large[:h * 4, :w * 4, :]\n else:\n out_upscale = imread(out_upscale_files[0], mode='RGB')\n out_srntt = imread(out_srntt_files[0], mode='RGB')\n\n\n # log run time\n with open(join(result_dir, 'run_time.txt'), 'w') as f:\n line = '%02d min %02d sec\\n' % ((t_end - t_start) // 60, (t_end - t_start) % 60)\n f.write(line)\n f.close()\n\n # save results\n # save HR image if it exists\n if img_hr is not None:\n imsave(join(result_dir, 'HR.png'), img_hr)\n # save LR (input) image\n imsave(join(result_dir, 'LR.png'), img_input_copy)\n # save reference image(s)\n if save_ref:\n for idx, ref in enumerate(img_ref):\n imsave(join(result_dir, 'Ref_%02d.png' % idx), ref)\n # save bicubic\n imsave(join(result_dir, 'Bicubic.png'), imresize(img_input_copy, 4., interp='bicubic'))\n # save SR images\n imsave(join(result_dir, 'Upscale.png'), np.array(out_upscale).squeeze().round().clip(0, 255).astype(np.uint8))\n imsave(join(result_dir, 'SRNTT.png'), np.array(out_srntt).squeeze().round().clip(0, 255).astype(np.uint8))\n logging.info('Saved results to folder %s' % result_dir)\n\n return 
np.array(out_srntt).squeeze().round().clip(0, 255).astype(np.uint8)\n\n def test_without_ref(\n self,\n input_dir, # original image\n ref_dir=None, # reference images\n use_pretrained_model=True,\n use_init_model_only=False, # the init model is trained only with the reconstruction loss\n use_weight_map=False,\n result_dir=None,\n ref_scale=1.0,\n is_original_image=True,\n max_batch_size=16,\n save_ref=True\n ):\n logging.info('Testing without references')\n\n # ********************************************************************************\n # *** check input and reference images\n # ********************************************************************************\n # check input_dir\n img_input, img_hr = None, None\n if isinstance(input_dir, np.ndarray):\n assert len(input_dir.shape) == 3\n img_input = np.copy(input_dir)\n elif isfile(input_dir):\n img_input = imread(input_dir, mode='RGB')\n else:\n logging.info('Unrecognized input_dir %s' % input_dir)\n exit(0)\n\n h, w, _ = img_input.shape\n if is_original_image:\n # ensure that the size of img_input can be divided by 4 with no remainder\n h = int(h // 4 * 4)\n w = int(w // 4 * 4)\n img_hr = img_input[0:h, 0:w, ::]\n img_input = imresize(img_hr, .25, interp='bicubic')\n h, w, _ = img_input.shape\n img_input_copy = np.copy(img_input)\n\n if h * w * 16 > SRNTT.MAX_IMAGE_SIZE: # avoid OOM\n # split img_input into patches\n patches = []\n grids = []\n patch_size = 128\n stride = 100\n for ind_row in range(0, h - (patch_size - stride), stride):\n for ind_col in range(0, w - (patch_size - stride), stride):\n patch = img_input[ind_row:ind_row + patch_size, ind_col:ind_col + patch_size, :]\n if patch.shape != (patch_size, patch_size, 3):\n patch = np.pad(patch,\n ((0, patch_size - patch.shape[0]), (0, patch_size - patch.shape[1]), (0, 0)),\n 'reflect')\n patches.append(patch)\n grids.append((ind_row * 4, ind_col * 4, patch_size * 4))\n grids = np.stack(grids, axis=0)\n img_input = np.stack(patches, axis=0)\n else:\n grids = None\n img_input = np.expand_dims(img_input, axis=0)\n\n # check ref_dir\n is_ref = True\n if ref_dir is None:\n is_ref = False\n ref_dir = input_dir\n\n img_ref = []\n\n if not isinstance(ref_dir, (list, tuple)):\n ref_dir = [ref_dir]\n\n for ref in ref_dir:\n if isinstance(ref, np.ndarray):\n assert len(ref.shape) == 3\n img_ref.append(np.copy(ref))\n elif isfile(ref):\n img_ref.append(imread(ref, mode='RGB'))\n else:\n logging.info('Unrecognized ref_dir type!')\n exit(0)\n\n if ref_scale <= 0: # keep the same scale as HR image\n img_ref = [imresize(img, (h * 4, w * 4), interp='bicubic') for img in img_ref]\n elif ref_scale != 1:\n img_ref = [imresize(img, float(ref_scale), interp='bicubic') for img in img_ref]\n\n for i in xrange(len(img_ref)):\n h2, w2, _ = img_ref[i].shape\n h2 = int(h2 // 4 * 4)\n w2 = int(w2 // 4 * 4)\n img_ref[i] = img_ref[i][0:h2, 0:w2, ::]\n if not is_ref and is_original_image:\n img_ref[i] = imresize(img_ref[i], .25, interp='bicubic')\n\n # create result folder\n if result_dir is None:\n result_dir = join(self.save_dir, 'test')\n if not exists(result_dir):\n makedirs(result_dir)\n if not exists(join(result_dir, 'tmp')):\n makedirs(join(result_dir, 'tmp'))\n\n # ********************************************************************************\n # *** build graph\n # ********************************************************************************\n if not self.is_model_built:\n self.is_model_built = True\n logging.info('Building graph ...')\n # input image, range [-1, 1]\n self.input_srntt = 
tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)\n\n # reference images, range [0, 255]\n self.input_vgg19 = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)\n\n # swapped feature map and weights\n self.maps = (\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 256)),\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 128)),\n tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None, 64))\n )\n\n self.weights = tf.placeholder(\n dtype=tf.float32,\n shape=(1, None, None))\n\n # SRNTT network\n logging.info('Build SRNTT model')\n if use_weight_map:\n self.net_upscale, self.net_srntt = self.model(\n self.input_srntt, self.maps, weights=tf.expand_dims(self.weights, axis=-1), is_train=False)\n else:\n self.net_upscale, self.net_srntt = self.model(self.input_srntt, self.maps, is_train=False)\n\n # VGG19 network, input range [0, 255]\n logging.info('Build VGG19 model')\n self.net_vgg19 = VGG19(\n input_image=self.input_vgg19,\n model_path=self.vgg19_model_path,\n final_layer='relu3_1'\n )\n\n # ********************************************************************************\n # *** load models\n # ********************************************************************************\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = False\n self.sess = tf.Session(config=config)\n\n # instant of Swap()\n logging.info('Initialize the swapper')\n self.swaper = Swap(sess=self.sess)\n\n logging.info('Loading models ...')\n self.sess.run(tf.global_variables_initializer())\n\n # load pre-trained content extractor, including upscaling.\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['content_extractor'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_upscale) is False:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n\n # load the specific conditional texture transfer model, specified by save_dir\n if self.save_dir is None:\n if use_init_model_only:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n model_path = join(self.srntt_model_path, SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n if use_init_model_only:\n model_path = join(self.save_dir, MODEL_FOLDER, SRNTT_MODEL_NAMES['init'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n else:\n model_path = join(self.save_dir, MODEL_FOLDER,\n SRNTT_MODEL_NAMES['conditional_texture_transfer'])\n if files.load_and_assign_npz(\n sess=self.sess,\n name=model_path,\n network=self.net_srntt):\n logging.info('SUCCESS load %s' % model_path)\n else:\n logging.error('FAILED load %s' % model_path)\n exit(0)\n\n logging.info('**********'\n ' Start testing '\n '**********')\n\n matching_layer = ['relu3_1', 'relu2_1', 'relu1_1']\n\n logging.info('Get VGG19 Feature Maps')\n\n logging.info('\\t[1/2] Getting feature map of Ref image ...')\n t_start = time.time()\n map_ref = []\n for i in img_ref:\n map_ref.append(\n self.net_vgg19.get_layer_output(\n 
sess=self.sess, layer_name=matching_layer,\n feed_image=i)\n )\n styles = [[] for _ in xrange(len(matching_layer))]\n for i in map_ref:\n for j in xrange(len(styles)):\n styles[j].append(i[j])\n\n logging.info('\\t[2/2] Getting feature map of LR->SR Ref image ...')\n map_ref_sr = []\n if is_ref:\n for i in img_ref:\n img_ref_downscale = imresize(i, .25, interp='bicubic')\n img_ref_upscale = self.net_upscale.outputs.eval({self.input_srntt: [img_ref_downscale / 127.5 - 1]}, session=self.sess)\n img_ref_upscale = (img_ref_upscale + 1) * 127.5\n map_ref_sr.append(\n self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer[0],\n feed_image=img_ref_upscale)\n )\n else:\n map_ref_sr = styles\n\n # swap ref to in\n logging.info('Patch-Wise Matching and Swapping')\n for idx, patch in enumerate(img_input):\n logging.info('\\tPatch %03d/%03d' % (idx + 1, img_input.shape[0]))\n\n # skip if the results exists\n if exists(join(result_dir, 'tmp', 'srntt_%05d.png' % idx)):\n continue\n\n logging.info('\\tGetting feature map of input LR image ...')\n\n if 'Urban' in input_dir:\n img_input_upscale = imread(\n join('../EDSR-PyTorch/test_Urban100_MDSR', split(input_dir)[-1], 'SRNTT.png'),\n mode='RGB').astype(np.float32)\n elif 'CUFED5' in input_dir and False:\n img_input_upscale = imread(\n join('../EDSR-PyTorch/test_CUFED5_MDSR', split(input_dir)[-1], 'SRNTT.png'),\n mode='RGB').astype(np.float32)\n elif 'Sun80' in input_dir or 'sun80' in input_dir:\n img_input_upscale = imread(\n join('../EDSR-PyTorch/test_Sun100_MDSR', split(input_dir)[-1].split('.')[0], 'SRNTT.png'),\n mode='RGB').astype(np.float32)\n else:\n img_input_upscale = self.net_upscale.outputs.eval({self.input_srntt: [patch / 127.5 - 1]}, session=self.sess)\n img_input_upscale = (img_input_upscale + 1) * 127.5\n\n if is_ref:\n map_sr = self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer[0], feed_image=img_input_upscale)\n else:\n map_sr = self.net_vgg19.get_layer_output(\n sess=self.sess, layer_name=matching_layer, feed_image=img_input_upscale)\n\n logging.info('\\tMatching and swapping features ...')\n if is_ref:\n map_target, weight, _ = self.swaper.conditional_swap_multi_layer(\n content=map_sr,\n style=styles[0],\n condition=map_ref_sr,\n other_styles=styles[1:],\n is_weight=use_weight_map\n )\n else:\n map_target, weight = [], []\n for i in xrange(len(matching_layer)):\n m_target, w, _ = self.swaper.conditional_swap_multi_layer(\n content=map_sr[i],\n style=styles[i],\n condition=map_ref_sr[i],\n is_weight=use_weight_map\n )\n map_target.append(np.squeeze(m_target))\n weight.append(w)\n\n logging.info('Obtain SR patches')\n if use_weight_map:\n weight = np.pad(weight, ((1, 1), (1, 1)), 'edge')\n out_srntt, out_upscale = self.sess.run(\n fetches=[self.net_srntt.outputs, self.net_upscale.outputs],\n feed_dict={\n self.input_srntt: [patch / 127.5 - 1],\n self.maps: [np.expand_dims(m, axis=0) for m in map_target],\n self.weights: [weight]\n }\n )\n else:\n time_step_1 = time.time()\n out_srntt, out_upscale = self.sess.run(\n fetches=[self.net_srntt.outputs, self.net_upscale.outputs],\n feed_dict={\n self.input_srntt: [patch / 127.5 - 1],\n self.maps: [np.expand_dims(m, axis=0) for m in map_target],\n }\n )\n time_step_2 = time.time()\n\n logging.info('Time elapsed: PM: %.3f sec, SR: %.3f sec' %\n ((time_step_1 - t_start), (time_step_2 - time_step_1)))\n\n imsave(join(result_dir, 'tmp', 'srntt_%05d.png' % idx),\n np.round((out_srntt.squeeze() + 1) * 127.5).astype(np.uint8))\n imsave(join(result_dir, 
'tmp', 'upscale_%05d.png' % idx),\n np.round((out_upscale.squeeze() + 1) * 127.5).astype(np.uint8))\n logging.info('Saved to %s' % join(result_dir, 'tmp', 'srntt_%05d.png' % idx))\n logging.info('Reconstruct SR image')\n out_srntt_files = sorted(glob(join(result_dir, 'tmp', 'srntt_*.png')))\n out_upscale_files = sorted(glob(join(result_dir, 'tmp', 'upscale_*.png')))\n\n if grids is not None:\n patch_size = grids[0, 2]\n h_l, w_l = grids[-1, 0] + patch_size, grids[-1, 1] + patch_size\n out_upscale_large = np.zeros((h_l, w_l, 3), dtype=np.float32)\n out_srntt_large = np.copy(out_upscale_large)\n counter = np.zeros_like(out_srntt_large, dtype=np.float32)\n for idx in xrange(len(grids)):\n out_upscale_large[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += imread(out_upscale_files[idx], mode='RGB').astype(np.float32)\n\n out_srntt_large[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += imread(out_srntt_files[idx], mode='RGB').astype(np.float32)\n\n counter[\n grids[idx, 0]:grids[idx, 0] + patch_size,\n grids[idx, 1]:grids[idx, 1] + patch_size, :] += 1\n\n out_upscale_large /= counter\n out_srntt_large /= counter\n out_upscale = out_upscale_large[:(h * 4), :(w * 4), :]\n out_srntt = out_srntt_large[:(h * 4), :(w * 4), :]\n else:\n out_upscale = imread(out_upscale_files[0], mode='RGB')\n out_srntt = imread(out_srntt_files[0], mode='RGB')\n\n t_end = time.time()\n\n # log run time\n with open(join(result_dir, 'run_time.txt'), 'w') as f:\n line = '%02d min %02d sec\\n' % ((t_end - t_start) // 60, (t_end - t_start) % 60)\n f.write(line)\n f.close()\n\n # save results\n # save HR image if it exists\n if img_hr is not None:\n imsave(join(result_dir, 'HR.png'), img_hr)\n # save LR (input) image\n imsave(join(result_dir, 'LR.png'), img_input_copy)\n # save reference image(s)\n if save_ref:\n for idx, ref in enumerate(img_ref):\n imsave(join(result_dir, 'Ref_%02d.png' % idx), ref)\n # save bicubic\n imsave(join(result_dir, 'Bicubic.png'), imresize(img_input_copy, 4., interp='bicubic'))\n # save SR images\n imsave(join(result_dir, 'Upscale.png'), np.array(out_upscale).squeeze().round().clip(0, 255).astype(np.uint8))\n imsave(join(result_dir, 'SRNTT.png'), np.array(out_srntt).squeeze().round().clip(0, 255).astype(np.uint8))\n logging.info('Saved results to folder %s' % result_dir)\n\n return np.array(out_srntt).squeeze().round().clip(0, 255).astype(np.uint8)\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.train.AdamOptimizer",
"tensorflow.Variable",
"tensorflow.gradients",
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.random_normal_initializer",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.global_variables_initializer",
"scipy.misc.imresize",
"tensorflow.reduce_mean",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"scipy.misc.imread",
"tensorflow.variable_scope",
"tensorflow.random_uniform",
"tensorflow.abs"
]
] |
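The SRNTT record above builds its texture loss from Gram matrices of VGG19 feature maps (`tf_gram_matrix` followed by per-scale squared differences). Below is a minimal NumPy sketch of that idea, not the record's TF1 graph code: `gram_matrix` and `texture_loss` are illustrative names, the toy shapes stand in for a `relu3_1` activation, and the normalization mirrors the Gatys-style `1 / (4 (HWC)^2)` factor used in the record.

```python
import numpy as np

def gram_matrix(feature_map):
    # feature_map: (H, W, C) activation; flatten spatial dims so rows index pixels
    h, w, c = feature_map.shape
    f = feature_map.reshape(h * w, c)
    return f.T @ f  # (C, C) channel-correlation (Gram) matrix

def texture_loss(sr_features, swapped_features):
    # mean squared Gram difference with Gatys-style normalization
    h, w, c = sr_features.shape
    diff = gram_matrix(sr_features) - gram_matrix(swapped_features)
    return np.mean(diff ** 2) / (4.0 * (h * w * c) ** 2)

# toy activations standing in for the SR output's relu3_1 map and the swapped texture map
rng = np.random.default_rng(0)
a = rng.standard_normal((40, 40, 256)).astype(np.float32)
b = rng.standard_normal((40, 40, 256)).astype(np.float32)
print(texture_loss(a, b))
```

In the record itself the same quantity is computed at three VGG scales (`relu3_1`, `relu2_1`, `relu1_1`), averaged, and optionally gated by sigmoid-transformed weight maps.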
RodolpheMth/oculus
|
[
"4057a2f17837876b927d9b38e5480b11a68a4922"
] |
[
"app/utils/data_to_viz.py"
] |
[
"import csv\nimport pandas as pd\n\ntest = data = pd.read_csv(\"C:/Users/rodol/Documents/2020-2021/Projet 4/oculus_monitoring/data/tweets/TechCrunch_content.csv\", sep=';')\n\n# for raw in test['Company Name']:\n# print(raw.count(\"Lime\"))\n\n"
] |
[
[
"pandas.read_csv"
]
] |
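The `oculus` record above only loads a semicolon-separated CSV and sketches, in a comment, counting occurrences of "Lime" per row. A hedged pandas sketch of the vectorized way to do that count follows; the `content` column and the toy rows are hypothetical stand-ins, not taken from the actual TechCrunch dump.

```python
import pandas as pd

# hypothetical rows standing in for the semicolon-separated tweet dump read above
df = pd.DataFrame({"content": ["Lime raises a new round",
                               "Uber and Lime announce a partnership",
                               "Unrelated tweet"]})

# vectorized per-row substring count, instead of looping over the column in Python
df["lime_mentions"] = df["content"].str.count("Lime")
print(df)
```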
jaumecolom/hackeseiaat
|
[
"fe6777fc95d26ed88949c4b9f2992414a7e54335"
] |
[
"POL_code/detectDoorsv1.py"
] |
[
"#REFERENCE: http://www.pyimagesearch.com/2015/05/04/target-acquired-finding-targets-in-drone-and-quadcopter-video-streams-using-python-and-opencv/\r\n\r\n# What does this code?\r\n#Finally, the code detects all the structure with green margins or similars,\r\n#with a polygonal shape and a minimum dimensions. Once detected,contourns are\r\n#plotted to objects that fit on the filter and image momentum is computed.\r\n#Is it possible to use this main code with simple images or with the webcam\r\n\r\n#What is the main code objective?\r\n#Is a first approach into detecting obstacles in real time for a drone, focused\r\n#in locate objectives or doors to go through.\r\n\r\n# import the necessary packages\r\nimport cv2\r\nimport numpy as np\r\nimport imutils\r\n\r\n# load the video (try different ports until the webCam is found)\r\ncap = cv2.VideoCapture(\"../boat.mp4\")\r\n\r\n# keep looping\r\nwhile True:\r\n\t#grabbed tells us if a frame has been detected by the webCam.\r\n\t#Frame is the image generated by the webCam at each moment\r\n\t(grabbed, frame) = cap.read()\r\n\tstatus = \"Not detected\"\r\n\r\n\t# Uncomment this line to use an image instead of a webCam\r\n\t# frame = cv2.imread('../IMG-20170511-WA0017.jpg')\r\n\r\n\t#If the frames stop appearing, this would stop the program\r\n\tif not grabbed:\r\n\t\tbreak\r\n\r\n\t#copy the original frame to modify the copy with filters\r\n\tShapMaskFrame = frame.copy();\r\n\t# pass BGR to HSV\r\n\tShapMaskFrame = cv2.cvtColor(ShapMaskFrame, cv2.COLOR_BGR2HSV)\r\n\r\n # HSV color filter\r\n\tcolor = 60; #green\r\n\tsensitivity = 60\r\n\tlower = np.array([color - sensitivity, 0, 0])\r\n\tupper = np.array([color + sensitivity, 255, 255])\r\n\t# first we filter the color\r\n\tshapeMask = cv2.inRange(ShapMaskFrame, lower, upper)\r\n\t#shapeMask is a black frame with only white pixels that satisfy the filter\r\n\t#conditions imposed on the cv2.inRange function\r\n\t#We look for all the countours of the shapeMask, all the margins of the\r\n\t#color range selected\r\n\t(_,cnts, _) = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,\r\n\t\t\t\t\t\t\t\tcv2.CHAIN_APPROX_SIMPLE)\r\n\r\n\t#----------------------------------------------------------------\r\n\r\n\t# loop over the contours\r\n\tfor c in cnts:\r\n\t\t# approximate the contour\r\n\t\tperi = cv2.arcLength(c, True)\r\n\t\t#approximate slopes to straight line polygons\r\n\t\tapprox = cv2.approxPolyDP(c, 0.005 * peri, True)\r\n\t\t#print(len(approx))\r\n\t\t#approx is an array, inside there is the value for each of the segments\r\n\t\t#that conform the polygons\r\n\t\tif len(approx) >= 10 and len(approx) <= 300:\r\n\t\t\t#We find dimensions and coordinates of the segments that form each\r\n\t\t\t#of the polygons that we located with the color filter\r\n\t\t\t(x, y, w, h) = cv2.boundingRect(approx)\r\n\t\t\t#we use the aspectRatio as a geometrical condition to erase unwanted\r\n\t\t\t#polygons\r\n\t\t\taspectRatio = float(h) / float(w)\r\n\r\n\t\t\tkeepDims = w > 70 and h > 200 #set a minimum size for the object\r\n\t\t\t#guarantee that is \"squared\" or a rod shape\r\n\t\t\tkeepAspectRatio = aspectRatio >= 0.7 and aspectRatio <= 10\r\n\r\n\t\t\t# we will only plot the polygons that fit on the geometrical and\r\n\t\t\t#color conditions\r\n\t\t\tif keepDims and keepAspectRatio :\r\n\t\t\t\t# draw an outline around the target and update the status\r\n\t\t\t\t# text\r\n\t\t\t\tcv2.drawContours(frame, [approx], -1, (0, 0, 255), 4)\r\n\t\t\t\tstatus = \"Target(s) Acquired\"\r\n\r\n\t\t\t\t# compute the 
center of the contour ploted on the image. Also,\r\n\t\t\t\t# refered as image moment, and plot it on the image.\r\n\t\t\t\tM = cv2.moments(approx)\r\n\t\t\t\t(cX, cY) = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\r\n\t\t\t\t(startX, endX) = (int(cX - (w * 0.15)), int(cX + (w * 0.15)))\r\n\t\t\t\t(startY, endY) = (int(cY - (h * 0.15)), int(cY + (h * 0.15)))\r\n\t\t\t\tcv2.line(frame, (startX, cY), (endX, cY), (0, 0, 255), 3)\r\n\t\t\t\tcv2.line(frame, (cX, startY), (cX, endY), (0, 0, 255), 3)\r\n\r\n\t# draw the status text on the frame\r\n\tcv2.putText(frame, status, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n\t\t(0, 0, 255), 2)\r\n\t#Show the edited video\r\n\tcv2.imshow(\"Frame\", frame)\r\n\r\n\t# if the 'q' key is pressed, stop the loop\r\n\tkey = cv2.waitKey(1) & 0xFF\r\n\tif key == ord(\"q\"):\r\n\t\tbreak\r\n\r\n# cleanup the camera and close any open windows\r\ncamera.release()\r\ncv2.destroyAllWindows()\r\n"
] |
[
[
"numpy.array"
]
] |
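The door-detection record above builds a mask from an HSV hue window and then filters contours by polygon size and aspect ratio. Here is a minimal single-frame sketch of that masking step, under stated assumptions: the video path and hue band are placeholders copied from the script, and the `cv2.findContours` unpacking assumes OpenCV 4.x (OpenCV 3.x returns a leading image value, as in the record).

```python
import cv2
import numpy as np

cap = cv2.VideoCapture("../boat.mp4")  # placeholder path, as in the record
grabbed, frame = cap.read()
if grabbed:
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    hue, sensitivity = 60, 60  # green-ish hue band, same values as the script
    mask = cv2.inRange(hsv,
                       np.array([hue - sensitivity, 0, 0]),
                       np.array([hue + sensitivity, 255, 255]))
    # OpenCV 4.x signature; OpenCV 3.x returns (image, contours, hierarchy)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame, contours, -1, (0, 0, 255), 2)
cap.release()
```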
YufengJin/deep-reinforcement-learning
|
[
"141cf00f169b46aa492c9e7520429bfdaab0117d"
] |
[
"ddpg-pendulum/ddpg.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"DDPG.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/github/YufengJin/deep-reinforcement-learning/blob/master/ddpg-pendulum/DDPG.ipynb\n\n# Deep Deterministic Policy Gradients (DDPG)\n---\nIn this notebook, we train DDPG with OpenAI Gym's Pendulum-v0 environment.\n\n### Import the Necessary Packages\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\nimport gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\nfrom ddpg_agent import Agent\n\n\"\"\"### Instantiate the Environment and Agent\"\"\"\n\nenv = gym.make('Pendulum-v0')\nenv.seed(2)\nagent = Agent(state_size=3, action_size=1, random_seed=2)\n\n\n\"\"\"### Load the saved torch file for actor and critic\"\"\"\n\nagent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))\nagent.critic_local.load_state_dict(torch.load('checkpoint_critic.pth'))\n\n\"\"\"### Train the Agent with DDPG\"\"\"\n\ndef ddpg(n_episodes=1000, max_t=300, print_every=100):\n scores_deque = deque(maxlen=print_every)\n scores = []\n for i_episode in range(1, n_episodes+1):\n state = env.reset()\n agent.reset()\n score = 0\n for t in range(max_t):\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_deque.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end=\"\")\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\n if i_episode % print_every == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n \n return scores\n\nscores = ddpg()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()\n\n\n\"\"\"### Watch a Smart Agent!\"\"\"\n\n\nstate = env.reset()\nfor t in range(200):\n action = agent.act(state, add_noise=False)\n env.render()\n state, reward, done, _ = env.step(action)\n if done:\n break \n\nenv.close()\n\n\"\"\"### Explore\n\nIn this exercise, we have provided a sample DDPG agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:\n- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster than this benchmark implementation. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task!\n- Write your own DDPG implementation. Use this code as reference only when needed -- try as much as you can to write your own algorithm from scratch.\n- You may also like to implement prioritized experience replay, to see if it speeds learning. \n- The current implementation adds Ornsetein-Uhlenbeck noise to the action space. However, it has [been shown](https://blog.openai.com/better-exploration-with-parameter-noise/) that adding noise to the parameters of the neural network policy can improve performance. 
Make this change to the code, to verify it for yourself!\n- Write a blog post explaining the intuition behind the DDPG algorithm and demonstrating how to use it to solve an RL environment of your choosing. \n\"\"\""
] |
[
[
"torch.load",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
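The DDPG record above drives the training loop but delegates the algorithm itself to `ddpg_agent.Agent`, which is not included in this excerpt. As a generic illustration of one standard DDPG ingredient, and not this repository's implementation, here is a soft target-network update in PyTorch; `soft_update`, the toy `nn.Linear` actors, and the value of `tau` are all assumptions made for the sketch.

```python
import torch
import torch.nn as nn

def soft_update(target: nn.Module, source: nn.Module, tau: float = 1e-3):
    # Polyak averaging: theta_target <- tau * theta_source + (1 - tau) * theta_target
    with torch.no_grad():
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.mul_(1.0 - tau).add_(tau * s_param)

# toy usage: two small actor networks with identical initial weights
actor = nn.Linear(3, 1)
actor_target = nn.Linear(3, 1)
actor_target.load_state_dict(actor.state_dict())
soft_update(actor_target, actor, tau=1e-3)
```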
ydecastro/lar_testing
|
[
"26c20a57fe85c889bf0670f8b25b350e21656872"
] |
[
"lars/Knockoffs.py"
] |
[
"import numpy as np\nfrom cvxopt import solvers\nfrom cvxopt.solvers import conelp\nfrom sklearn import linear_model\nfrom cvxopt import matrix\nimport scipy.cluster.hierarchy\nfrom statsmodels.stats.moment_helpers import cov2corr\n\nclass Knockoffs():\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef fdr_power_knockoffs(self, X, y, true_support, alpha=0.1, mode='equicorrelated', knockoff_plus=True, **kwargs):\n\t\tsupport_knockoff = self.support_fdr_knockoffs(X, y, alpha=alpha, mode=mode, knockoff_plus=knockoff_plus, **kwargs)\n\t\tFDR = self.FDR(support_knockoff, true_support)\n\t\tpower = self.power(support_knockoff, true_support)\n\t\treturn FDR, power\n\n\tdef support_fdr_knockoffs(self, X, y, alpha=0.1, mode='equicorrelated', knockoff_plus=True, **kwargs):\n\t\tself.n, self.p = X.shape\n\t\tXcorr = X / np.tile(np.linalg.norm(X, axis=0).reshape(1,-1),(self.n,1))\n\t\tXcorr[np.where(np.isnan(Xcorr))] = 0\n\t\tknockoff = self.knockoff(Xcorr, mode=mode, **kwargs)\n\t\tdesign = np.hstack((Xcorr,knockoff))\n\t\talphas, actives, coefs = linear_model.lars_path(design, y, method='lasso', verbose=False)\n\t\tZ = np.zeros(self.p)\n\t\tZtilde = np.zeros(self.p)\n\t\tcount = 0\n\t\talready_seen = np.zeros(2*self.p)\n\t\tfor index in actives:\n\t\t\tif not already_seen[index]:\n\t\t\t\tif index < self.p:\n\t\t\t\t\tZ[index] = alphas[count]\n\t\t\t\telse:\n\t\t\t\t\tZtilde[index % self.p] = alphas[count]\n\t\t\t\talready_seen[index] = 1\n\t\t\t\tcount += 1 \n\t\tW = np.zeros(self.p)\n\t\tfor i in range(self.p):\n\t\t\tW[i] = max(Z[i], Ztilde[i])\n\t\t\tif Z[i]<Ztilde[i]:\n\t\t\t\tW[i] *= -1\n\t\tWsort = np.sort(np.abs(W))\n\t\tT = -1\n\t\tstop = False\n\t\twhile (not(stop) and T<self.p-1):\n\t\t\tT += 1\n\t\t\tplus = np.sum(Wsort[T]<=W)\n\t\t\tmoins = np.sum(W<=-Wsort[T])\n\t\t\tif knockoff_plus:\n\t\t\t\tmoins += 1\n\t\t\tstop = (moins/max(1, plus))<=alpha\n\t\treturn np.where(W >= Wsort[T])[0]\n\n\tdef knockoff(self, X, mode='equicorrelated', **kwargs):\n\t\tSigma = X.T @ X\n\t\tif mode=='equicorrelated':\n\t\t\ts = self.s_equicorrelated(Sigma)\n\t\t\treturn self.s2knockoff(X, Sigma, s)\n\t\telif mode=='SDP':\n\t\t\ts = self.s_SDP(Sigma)\n\t\t\treturn self.s2knockoff(X, Sigma, s)\n\t\telif mode=='ASDP':\n\t\t\ts = self.s_ASDP(Sigma)\n\t\t\treturn self.s2knockoff(X, Sigma, s, **kwargs)\n\n\tdef s2knockoff(self, X, Sigma, s):\n\t\tinvSigma = np.linalg.inv(Sigma)\n\t\tA = 2*np.diag(s) - np.diag(s) @ invSigma @ np.diag(s)\n\t\tw, v = np.linalg.eig(A)\n\t\tw = np.real(w)\n\t\tw *= (w>0)\n\t\tC = np.diag(np.sqrt(w)) @ v.T\n\t\tu, __, __ = np.linalg.svd(X)\n\t\tu = u[:,:self.p]\n\t\tproj = np.eye(self.n) - u @ np.linalg.pinv(u)\n\t\tU, __, __ = np.linalg.svd(proj)\n\t\tU = U[:,:self.p]\n\t\treturn (X @ (np.eye(self.p) - invSigma @ np.diag(s)) + U @ C)\n\n\tdef s_equicorrelated(self, Sigma):\n\t\tlambda_min = np.min(np.linalg.eigvals(Sigma))\n\t\ts = min(2*lambda_min, 1) * np.ones(self.p)\n\t\ts *= s>0\n\t\t# Compensate for numerical errors (feasibility)\n\t\t# psd = False\n\t\t# s_eps = 1e-8\n\t\t# while not psd:\n\t\t# \tpsd = np.all(np.linalg.eigvals(2*Sigma-diag(s*(1-s_eps)))> 0)\n\t\t# \tif not psd:\n\t\t# \t\ts_eps = s_eps*10\n\t\t# s = s*(1-s_eps)\n\t\treturn s\n\n\tdef s_SDP(self, Sigma):\n\t\tp = Sigma.shape[0]\n\t\tc = -np.ones(p)\n\t\tc = matrix(c)\n\t\tG = np.zeros((2*p+p**2,p))\n\t\tG[:p,:] = np.eye(p)\n\t\tG[p:2*p,:] = -np.eye(p)\n\t\tfor i in range(p):\n\t\t\tG[2*p+p*i,i] = 1\n\t\tG = matrix(G)\n\t\th = np.ones(2*p+p**2)\n\t\th[p:2*p] *= 0\n\t\th[2*p:] *= 2*(Sigma).reshape(-1)\n\t\th = 
matrix(h)\n\t\tdims = {'l': 2*p, 'q': [], 's': [p]}\n\t\tsolvers.options['show_progress'] = False\n\t\tsol = conelp(c, G, h, dims)\n\t\ts = np.array(sol['x']).reshape(-1)\n\t\treturn s\n\n\tdef s_ASDP(self, Sigma, **kwargs):\n\t\t\"\"\" Section 3.4.2 : Panning for Gold:Model-X Knockoffs for High-dimensional Controlled Variable Selection \"\"\"\n\t\tmaxclustersize = kwargs.get('maxclustersize', self.p)\n\t\taccuracy = kwargs.get('accuracy', 1e-2)\n\t\tmax_iter = kwargs.get('max_iter', 100)\n\t\tlinkage = scipy.cluster.hierarchy.linkage(Sigma, method='single', metric='euclidean')\n\t\tgroups = {i:[i] for i in range(self.p)}\n\t\tnext_group = self.p \n\t\tfor i in range(linkage.shape[0]):\n\t\t\ttry:\n\t\t\t\tgroup1 = groups[linkage[i,0]]\n\t\t\t\tgroup2 = groups[linkage[i,1]]\n\t\t\t\tif len(group1)+len(group2) <= maxclustersize:\n\t\t\t\t\tgroups[next_group] = group1 + group2\n\t\t\t\t\tdel groups[linkage[i,0]]\n\t\t\t\t\tdel groups[linkage[i,1]]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tnext_group += 1\n\n\t\tblocks = list(groups.values())\n\t\ts = np.zeros(self.p)\n\t\tfor block in blocks:\n\t\t\ttemp = Sigma[block,:]\n\t\t\tshat = self.s_SDP(temp[:,block])\n\t\t\ts[block] = shat\n\t\t# Gershgorin circle theorem\n\t\tmaxgamma = min(1, np.min(2*np.diag(Sigma)/s))\n\t\tmingamma = 0\n\t\tnbite = 0\n\t\twhile (nbite<max_iter and np.abs(maxgamma-mingamma)<accuracy):\n\t\t\tgamma = (maxgamma + mingamma)/2\n\t\t\tnbite += 1\n\t\t\ttry:\n\t\t\t\t__ = np.linalg.cholesky(2*Sigma - np.diag(gamma*s))\n\t\t\t\tmingamma = gamma\n\t\t\texcept:\n\t\t\t\tmaxgamma = gamma\n\t\tgamma = (maxgamma + mingamma)/2\n\t\treturn gamma*s"
] |
[
[
"numpy.diag",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.where",
"numpy.hstack",
"numpy.linalg.svd",
"numpy.linalg.eig",
"numpy.eye",
"sklearn.linear_model.lars_path",
"numpy.real",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.isnan",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.linalg.norm",
"numpy.ones",
"numpy.linalg.pinv"
]
] |
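The Knockoffs class in the row above is self-contained apart from its imports, but fdr_power_knockoffs references FDR and power helpers that are not part of the snippet shown, so the entry point that can actually be exercised is support_fdr_knockoffs. Below is a minimal usage sketch on synthetic data, assuming the class has been saved to a local knockoffs.py and that numpy, scipy, scikit-learn, cvxopt and statsmodels are installed; the module name and problem sizes are illustrative assumptions, not part of the dataset row.

# Hypothetical usage sketch for the Knockoffs class shown in the row above.
# Assumes the class was saved to knockoffs.py; problem sizes are arbitrary.
import numpy as np
from knockoffs import Knockoffs  # hypothetical local module

rng = np.random.RandomState(0)
n, p = 400, 50                      # n >= 2p is needed for the fixed-X knockoff construction
X = rng.randn(n, p)
beta = np.zeros(p)
beta[:5] = 3.0                      # first 5 features carry signal
y = X @ beta + rng.randn(n)

ko = Knockoffs()
selected = ko.support_fdr_knockoffs(X, y, alpha=0.1, mode='equicorrelated')
print("selected features:", selected)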
liuhanyao98/nums-1
|
[
"4c27262e424ff7911cb9f4500d1df3019945d7fe"
] |
[
"nums/core/array/application.py"
] |
[
"# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import List\n\nimport numpy as np\n\nfrom nums.core.array.blockarray import BlockArray, Block\nfrom nums.core.array import utils as array_utils\nfrom nums.core.storage.storage import ArrayGrid, StoredArray, StoredArrayS3\n# TODO(hme): Remove dependence on specific system and scheduler implementations.\nfrom nums.core.systems.systems import System, RaySystem, SerialSystem\nfrom nums.core.systems.schedulers import BlockCyclicScheduler\nfrom nums.core.systems import utils as systems_utils\nfrom nums.core.systems.filesystem import FileSystem\nfrom nums.core.array.random import NumsRandomState\n\n\n# pylint: disable = too-many-lines\n\n\nclass ArrayApplication(object):\n\n def __init__(self, system: System, filesystem: FileSystem):\n self.system: System = system\n self._filesystem: FileSystem = filesystem\n self._array_grids: (str, ArrayGrid) = {}\n self.random = self.random_state()\n\n self.one_half = self.scalar(.5)\n self.two = self.scalar(2.0)\n self.one = self.scalar(1.0)\n self.zero = self.scalar(0.0)\n self._block_shape_map = {}\n\n def num_cores_total(self):\n if isinstance(self.system, RaySystem):\n system: RaySystem = self.system\n nodes = system.nodes()\n num_cores = sum(map(lambda n: n[\"Resources\"][\"CPU\"], nodes))\n else:\n assert isinstance(self.system, SerialSystem)\n num_cores = systems_utils.get_num_cores()\n return int(num_cores)\n\n def compute_block_shape(self,\n shape: tuple,\n dtype: np.dtype,\n cluster_shape=None,\n num_cores=None):\n # TODO (hme): Add support for downstream optimizer to decide block shape.\n if dtype in (np.float32, np.float64, float):\n dtype = np.finfo(dtype).dtype\n elif dtype in (np.int32, np.int64, int):\n dtype = np.iinfo(dtype).dtype\n elif dtype in (bool, np.bool_):\n dtype = np.dtype(np.bool_)\n else:\n raise ValueError(\"dtype %s not supported\" % str(dtype))\n\n nbytes = dtype.alignment\n size = np.product(shape) * nbytes\n # If the object is less than 100 megabytes, there's not much value in constructing\n # a block tensor.\n if size < 10 ** 8:\n block_shape = shape\n return block_shape\n\n if num_cores is not None:\n pass\n else:\n num_cores = self.num_cores_total()\n\n if cluster_shape is not None:\n pass\n elif isinstance(self.system, RaySystem) \\\n and isinstance(self.system.scheduler, BlockCyclicScheduler):\n # This configuration is the default.\n cluster_shape = self.system.scheduler.cluster_shape\n else:\n assert isinstance(self.system, SerialSystem)\n cluster_shape = (1, 1)\n\n if len(shape) < len(cluster_shape):\n cluster_shape = cluster_shape[:len(shape)]\n elif len(shape) > len(cluster_shape):\n cluster_shape = list(cluster_shape)\n for axis in range(len(shape)):\n if axis >= len(cluster_shape):\n cluster_shape.append(1)\n cluster_shape = tuple(cluster_shape)\n\n shape_np = np.array(shape, dtype=np.int)\n # Softmax on cluster shape gives strong preference to larger dimensions.\n cluster_weights = 
np.exp(np.array(cluster_shape)) / np.sum(np.exp(cluster_shape))\n shape_fracs = np.array(shape) / np.sum(shape)\n # cluster_weights weight the proportion of cores available along each axis,\n # and shape_fracs is the proportion of data along each axis.\n weighted_shape_fracs = cluster_weights * shape_fracs\n weighted_shape_fracs = weighted_shape_fracs / np.sum(weighted_shape_fracs)\n\n # Compute dimensions of grid shape\n # so that the number of blocks are close to the number of cores.\n grid_shape_frac = num_cores ** weighted_shape_fracs\n grid_shape = np.floor(grid_shape_frac)\n # Put remainder on largest axis.\n remaining = np.sum(grid_shape_frac - grid_shape)\n grid_shape[np.argmax(shape)] += remaining\n grid_shape = np.ceil(grid_shape).astype(np.int)\n\n # We use ceiling of floating block shape\n # so that resulting grid shape is <= to what we compute above.\n block_shape = tuple((shape_np + grid_shape - 1) // grid_shape)\n return block_shape\n\n def get_block_shape(self, shape, dtype: np.dtype):\n # Simple way to ensure shape compatibility for basic linear algebra operations.\n block_shape = self.compute_block_shape(shape, dtype)\n final_block_shape = []\n for axis in range(len(shape)):\n shape_dim = shape[axis]\n block_shape_dim = block_shape[axis]\n if shape_dim not in self._block_shape_map:\n self._block_shape_map[shape_dim] = block_shape_dim\n final_block_shape.append(self._block_shape_map[shape_dim])\n return tuple(final_block_shape)\n\n def _get_array_grid(self, filename: str, stored_array_cls) -> ArrayGrid:\n if filename not in self._array_grids:\n store_inst: StoredArray = stored_array_cls(filename)\n self._array_grids[filename] = store_inst.get_grid()\n return self._array_grids[filename]\n\n ######################################\n # Filesystem API\n ######################################\n\n def write_fs(self, ba: BlockArray, filename: str):\n res = self._write(ba, filename, self._filesystem.write_block_fs)\n self._filesystem.write_meta_fs(ba, filename)\n return res\n\n def read_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n ba: BlockArray = BlockArray(grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n ba.blocks[grid_entry].oid = self._filesystem.read_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n return ba\n\n def delete_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n rarr.blocks[grid_entry].oid = self._filesystem.delete_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n self._filesystem.delete_meta_fs(filename)\n return rarr\n\n def write_s3(self, ba: BlockArray, filename: str):\n grid_entry = tuple(np.zeros_like(ba.shape, dtype=np.int))\n result = self._filesystem.write_meta_s3(filename,\n grid_meta=ba.grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": ba.grid.grid_shape\n })\n assert \"ETag\" in self.system.get(result).item(), \"Metadata write failed.\"\n 
return self._write(ba, filename, self._filesystem.write_block_s3)\n\n def _write(self, ba: BlockArray, filename, remote_func):\n grid = ba.grid\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(ba.blocks[grid_entry].oid,\n filename,\n grid_entry,\n grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_s3(self, filename: str):\n store_cls, remote_func = StoredArrayS3, self._filesystem.read_block_s3\n grid = self._get_array_grid(filename, store_cls)\n grid_meta = grid.to_meta()\n grid_entry_iterator = grid.get_entry_iterator()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid_entry_iterator:\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def delete_s3(self, filename: str):\n grid = self._get_array_grid(filename, StoredArrayS3)\n grid_entry = tuple(np.zeros_like(grid.shape, dtype=np.int))\n result = self._filesystem.delete_meta_s3(filename,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n deleted_key = self.system.get(result).item()[\"Deleted\"][0][\"Key\"]\n assert deleted_key == StoredArrayS3(filename, grid).get_meta_key()\n results: BlockArray = self._delete(filename,\n StoredArrayS3,\n self._filesystem.delete_block_s3)\n return results\n\n def _delete(self, filename, store_cls, remote_func):\n grid = self._get_array_grid(filename, store_cls)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_csv(self, filename, dtype=np.float, delimiter=',', has_header=False, num_workers=None):\n if num_workers is None:\n num_workers = self.num_cores_total()\n arrays: list = self._filesystem.read_csv(filename, dtype, delimiter, has_header,\n num_workers)\n shape = np.zeros(len(arrays[0].shape), dtype=int)\n for array in arrays:\n shape += np.array(array.shape, dtype=int)\n shape = tuple(shape)\n block_shape = self.get_block_shape(shape, dtype)\n result = self.concatenate(arrays, axis=0, axis_block_size=block_shape[0])\n # Release references immediately, in case we need to do another reshape.\n del arrays\n if result.block_shape[1] != block_shape[1]:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def loadtxt(self, fname, dtype=float, comments='# ', delimiter=' ',\n converters=None, skiprows=0, usecols=None, unpack=False,\n ndmin=0, encoding='bytes', max_rows=None, num_workers=None) -> BlockArray:\n if num_workers is None:\n num_workers = self.num_cores_total()\n return self._filesystem.loadtxt(\n fname, dtype=dtype, comments=comments, delimiter=delimiter,\n converters=converters, skiprows=skiprows,\n usecols=usecols, unpack=unpack, ndmin=ndmin,\n encoding=encoding, max_rows=max_rows, num_workers=num_workers)\n\n ######################################\n # Array Operations API\n ######################################\n\n def scalar(self, value):\n return BlockArray.from_scalar(value, 
self.system)\n\n def array(self, array: np.ndarray, block_shape: tuple = None):\n assert len(array.shape) == len(block_shape)\n return BlockArray.from_np(array,\n block_shape=block_shape,\n copy=False,\n system=self.system)\n\n def zeros(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"zeros\", shape, block_shape, dtype)\n\n def ones(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"ones\", shape, block_shape, dtype)\n\n def empty(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"empty\", shape, block_shape, dtype)\n\n def _new_array(self, op_name: str, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n assert len(shape) == len(block_shape)\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = self.system.new_block(op_name,\n grid_entry,\n grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def concatenate(self, arrays: List, axis: int, axis_block_size: int = None):\n num_arrs = len(arrays)\n assert num_arrs > 1\n first_arr: BlockArray = arrays[0]\n num_axes = len(first_arr.shape)\n # Check assumptions and define result shapes and block shapes.\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n assert num_axes == len(curr_ba.shape), \"Unequal num axes.\"\n assert curr_ba.dtype == first_arr.dtype, \"Incompatible dtypes \" \\\n \"%s, %s\" % (curr_ba.dtype, first_arr.dtype)\n for curr_axis in range(num_axes):\n first_block_size = first_arr.block_shape[curr_axis]\n block_size = curr_ba.block_shape[curr_axis]\n if first_block_size == block_size:\n continue\n elif axis == curr_axis:\n assert axis_block_size is not None, \"block axis size is required \" \\\n \"when block shapes are neq.\"\n else:\n raise ValueError(\"Other axis shapes and block shapes must be equal.\")\n\n # Compute result shapes.\n result_shape = []\n result_block_shape = []\n for curr_axis in range(num_axes):\n if curr_axis == axis:\n if axis_block_size is None:\n # They are all equal.\n axis_block_size = first_arr.block_shape[curr_axis]\n result_block_size = axis_block_size\n result_size = 0\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n size = curr_ba.shape[curr_axis]\n result_size += size\n else:\n result_size = first_arr.shape[curr_axis]\n result_block_size = first_arr.block_shape[curr_axis]\n result_shape.append(result_size)\n result_block_shape.append(result_block_size)\n result_shape, result_block_shape = tuple(result_shape), tuple(result_block_shape)\n result_ba = self.empty(result_shape, result_block_shape, first_arr.dtype)\n\n # Write result blocks.\n # TODO (hme): This can be optimized by updating blocks directly.\n pos = 0\n for arr in arrays:\n delta = arr.shape[axis]\n axis_slice = slice(pos, pos+delta)\n result_selector = tuple([slice(None, None) for _ in range(axis)] + [axis_slice, ...])\n result_ba[result_selector] = arr\n pos += delta\n return result_ba\n\n def eye(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n assert len(shape) == len(block_shape) == 2\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = 
{\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.new_block(\"eye\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n return rarr\n\n def diag(self, X: BlockArray) -> BlockArray:\n if len(X.shape) == 1:\n shape = X.shape[0], X.shape[0]\n block_shape = X.block_shape[0], X.block_shape[0]\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.diag(X.blocks[grid_entry[0]].oid,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n elif len(X.shape) == 2:\n assert X.shape[0] == X.shape[1]\n assert X.block_shape[0] == X.block_shape[1]\n shape = X.shape[0],\n block_shape = X.block_shape[0],\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in X.grid.get_entry_iterator():\n out_grid_entry = grid_entry[:1]\n out_grid_shape = grid.grid_shape[:1]\n syskwargs = {\"grid_entry\": out_grid_entry, \"grid_shape\": out_grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[out_grid_entry].oid = self.system.diag(X.blocks[grid_entry].oid,\n syskwargs=syskwargs)\n else:\n raise ValueError(\"X must have 1 or 2 axes.\")\n return rarr\n\n def arange(self, shape, block_shape, step=1, dtype=np.int64) -> BlockArray:\n assert step == 1\n # Generate ranges per block.\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for _, grid_entry in enumerate(grid.get_entry_iterator()):\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n start = block_shape[0] * grid_entry[0]\n entry_shape = grid.get_block_shape(grid_entry)\n stop = start + entry_shape[0]\n rarr.blocks[grid_entry].oid = self.system.arange(start,\n stop,\n step,\n dtype,\n syskwargs=syskwargs)\n return rarr\n\n def linspace(self, start, stop, shape, block_shape, endpoint, retstep, dtype, axis):\n assert axis == 0\n assert endpoint is True\n assert retstep is False\n step_size = (stop - start) / (shape[0]-1)\n result = self.arange(shape, block_shape)\n result = start + result * step_size\n if dtype is not None and dtype != result.dtype:\n result = result.astype(dtype)\n return result\n\n def log(self, X: BlockArray):\n return X.ufunc(\"log\")\n\n def exp(self, X: BlockArray):\n return X.ufunc(\"exp\")\n\n def abs(self, X: BlockArray):\n return X.ufunc(\"abs\")\n\n def min(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"min\", X, axis, keepdims)\n\n def max(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"max\", X, axis, keepdims)\n\n def argmin(self, X: BlockArray, axis=None):\n pass\n\n def sum(self, X: BlockArray, axis=None, keepdims=False, dtype=None):\n return self.reduce(\"sum\", X, axis, keepdims, dtype)\n\n def reduce(self, op_name: str, X: BlockArray, axis=None, keepdims=False, dtype=None):\n res = X.reduce_axis(op_name, axis, keepdims=keepdims)\n if dtype is not None:\n res = res.astype(dtype)\n return 
res\n\n def mean(self, X: BlockArray, axis=None, keepdims=False, dtype=None):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n num_summed = np.product(X.shape) if axis is None else X.shape[axis]\n res = self.sum(X, axis=axis, keepdims=keepdims) / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def var(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n mean = self.mean(X, axis=axis, keepdims=True)\n ss = self.sum((X - mean)**self.two, axis=axis, keepdims=keepdims)\n num_summed = (np.product(X.shape) if axis is None else X.shape[axis]) - ddof\n res = ss / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def std(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n res = self.sqrt(self.var(X, axis, ddof, keepdims))\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def argop(self, op_name: str, arr: BlockArray, axis=None):\n if len(arr.shape) > 1:\n raise NotImplementedError(\"%s currently supports one-dimensional arrays.\" % op_name)\n if axis is None:\n axis = 0\n assert axis == 0\n grid = ArrayGrid(shape=(), block_shape=(), dtype=np.int64.__name__)\n result = BlockArray(grid, self.system)\n reduction_result = None, None\n for grid_entry in arr.grid.get_entry_iterator():\n block_slice: slice = arr.grid.get_slice(grid_entry)[0]\n block: Block = arr.blocks[grid_entry]\n syskwargs = {\n \"grid_entry\": grid_entry,\n \"grid_shape\": arr.grid.grid_shape,\n \"options\": {\"num_returns\": 2},\n }\n reduction_result = self.system.arg_op(op_name,\n block.oid,\n block_slice,\n *reduction_result,\n syskwargs=syskwargs)\n argoptima, _ = reduction_result\n result.blocks[()].oid = argoptima\n return result\n\n def sqrt(self, X):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n return X.ufunc(\"sqrt\")\n\n def norm(self, X):\n return self.sqrt(X.T @ X)\n\n def xlogy(self, x: BlockArray, y: BlockArray) -> BlockArray:\n if x.dtype not in (float, np.float32, np.float64):\n x = x.astype(np.float64)\n if x.dtype not in (float, np.float32, np.float64):\n y = y.astype(np.float64)\n return self.map_bop(\"xlogy\", x, y)\n\n def where(self, condition: BlockArray, x=None, y=None):\n result_oids = []\n shape_oids = []\n num_axes = max(1, len(condition.shape))\n # Stronger constraint than necessary, but no reason for anything stronger.\n if x is not None or y is not None:\n assert x is not None and y is not None\n assert condition.shape == x.shape == y.shape\n assert condition.block_shape == x.block_shape == y.block_shape\n for grid_entry in condition.grid.get_entry_iterator():\n block: Block = condition.blocks[grid_entry]\n block_slice_tuples = condition.grid.get_slice_tuples(grid_entry)\n roids = self.system.where(block.oid, x, y,\n block_slice_tuples,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": condition.grid.grid_shape,\n \"options\": {\"num_returns\": num_axes+1}\n })\n block_oids, shape_oid = roids[:-1], roids[-1]\n shape_oids.append(shape_oid)\n result_oids.append(block_oids)\n shapes = self.system.get(shape_oids)\n result_shape = (np.sum(shapes),)\n if result_shape == (0,):\n return (self.array(np.array([], dtype=np.int64), block_shape=(0,)),)\n # Remove empty shapes.\n result_shape_pair = []\n for i, shape in enumerate(shapes):\n if np.sum(shape) > 0:\n result_shape_pair.append((result_oids[i], shape))\n result_block_shape = self.compute_block_shape(result_shape, np.int64)\n result_arrays = []\n for axis in 
range(num_axes):\n block_arrays = []\n for i in range(len(result_oids)):\n if shapes[i] == (0,):\n continue\n block_arrays.append(BlockArray.from_oid(result_oids[i][axis],\n shapes[i],\n np.int64,\n self.system))\n if len(block_arrays) == 1:\n axis_result = block_arrays[0]\n else:\n axis_result = self.concatenate(block_arrays, 0, result_block_shape[0])\n result_arrays.append(axis_result)\n return tuple(result_arrays)\n\n def map_uop(self,\n op_name: str,\n arr: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n \"\"\"\n A map, for unary operators, that applies to every entry of an array.\n :param op_name: An element-wise unary operator.\n :param arr: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n args = () if args is None else args\n kwargs = {} if kwargs is None else kwargs\n shape = arr.shape\n block_shape = arr.block_shape\n dtype = array_utils.get_uop_output_type(op_name, arr.dtype)\n assert len(shape) == len(block_shape)\n if out is None:\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n else:\n rarr = out\n grid = rarr.grid\n assert rarr.shape == arr.shape and rarr.block_shape == arr.block_shape\n for grid_entry in grid.get_entry_iterator():\n # TODO(hme): Faster to create ndarray first,\n # and instantiate block array on return\n # to avoid instantiating blocks on BlockArray initialization.\n rarr.blocks[grid_entry] = arr.blocks[grid_entry].uop_map(op_name,\n args=args,\n kwargs=kwargs)\n return rarr\n\n def matmul(self,\n arr_1: BlockArray,\n arr_2: BlockArray) -> BlockArray:\n return arr_1 @ arr_2\n\n def tensordot(self,\n arr_1: BlockArray,\n arr_2: BlockArray,\n axes: int = 2) -> BlockArray:\n return arr_1.tensordot(arr_2, axes)\n\n def map_bop(self,\n op_name: str,\n arr_1: BlockArray,\n arr_2: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n # TODO (hme): Move this into BlockArray, and invoke on operator implementations.\n \"\"\"\n A map, for binary operators, that applies element-wise to every entry of the input arrays.\n :param op_name: An element-wise binary operator.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n if args is not None:\n raise NotImplementedError(\"'args' is not yet supported.\")\n if not (kwargs is None or len(kwargs) == 0):\n raise NotImplementedError(\"'kwargs' is not yet supported.\")\n\n try:\n ufunc = np.__getattribute__(op_name)\n if (op_name.endswith(\"max\") or op_name == \"maximum\"\n or op_name.endswith(\"min\") or op_name == \"minimum\"\n or op_name.startswith(\"logical\")):\n rarr = self._broadcast_bop(op_name, arr_1, arr_2)\n else:\n result_blocks: np.ndarray = ufunc(arr_1.blocks, arr_2.blocks)\n rarr = BlockArray.from_blocks(result_blocks,\n result_shape=None,\n system=self.system)\n except Exception as _:\n rarr = 
self._broadcast_bop(op_name, arr_1, arr_2)\n if out is not None:\n assert out.grid.grid_shape == rarr.grid.grid_shape\n assert out.shape == rarr.shape\n assert out.block_shape == rarr.block_shape\n out.blocks[:] = rarr.blocks[:]\n rarr = out\n return rarr\n\n def _broadcast_bop(self, op_name, arr_1, arr_2) -> BlockArray:\n \"\"\"\n We want to avoid invoking this op whenever possible; NumPy's imp is faster.\n :param op_name: Name of binary operation.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :return: A BlockArray.\n \"\"\"\n if arr_1.shape != arr_2.shape:\n output_grid_shape = array_utils.broadcast_shape(arr_1.grid.grid_shape,\n arr_2.grid.grid_shape)\n arr_1 = arr_1.broadcast_to(output_grid_shape)\n arr_2 = arr_2.broadcast_to(output_grid_shape)\n dtype = array_utils.get_bop_output_type(op_name,\n arr_1.dtype,\n arr_2.dtype)\n grid = ArrayGrid(arr_1.shape, arr_1.block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in rarr.grid.get_entry_iterator():\n block_1: Block = arr_1.blocks[grid_entry]\n block_2: Block = arr_2.blocks[grid_entry]\n rarr.blocks[grid_entry] = block_1.bop(op_name, block_2, {})\n return rarr\n\n def get(self, *arrs):\n if len(arrs) == 1:\n if isinstance(arrs[0], BlockArray):\n return arrs[0].get()\n else:\n return arrs[0]\n else:\n r = []\n for item in arrs:\n if isinstance(item, BlockArray):\n r.append(item.get())\n else:\n r.append(item)\n return r\n\n def allclose(self, a: BlockArray, b: BlockArray, rtol=1.e-5, atol=1.e-8):\n assert a.shape == b.shape and a.block_shape == b.block_shape\n bool_list = []\n grid_shape = a.grid.grid_shape\n for grid_entry in a.grid.get_entry_iterator():\n a_block, b_block = a.blocks[grid_entry].oid, b.blocks[grid_entry].oid\n bool_list.append(self.system.allclose(a_block, b_block, rtol, atol,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid_shape\n }))\n oid = self.system.logical_and(*bool_list,\n syskwargs={\"grid_entry\": (0, 0), \"grid_shape\": (1, 1)})\n return BlockArray.from_oid(oid, (), np.bool, self.system)\n\n def qr(self, X: BlockArray):\n return self.indirect_tsqr(X)\n\n def indirect_tsr(self, X: BlockArray, reshape_output=True):\n assert len(X.shape) == 2\n # TODO (hme): This assertion is temporary and ensures returned\n # shape of qr of block is correct.\n assert X.block_shape[0] >= X.shape[1]\n # Compute R for each block.\n grid = X.grid\n grid_shape = grid.grid_shape\n shape = X.shape\n block_shape = X.block_shape\n R_oids = []\n # Assume no blocking along second dim.\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n R_oids.append(self.system.qr(*row,\n mode=\"r\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 1}\n })\n )\n\n # Construct R by summing over R blocks.\n # TODO (hme): Communication may be inefficient due to redundancy of data.\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = BlockArray(ArrayGrid(shape=R_shape,\n block_shape=R_shape,\n dtype=X.dtype.__name__),\n self.system)\n tsR.blocks[0, 0].oid = self.system.qr(*R_oids,\n mode=\"r\",\n axis=0,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 1}\n })\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = 
tsR\n else:\n R = tsR\n return R\n\n def indirect_tsqr(self, X: BlockArray, reshape_output=True):\n shape = X.shape\n block_shape = X.block_shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self.indirect_tsr(X, reshape_output=False)\n\n # Compute inverse of R.\n tsR_inverse = self.inv(tsR)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n R_inverse = tsR_inverse.reshape(shape=R_shape, block_shape=R_block_shape)\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = tsR\n else:\n R_inverse = tsR_inverse\n R = tsR\n\n Q = X @ R_inverse\n return Q, R\n\n def direct_tsqr(self, X, reshape_output=True):\n assert len(X.shape) == 2\n\n # Compute R for each block.\n shape = X.shape\n grid = X.grid\n grid_shape = grid.grid_shape\n block_shape = X.block_shape\n Q_oids = []\n R_oids = []\n QR_dims = []\n Q2_shape = [0, shape[1]]\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n # We invoke \"reduced\", so q, r is returned with dimensions (M, K), (K, N), K = min(M, N)\n M = grid.get_block_shape((i, 0))[0]\n N = shape[1]\n K = min(M, N)\n QR_dims.append(((M, K), (K, N)))\n Q2_shape[0] += K\n # Run each row on separate nodes along first axis.\n # This maintains some data locality.\n Q_oid, R_oid = self.system.qr(*row,\n mode=\"reduced\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 2}\n })\n R_oids.append(R_oid)\n Q_oids.append(Q_oid)\n\n # TODO (hme): This pulls several order N^2 R matrices on a single node.\n # A solution is the recursive extension to direct TSQR.\n Q2_oid, R2_oid = self.system.qr(*R_oids,\n mode=\"reduced\",\n axis=0,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 2}\n })\n\n Q2_shape = tuple(Q2_shape)\n Q2_block_shape = (QR_dims[0][1][0], shape[1])\n Q2 = self._vec_from_oids([Q2_oid],\n shape=Q2_shape,\n block_shape=Q2_block_shape,\n dtype=X.dtype)\n # The resulting Q's from this operation are N^2 (same size as above R's).\n Q2_oids = list(map(lambda block: block.oid, Q2.blocks.flatten()))\n\n # Construct Q.\n Q = self.zeros(shape=shape,\n block_shape=(block_shape[0], shape[1]),\n dtype=X.dtype)\n for i, grid_entry in enumerate(Q.grid.get_entry_iterator()):\n Q_dims, R_dims = QR_dims[i]\n Q1_block_shape = Q_dims\n Q2_block_shape = R_dims\n Q.blocks[grid_entry].oid = self.system.bop(\"tensordot\", Q_oids[i], Q2_oids[i],\n a1_shape=Q1_block_shape,\n a2_shape=Q2_block_shape,\n a1_T=False, a2_T=False, axes=1,\n syskwargs={\"grid_entry\": grid_entry,\n \"grid_shape\": Q.grid.grid_shape})\n\n # Construct R.\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self._vec_from_oids([R2_oid], shape=R_shape, block_shape=R_shape, dtype=X.dtype)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape == R_block_shape or not reshape_output:\n R = tsR\n else:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n\n if Q.shape != block_shape or not reshape_output:\n Q = Q.reshape(shape=shape, block_shape=block_shape)\n\n return Q, R\n\n def svd(self, X):\n # TODO(hme): Optimize by merging with direct qr to compute U directly,\n # to avoid wasting space storing intermediate Q.\n # This may not really help until we have operator fusion.\n assert len(X.shape) == 2\n block_shape = 
X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n assert R.shape == R.block_shape\n R_U, S, VT = self.system.svd(R.blocks[(0, 0)].oid,\n syskwargs={\"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)})\n R_U: BlockArray = self._vec_from_oids([R_U], R_shape, R_block_shape, X.dtype)\n S: BlockArray = self._vec_from_oids([S], R_shape[:1], R_block_shape[:1], X.dtype)\n VT = self._vec_from_oids([VT], R_shape, R_block_shape, X.dtype)\n U = Q @ R_U\n\n return U, S, VT\n\n def inv(self, X: BlockArray):\n return self._inv(self.system.inv, {}, X)\n\n def _inv(self, remote_func, kwargs, X: BlockArray):\n # TODO (hme): Implement scalable version.\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = remote_func(result.blocks[0, 0].oid,\n **kwargs,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def cholesky(self, X: BlockArray):\n # TODO (hme): Implement scalable version.\n # Note:\n # A = Q, R\n # A.T @ A = R.T @ R\n # A.T @ A = L @ L.T\n # => R == L.T\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = self.system.cholesky(result.blocks[0, 0].oid,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def fast_linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.indirect_tsqr(X, reshape_output=False)\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n # Invert R.\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def ridge_regression(self, X: BlockArray, y: BlockArray, lamb: float):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n assert lamb >= 0\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n R = self.indirect_tsr(X)\n lamb_vec = self.array(lamb*np.eye(R_shape[0]), block_shape=R_block_shape)\n # TODO (hme): A better solution exists, which inverts R by augmenting X and y.\n # See Murphy 7.5.2.\n theta = self.inv(lamb_vec + R.T @ R) @ (X.T @ y)\n return theta\n\n def _vec_from_oids(self, oids, shape, block_shape, dtype):\n arr = BlockArray(ArrayGrid(shape=shape,\n block_shape=shape,\n 
dtype=dtype.__name__),\n self.system)\n # Make sure resulting grid shape is a vector (1 dimensional).\n assert np.sum(arr.grid.grid_shape) == (max(arr.grid.grid_shape)\n + len(arr.grid.grid_shape) - 1)\n for i, grid_entry in enumerate(arr.grid.get_entry_iterator()):\n arr.blocks[grid_entry].oid = oids[i]\n if block_shape != shape:\n return arr.reshape(block_shape=block_shape)\n return arr\n\n def random_state(self, seed=None):\n return NumsRandomState(self.system, seed)\n"
] |
[
[
"numpy.__getattribute__",
"numpy.product",
"numpy.ones_like",
"numpy.eye",
"numpy.dtype",
"numpy.finfo",
"numpy.ceil",
"numpy.argmax",
"numpy.zeros_like",
"numpy.iinfo",
"numpy.floor",
"numpy.diff",
"numpy.exp",
"numpy.array",
"numpy.sum"
]
] |
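compute_block_shape in the preceding row decides a block shape by softmax-weighting the cluster shape against the data shape and then rounding the resulting grid. The standalone NumPy sketch below reproduces just that arithmetic so the rounding behaviour can be inspected without a NumS cluster; the array shape, cluster shape and core count are assumed example values.

# Standalone sketch of the grid/block-shape arithmetic used in
# ArrayApplication.compute_block_shape (row above); inputs are assumed values.
import numpy as np

shape = np.array([100_000, 250])        # example array shape
cluster_shape = np.array([2, 2])        # assumed 2x2 cluster
num_cores = 4

# Softmax over the cluster shape favours larger cluster dimensions.
cluster_weights = np.exp(cluster_shape) / np.sum(np.exp(cluster_shape))
shape_fracs = shape / np.sum(shape)
weighted = cluster_weights * shape_fracs
weighted /= np.sum(weighted)

# Grid shape whose product is close to the core count; remainder goes to the largest axis.
grid_shape_frac = num_cores ** weighted
grid_shape = np.floor(grid_shape_frac)
grid_shape[np.argmax(shape)] += np.sum(grid_shape_frac - grid_shape)
grid_shape = np.ceil(grid_shape).astype(int)

# Ceiling division gives the per-block shape.
block_shape = tuple((shape + grid_shape - 1) // grid_shape)
print(grid_shape, block_shape)          # e.g. [4 1] and (25000, 250) for these inputs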
LiWen525/ultrasound-nerve-segmentation
|
[
"918a630108cca29d17aa5126176040dd2915827a"
] |
[
"train.py"
] |
[
"from __future__ import print_function\n\nimport os\nfrom skimage.transform import resize\nfrom skimage.io import imsave\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import backend as K\n\nfrom data import load_train_data, load_test_data\n\nK.set_image_data_format('channels_last') # TF dimension ordering in this code\n\nimg_rows = 96\nimg_cols = 96\n\nsmooth = 1.\n\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef get_unet():\n inputs = Input((img_rows, img_cols, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])\n\n return model\n\n\ndef preprocess(imgs):\n imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)\n for i in range(imgs.shape[0]):\n imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)\n\n imgs_p = imgs_p[..., np.newaxis]\n return imgs_p\n\n\ndef train_and_predict():\n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30)\n imgs_train, imgs_mask_train = load_train_data()\n\n imgs_train = preprocess(imgs_train)\n imgs_mask_train = 
preprocess(imgs_mask_train)\n\n imgs_train = imgs_train.astype('float32')\n mean = np.mean(imgs_train) # mean for data centering\n std = np.std(imgs_train) # std for data normalization\n\n imgs_train -= mean\n imgs_train /= std\n\n imgs_mask_train = imgs_mask_train.astype('float32')\n imgs_mask_train /= 255. # scale masks to [0, 1]\n\n print('-'*30)\n print('Creating and compiling model...')\n print('-'*30)\n model = get_unet()\n model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)\n\n print('-'*30)\n print('Fitting model...')\n print('-'*30)\n model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1,validation_split=0.2,\n shuffle=True,\n callbacks=[model_checkpoint])\n\n print('-'*30)\n print('Loading and preprocessing test data...')\n print('-'*30)\n imgs_test, imgs_id_test = load_test_data()\n imgs_test = preprocess(imgs_test)\n\n imgs_test = imgs_test.astype('float32')\n imgs_test -= mean\n imgs_test /= std\n\n print('-'*30)\n print('Loading saved weights...')\n print('-'*30)\n model.load_weights('weights.h5')\n\n print('-'*30)\n print('Predicting masks on test data...')\n print('-'*30)\n imgs_mask_test = model.predict(imgs_test, verbose=1)\n np.save('imgs_mask_test.npy', imgs_mask_test)\n\n print('-' * 30)\n print('Saving predicted masks to files...')\n print('-' * 30)\n pred_dir = 'preds'\n if not os.path.exists(pred_dir):\n os.mkdir(pred_dir)\n for image, image_id in zip(imgs_mask_test, imgs_id_test):\n image = (image[:, :, 0] * 255.).astype(np.uint8)\n imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)\n\nif __name__ == '__main__':\n train_and_predict()\n"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.ndarray",
"numpy.save"
]
] |
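The only custom component in the train.py row above is the smoothed Dice coefficient used as both metric and (negated) loss. The sketch below re-expresses the same formula in plain NumPy for offline sanity checks of saved masks; the example masks mirror the 96x96 setup of the script, but the inputs themselves are assumptions.

# NumPy sanity check of the smoothed Dice coefficient used in the row above.
import numpy as np

smooth = 1.0

def dice_coef_np(y_true, y_pred):
    # Same formula as the Keras version: (2*intersection + smooth) / (sum_true + sum_pred + smooth).
    y_true_f = y_true.ravel().astype(np.float32)
    y_pred_f = y_pred.ravel().astype(np.float32)
    intersection = np.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)

mask = np.zeros((96, 96), dtype=np.float32)
mask[20:60, 20:60] = 1.0
pred = np.roll(mask, 5, axis=0)          # imperfect prediction for illustration
print(dice_coef_np(mask, mask))          # ~1.0 for a perfect match
print(dice_coef_np(mask, pred))          # < 1.0 for the shifted mask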
granrothge/multiphonon
|
[
"486a998eeb6b73b964a58ba0f98fe3ece15bdf6e"
] |
[
"tests/data/work-V/round-5/plot_sqe.py"
] |
[
"#!/usr/bin/env python\nimport os\ncurdir = os.path.dirname(__file__)\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize'] = 12,9\nfrom multiphonon.backward.plotutils import plot_intermediate_result_sqe as plot\nplot(curdir)\nfrom matplotlib import pyplot as plt\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
3secondz-lab/Vanshe
|
[
"535616f70635115521c7888369b4f304512dd9a7"
] |
[
"imu_publisher/utils/imu_time_check.py"
] |
[
"#!/usr/bin/env python3\n\nimport rospy\nimport rosbag\nfrom sensor_msgs.msg import *\nfrom matplotlib import pyplot as plt\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Rosbag IMU time checker')\nparser.add_argument('--input', type=str, required=False,\n # default='/home/rnd/Data/200922/2020-09-22-13-42-25.bag',\n default='/home/rnd/Data/200922/2020-09-22-15-39-31.bag',\n help='Path to input bag')\nparser.add_argument('--topic', type=str, required=False,\n default='/imu_publisher/imu',\n # default='/imu/imu',\n help='Topic name')\n\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n bag_in = rosbag.Bag(args.input, 'r')\n\n print('{}'.format(args.input))\n\n t_base = 0\n\n t_bag = []\n t_topic = []\n t_diff = []\n\n for topic, msg, t in bag_in.read_messages():\n if t_base == 0:\n t_base = t.to_sec()\n\n if topic == args.topic:\n t_b = t.to_sec() - t_base\n t_t = msg.header.stamp.to_sec() - t_base\n t_d = t_b - t_t\n\n # if t_b < 119.50 or t_b > 120.0:\n # continue\n\n t_bag.append(t_b)\n t_topic.append(t_t)\n t_diff.append(t_d)\n\n print('seq : {} : {:.16f} / {:.16f} / {:.16f}'.format(msg.header.seq, t_b, t_t, t_d))\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(3, 1, 1)\n ax2 = fig.add_subplot(3, 1, 2)\n ax3 = fig.add_subplot(3, 1, 3)\n\n ax1.plot(t_bag, t_topic)\n ax1.set_xlabel('Bag')\n ax1.set_ylabel('Topic')\n ax1.set_title('Bag vs. Topic')\n\n ax2.plot(t_bag, t_diff)\n ax2.set_xlabel('Bag')\n ax2.set_ylabel('Diff')\n ax2.set_title('Bag vs. Diff')\n\n ax3.plot(t_topic)\n\n plt.show()\n\n print('Check finished.')\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
Omerdan03/image-caption-dnz
|
[
"5e3c0ac4ab01cd02d50406d45bf06b0345614779"
] |
[
"main.py"
] |
[
"import numpy as np\nimport cv2\nimport os\nfrom flask import Flask, request, render_template, send_file\n\nTO_SCALE = 600\n\napp = Flask(__name__)\n\nUPLOAD_FOLDER = os.path.basename('uploads')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# get the categories names and colors\nf = open(\"coco.names\", \"r\")\ncoco_classes = f.read().split(\"\\n\")[:-1]\ncolor_arr = np.random.randint(0, 256, size=(80, 3), dtype='int')\n\n# build the pretrained network\nNET = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')\n\n\n@app.route(\"/main\")\n@app.route(\"/\")\ndef home_page():\n print(\"Start\")\n return render_template('upload.html')\n\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n print('start upload method')\n file = request.files['image']\n if not file:\n return render_template('upload.html')\n # Read image we got from the user to np.array\n\n image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_UNCHANGED)\n print('image converted to np.array')\n print(f'image ordinal size: {image.shape}')\n # caption\n\n output_img = caption(image)\n print('finished captioning')\n\n # local\\temp save\n filename = 'output.png'\n cv2.imwrite(filename, output_img)\n print(f'Saved {filename}')\n\n # Return image\n return send_file(filename, mimetype='image/png')\n\n\ndef output_coordinates_to_box_coordinates(cx, cy, w, h, img_h, img_w):\n abs_x = int((cx - w/2) * img_w)\n abs_y = int((cy - h/2) * img_h)\n abs_w = int(w * img_w)\n abs_h = int(h * img_h)\n return abs_x, abs_y, abs_w, abs_h\n\n\ndef numpy_to_list(array):\n return [int(num) for num in array]\n\n\ndef caption(img):\n \"\"\"\n This function gets an np.array of an image and returns an np.array of image with the right tags\n :param img: np.array\n :return: np.array\n \"\"\"\n print('caption started')\n\n\n # Scaling the photo so the largest dimension will be 600\n scale = TO_SCALE / max(img.shape)\n width = int(img.shape[1] * scale)\n height = int(img.shape[0] * scale)\n dim = (width, height)\n img = cv2.resize(img, dim, cv2.INTER_AREA)\n print(f'scaled to {dim}')\n\n print('captioning with model')\n blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), swapRB=True, crop=False)\n NET.setInput(blob)\n NET.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n unconnected = NET.getUnconnectedOutLayers()\n output_names = [NET.getLayerNames()[layer_num[0] - 1] for layer_num in unconnected]\n large, medium, small = NET.forward(output_names)\n all_outputs = np.vstack((large, medium, small))\n objs = all_outputs[all_outputs[:, 4] > 0.1]\n boxes = [output_coordinates_to_box_coordinates(*obj[:4], *img.shape[:2]) for obj in objs]\n confidences = [float(obj[4]) for obj in objs]\n class_names = [coco_classes[np.argmax(obj[5:])] for obj in objs]\n colors = [numpy_to_list(color_arr[np.argmax(obj[5:])]) for obj in objs]\n indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n\n img_yolo = img.copy()\n print('finished captioning, putting tags on the image')\n for i in indices.flatten():\n x, y, w, h = boxes[i]\n class_name = class_names[i]\n confidence = confidences[i]\n color = colors[i]\n text = f'{class_name} {confidence:.3}'\n cv2.rectangle(img_yolo, (x, y), (x + w, y + h), color, 5)\n cv2.putText(img_yolo, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)\n\n print('finish tagging, return image')\n return img_yolo\n\n\ndef main():\n port = os.environ.get('PORT')\n if port:\n app.run(host='0.0.0.0', port=int(port))\n else:\n app.run(host='0.0.0.0')\n\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"numpy.argmax",
"numpy.vstack",
"numpy.random.randint"
]
] |
Lukas-Justen/Airbnb-Price-Evaluator
|
[
"8759e32e94510520984223d18f0f9b09396aa448"
] |
[
"scripts/text_analysis.py"
] |
[
"import pandas as pd\nimport re\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom nltk.corpus import stopwords\nimport nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndata = pd.read_csv('data/seattle/3/listings_texts.csv')\n\ncorpus = data['description']\ny = data['price']\nX=[]\nfor i,line in enumerate(corpus):\n clear = [x for x in re.sub(r'[^\\w\\'\\s]', '',line.lower()).split() if x not in stopwords.words('english')]\n X.append(' '.join(clear))\n if i%100 == 0:\n print(\"Progress : \", i)\n if i == 4000:\n break\n\nprint(\"Moving on!\")\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(X)\n\nnet = Sequential()\nnet.add(Dense(200, input_dim=X[0].shape[1], kernel_initializer='normal',activation='relu'))\nnet.add(Dense(100, input_dim=200, kernel_initializer='normal',activation='relu'))\nnet.add(Dense(1, input_dim=100, kernel_initializer='normal'))\nnet.compile(loss='mean_squared_error', optimizer='adam')\nnet.fit(X[:3000],y[:3000], epochs=70, batch_size=100)\n\nprint(net.evaluate(X[3001:] ,y[3001:]))\nfor i in range(50):\n print(net.predict(X[3001+i]), y[3001+i])\n"
] |
[
[
"pandas.read_csv",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
rmkm/handwritten-japanese-ocr
|
[
"82945a0fa0eb0b9b8f55a98a8892f059373a8970"
] |
[
"handwritten-japanese-OCR-touch-panel-demo.py"
] |
[
"\"\"\"\nHandwritten Japanese OCR demo program\n Based on a sample program from OpenVINO 2020.2 (handwritten-japanese-recognition-demo.py)\n\"\"\"\n\n\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport math\nimport logging as log\nfrom argparse import ArgumentParser, SUPPRESS\n\nimport cv2\nimport numpy as np\nfrom functools import reduce\n\nfrom PIL import ImageFont, ImageDraw, Image\n\nfrom openvino.inference_engine import IENetwork, IECore\nfrom utils.codec import CTCCodec\n\n# Canvas size is the same as the input size of the text detection model (to ommit resizing before text area inference)\n_canvas_x = 1280\n_canvas_y = 768\n\n\n# -----------------------------------------------------------------\n\ndef get_characters(char_file):\n with open(char_file, 'r', encoding='utf-8') as f:\n return ''.join(line.strip('\\n') for line in f)\n\n\ndef preprocess_input(src, height, width):\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n ratio = float(src.shape[1]) / float(src.shape[0])\n tw = int(height * ratio)\n\n rsz = cv2.resize(src, (tw, height), interpolation=cv2.INTER_CUBIC).astype(np.float32)\n outimg = np.full((height, width), 255., np.float32)\n rsz_h, rsz_w = rsz.shape\n outimg[:rsz_h, :rsz_w] = rsz\n cv2.imshow('OCR input image', outimg)\n\n outimg = np.reshape(outimg, (1, height, width))\n return outimg\n\n# -----------------------------------------------------------------\n\ndef softmax_channel(data):\n for i in range(0, len(data), 2):\n m=max(data[i], data[i+1])\n data[i ] = math.exp(data[i ]-m)\n data[i+1] = math.exp(data[i+1]-m)\n s=data[i ]+data[i+1]\n data[i ]/=s\n data[i+1]/=s\n return data\n\n\ndef findRoot(point, group_mask):\n root = point\n update_parent = False\n while group_mask[root] != -1:\n root = group_mask[root]\n update_parent = True\n if update_parent:\n group_mask[point] = root\n return root\n\n\ndef join(p1, p2, group_mask):\n root1 = findRoot(p1, group_mask)\n root2 = findRoot(p2, group_mask)\n if root1 != root2:\n group_mask[root1] = root2\n\n\ndef get_all(points, w, h, group_mask):\n root_map = {}\n mask = np.zeros((h, w), np.int32)\n for px, py in points:\n point_root = findRoot(px+py*w, group_mask)\n if not point_root in root_map:\n root_map[point_root] = len(root_map)+1\n mask[py, px] = root_map[point_root]\n return mask\n\n\ndef decodeImageByJoin(segm_data, segm_data_shape, link_data, link_data_shape, segm_conf_thresh, link_conf_thresh):\n h = segm_data_shape[1]\n w = segm_data_shape[2]\n pixel_mask = np.full((h*w,), False, dtype=np.bool)\n group_mask = {}\n points = []\n for i, segm in enumerate(segm_data):\n if segm>segm_conf_thresh:\n pixel_mask[i] = True\n points.append((i%w, i//w))\n group_mask[i] = -1\n else:\n pixel_mask[i] = False\n \n link_mask = np.array([ ld>=link_conf_thresh for ld in link_data ])\n\n neighbours = int(link_data_shape[3])\n for px, py in points:\n neighbor = 0\n for ny in range(py-1, py+1+1):\n for nx in range(px-1, px+1+1):\n if nx==px and ny==py:\n 
continue\n if nx<0 or nx>=w or ny<0 or ny>=h:\n continue\n pixel_value = pixel_mask[ny*w + nx]\n link_value = link_mask [py*w + px*neighbours + neighbor ]\n if pixel_value and link_value:\n join(px+py*w, nx+ny*w, group_mask)\n neighbor+=1\n return get_all(points, w, h, group_mask)\n\n\ndef maskToBoxes(mask, min_area, min_height, image_size):\n _X=0\n _Y=1\n bboxes = []\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(mask)\n max_bbox_idx = int(max_val)\n resized_mask = cv2.resize(mask, image_size, interpolation=cv2.INTER_NEAREST)\n\n for i in range(1, max_bbox_idx+1):\n bbox_mask = np.where(resized_mask==i, 255, 0).astype(np.uint8)\n contours, hierarchy = cv2.findContours(bbox_mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours)==0:\n continue\n center, size, angle = cv2.minAreaRect(contours[0])\n if min(size[_X], size[_Y]) < min_height:\n continue\n if size[_X]*size[_Y] < min_area:\n continue\n bboxes.append((center, size, angle))\n return bboxes\n\n\ndef text_detection_postprocess(link, segm, image_size, segm_conf_thresh, link_conf_thresh):\n _N = 0\n _C = 1\n _H = 2\n _W = 3\n kMinArea = 300\n kMinHeight = 10\n\n link_shape = link.shape\n link_data_size = reduce(lambda a, b: a*b, link_shape)\n link_data = link.transpose((_N, _H, _W, _C))\n link_data = link_data.flatten()\n link_data = softmax_channel(link_data)\n link_data = link_data.reshape((-1,2))[:,1]\n new_link_data_shape = [ link_shape[0], link_shape[2], link_shape[3], link_shape[1]/2 ]\n\n segm_shape = segm.shape\n segm_data_size = reduce(lambda a, b: a*b, segm_shape)\n segm_data = segm.transpose((_N, _H, _W, _C))\n segm_data = segm_data.flatten()\n segm_data = softmax_channel(segm_data)\n segm_data = segm_data.reshape((-1,2))[:,1]\n new_segm_data_shape = [ segm_shape[0], segm_shape[2], segm_shape[3], segm_shape[1]/2 ]\n\n mask = decodeImageByJoin(segm_data, new_segm_data_shape, link_data, new_link_data_shape, \n segm_conf_thresh, link_conf_thresh)\n rects = maskToBoxes(mask, kMinArea, kMinHeight, image_size)\n\n return rects\n\n\n\n# ----------------------------------------------------------------------------\n\ndef topLeftPoint(points):\n big_number = 1e10\n _X=0\n _Y=1\n most_left = [big_number, big_number]\n almost_most_left = [big_number, big_number]\n most_left_idx = -1\n almost_most_left_idx = -1\n\n for i, point in enumerate(points):\n px, py = point\n if most_left[_X]>px:\n if most_left[_X]<big_number:\n almost_most_left = most_left\n almost_most_left_idx = most_left_idx\n most_left = [px, py]\n most_left_idx = i\n if almost_most_left[_X] > px and [px,py]!=most_left:\n almost_most_left = [px,py]\n almost_most_left_idx = i\n if almost_most_left[_Y]<most_left[_Y]:\n most_left = almost_most_left\n most_left_idx = almost_most_left_idx\n return most_left_idx, most_left\n\n\ndef cropRotatedImage(image, points, top_left_point_idx):\n _X=1\n _Y=0\n _C=2\n \n point0 = points[ top_left_point_idx ]\n point1 = points[(top_left_point_idx+1) % 4]\n point2 = points[(top_left_point_idx+2) % 4]\n \n target_size = (int(np.linalg.norm(point2-point1, ord=2)), int(np.linalg.norm(point1-point0, ord=2)), 3)\n\n crop = np.full(target_size, 255, np.uint8)\n \n _from = np.array([ point0, point1, point2 ], dtype=np.float32)\n _to = np.array([ [0,0], [target_size[_X]-1, 0], [target_size[_X]-1, target_size[_Y]-1] ], dtype=np.float32)\n\n M = cv2.getAffineTransform(_from, _to)\n crop = cv2.warpAffine(image, M, (target_size[_X], target_size[_Y]))\n\n return crop\n\n# 
----------------------------------------------------------------------------\n\ng_mouseX=-1\ng_mouseY=-1\ng_mouseBtn = -1 # 0=left, 1=right, -1=none\n\ng_UIState = 0 # 0: normal UI, 1: wait for a click\ng_clickedFlag = False\ng_recogFlag = False\n\ng_threshold = 50\ng_canvas = []\n\ndef putJapaneseText(img, x, y, text, size=32):\n if os.name =='nt':\n #fontName = 'meiryo.ttc' # Win10\n fontName = 'msgothic.ttc' # Win10\n elif os.name == 'posix':\n fontName = 'NotoSansCJK-Regular.ttc' # Ubuntu\n elif os.name == 'Darwin':\n fontName = 'Osaka.ttf' # Not tested ...\n else:\n fontName = 'UnknownOS'\n\n try:\n font = ImageFont.truetype(fontName, size)\n except IOError:\n cv2.putText(img, 'font \"{}\" not found'.format(fontName), (x,y-8), cv2.FONT_HERSHEY_PLAIN, 2, (0,0,255), 2)\n else:\n img_pil = Image.fromarray(img)\n draw = ImageDraw.Draw(img_pil)\n w,h = draw.textsize(text, font)\n draw.text((x, y-h*1.2), text, font=font, fill=(255,0,0,0))\n img = np.array(img_pil)\n\n return img\n\n\ndef drawUI(image):\n cv2.circle(image, (0 , 0), 100, ( 0, 255, 255), -1)\n cv2.circle(image, (image.shape[1]-1, 0), 100, ( 0, 255, 0), -1)\n cv2.putText(image, 'RECOGNIZE', (4 ,20), cv2.FONT_HERSHEY_PLAIN, 1, ( 0, 0, 0), 2)\n cv2.putText(image, 'CLEAR' , (image.shape[1]-60 ,20), cv2.FONT_HERSHEY_PLAIN, 1, ( 0, 0, 0), 2)\n\n\ndef clearCanvas():\n global g_canvas\n g_canvas = np.full((_canvas_y, _canvas_x, 3), [255,255,255], np.uint8)\n\n\ndef dispCanvas():\n global g_canvas\n canvas = g_canvas.copy()\n drawUI(canvas)\n cv2.imshow('canvas', canvas)\n cv2.waitKey(1)\n\n\n# Mouse event handler\ndef onMouse(event, x, y, flags, param):\n global g_mouseX, g_mouseY\n global g_mouseBtn\n global g_recogFlag\n global g_clickedFlag\n global g_UIState\n\n global g_canvas\n\n black_pen = lambda x1, y1, x2, y2: cv2.line(g_canvas, (x1, y1), (x2, y2), ( 0, 0, 0), thickness=12)\n white_pen = lambda x1, y1, x2, y2: cv2.line(g_canvas, (x1, y1), (x2, y2), (255,255,255), thickness=36)\n\n if g_UIState==0: # Normal UI\n if event == cv2.EVENT_LBUTTONDOWN:\n p0=np.array([ 0, 0])\n p1=np.array([_canvas_x, 0])\n pp=np.array([ x, y])\n if np.linalg.norm(pp-p0, ord=2)<100: # Recognition\n g_recogFlag = True\n elif np.linalg.norm(pp-p1, ord=2)<100: # Clear\n clearCanvas()\n else:\n g_mouseBtn = 0 # left button\n if event == cv2.EVENT_LBUTTONUP:\n if g_mouseBtn==0:\n black_pen(g_mouseX, g_mouseY, x, y)\n g_mouseBtn = -1\n if event == cv2.EVENT_RBUTTONDOWN:\n g_mouseBtn = 1 # right button\n if event == cv2.EVENT_RBUTTONUP:\n if g_mouseBtn==1:\n white_pen(g_mouseX, g_mouseY, x, y)\n g_mouseBtn = -1\n if event == cv2.EVENT_MOUSEMOVE:\n if g_mouseBtn==0:\n black_pen(g_mouseX, g_mouseY, x, y)\n elif g_mouseBtn==1:\n white_pen(g_mouseX, g_mouseY, x, y)\n elif g_UIState==1: # no draw. 
wait for click state\n if event == cv2.EVENT_LBUTTONUP:\n g_clickedFlag=True\n\n g_mouseX = x\n g_mouseY = y\n\ndef onTrackbar(x):\n global g_threshold\n g_threshold = x\n\n# ----------------------------------------------------------------------------\n\ndef main():\n _H=0\n _W=1\n _C=2\n\n global g_canvas\n global g_threshold\n global g_UIState\n global g_recogFlag\n global g_clickedFlag\n\n # Plugin initialization\n ie = IECore()\n\n # text-detection-0003 in: (1,3,768,1280) out: model/link_logits_/add(1,16,192,320) model/segm_logits/add(1,2,192,320)\n model='text-detection-0003'\n model = './intel/'+model+'/FP16/'+model\n net_td = ie.read_network(model+'.xml', model+'.bin')\n input_blob_td = next(iter(net_td.inputs))\n out_blob_td = next(iter(net_td.outputs))\n exec_net_td = ie.load_network(net_td, 'CPU')\n\n # handwritten-japanese-recognition\n model = 'handwritten-japanese-recognition-0001'\n model = './intel/'+model+'/FP16/'+model\n net = ie.read_network(model+'.xml', model+'.bin')\n input_blob = next(iter(net.inputs))\n out_blob = next(iter(net.outputs))\n input_batch_size, input_channel, input_height, input_width= net.inputs[input_blob].shape\n exec_net = ie.load_network(net, 'CPU')\n\n characters = get_characters('data/kondate_nakayosi_char_list.txt')\n codec = CTCCodec(characters)\n\n clearCanvas()\n cv2.namedWindow('canvas')\n cv2.setMouseCallback('canvas', onMouse)\n cv2.createTrackbar('Threshold', 'canvas', 50, 100, onTrackbar)\n\n while True:\n g_UIState = 0\n while g_recogFlag==False:\n dispCanvas()\n key=cv2.waitKey(100)\n if key==27:\n return\n if key==ord(' '):\n break\n g_recogFlag = False\n g_UIState = 1\n\n print('text detection')\n img = cv2.resize(g_canvas, (_canvas_x, _canvas_y))\n img = img.transpose((_C, _H, _W))\n img = img.reshape((1, 3, _canvas_y, _canvas_x))\n res_td = exec_net_td.infer(inputs={input_blob_td: img})\n link = res_td['model/link_logits_/add'] # 1,16,192,320\n segm = res_td['model/segm_logits/add' ] # 1, 2,192,320\n rects = text_detection_postprocess(link, segm, (_canvas_x, _canvas_y), g_threshold/100., g_threshold/100.)\n print('text detection - completed')\n\n canvas2 = g_canvas.copy()\n for i, rect in enumerate(rects):\n box = cv2.boxPoints(rect).astype(np.int32)\n cv2.polylines(canvas2, [box], True, (255,0,0), 4)\n\n most_left_idx, most_left = topLeftPoint(box)\n crop = cropRotatedImage(g_canvas, box, most_left_idx)\n input_image = preprocess_input(crop, input_height, input_width)[None,:,:,:]\n\n preds = exec_net.infer(inputs={input_blob: input_image})\n preds = preds[out_blob]\n result = codec.decode(preds)\n print('OCR result ({}): {}'.format(i, result))\n \n canvas2 = putJapaneseText(canvas2, most_left[0], most_left[1], result[0])\n cv2.imshow('canvas', canvas2)\n cv2.waitKey(1)\n\n cv2.putText(canvas2, 'Hit any key, tap screen or click L-button to continue', (0, 40), cv2.FONT_HERSHEY_PLAIN, 2, (0,0,0), 2)\n cv2.imshow('canvas', canvas2)\n g_clickedFlag=False\n key=-1\n while g_clickedFlag==False and key==-1:\n key=cv2.waitKey(100)\n\n return\n\nif __name__ == '__main__':\n print('Handwritten Japanese OCR Demo')\n print('ESC: Quit')\n print('Mouse L-Button: Draw')\n print('Mouse R-Button: Erase')\n print('Threshold = Text area detect threshold')\n main()\n"
] |
[
[
"numpy.reshape",
"numpy.linalg.norm",
"numpy.full",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
laure-delisle/cs148-hw2
|
[
"dc7c4ba22876370c1b8288c45b9a3be8dbb0bb17"
] |
[
"generate_split.py"
] |
[
"import numpy as np\nimport os\n\nnp.random.seed(2020) # to ensure you always get the same train/test split\n\ndata_path = '../data/RedLights2011_Medium'\ngts_path = '../data/hw02_annotations'\nsplit_path = '../data/hw02_splits'\nos.makedirs(split_path, exist_ok=True) # create directory if needed\n\nsplit_test = False # set to True and run when annotations are available\n\ntrain_frac = 0.85\n\n# get sorted list of files:\nfile_names = sorted(os.listdir(data_path))\n\n# remove any non-JPEG files:\nfile_names = [f for f in file_names if '.jpg' in f]\n\n# shuffle file names\nnum_files = len(file_names)\ntrain_indices = np.random.permutation(num_files)\nnum_train_files = np.rint(num_files*train_frac).astype(int)\n\n# split file names into train and test\nfile_names_train = np.array(file_names)[train_indices[:num_train_files]]\nfile_names_test = np.array(file_names)[train_indices[num_train_files:]]\n\n\nassert (len(file_names_train) + len(file_names_test)) == len(file_names)\nassert len(np.intersect1d(file_names_train,file_names_test)) == 0\n\nnp.save(os.path.join(split_path,'file_names_train.npy'),file_names_train)\nnp.save(os.path.join(split_path,'file_names_test.npy'),file_names_test)\n\nif split_test:\n with open(os.path.join(gts_path, 'annotations.json'),'r') as f:\n gts = json.load(f)\n \n # Use file_names_train and file_names_test to apply the split to the\n # annotations\n gts_train = {}\n gts_test = {}\n\n for file_name in file_names_train:\n gts_train[file_name] = gts[file_name]\n for file_name in file_names_test:\n gts_test[file_name] = gts[file_name]\n \n with open(os.path.join(gts_path, 'annotations_train.json'),'w') as f:\n json.dump(gts_train,f)\n \n with open(os.path.join(gts_path, 'annotations_test.json'),'w') as f:\n json.dump(gts_test,f)\n \n \n"
] |
[
[
"numpy.random.seed",
"numpy.rint",
"numpy.intersect1d",
"numpy.random.permutation",
"numpy.array"
]
] |
rheiland/pc4fury
|
[
"41ef56afcfdfc7931fd1b82450f36fd33dfc7697"
] |
[
"bin/substrates.py"
] |
[
"# substrates Tab\n\nimport os, math\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \\\n FloatText, Dropdown, interactive\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.patches import Circle, Ellipse, Rectangle\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.colors as mplc\nfrom collections import deque\nimport numpy as np\nimport scipy.io\nimport xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html\nimport glob\nimport platform\nimport zipfile\nfrom debug import debug_view \nimport warnings\n\nhublib_flag = True\nif platform.system() != 'Windows':\n try:\n# print(\"Trying to import hublib.ui\")\n from hublib.ui import Download\n except:\n hublib_flag = False\nelse:\n hublib_flag = False\n\n#warnings.warn(message, mplDeprecation, stacklevel=1)\nwarnings.filterwarnings(\"ignore\")\n\nclass SubstrateTab(object):\n\n def __init__(self):\n \n self.output_dir = '.'\n # self.output_dir = 'tmpdir'\n\n self.figsize_width_substrate = 15.0 # allow extra for colormap\n self.figsize_height_substrate = 12.5\n self.figsize_width_svg = 12.0\n self.figsize_height_svg = 12.0\n\n # self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot\n\n self.first_time = True\n self.modulo = 1\n\n self.use_defaults = True\n\n self.svg_delta_t = 1\n self.substrate_delta_t = 1\n self.svg_frame = 1\n self.substrate_frame = 1\n\n self.customized_output_freq = False\n self.therapy_activation_time = 1000000\n self.max_svg_frame_pre_therapy = 1000000\n self.max_substrate_frame_pre_therapy = 1000000\n\n self.svg_xmin = 0\n\n # Probably don't want to hardwire these if we allow changing the domain size\n # self.svg_xrange = 2000\n # self.xmin = -1000.\n # self.xmax = 1000.\n # self.ymin = -1000.\n # self.ymax = 1000.\n # self.x_range = 2000.\n # self.y_range = 2000.\n\n self.show_nucleus = False\n self.show_edge = True\n\n # initial value\n self.field_index = 4\n # self.field_index = self.mcds_field.value + 4\n\n self.skip_cb = False\n\n # define dummy size of mesh (set in the tool's primary module)\n self.numx = 0\n self.numy = 0\n\n self.title_str = ''\n\n tab_height = '600px'\n tab_height = '500px'\n constWidth = '180px'\n constWidth2 = '150px'\n tab_layout = Layout(width='900px', # border='2px solid black',\n height=tab_height, ) #overflow_y='scroll')\n\n max_frames = 1 \n # self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False) \n # self.i_plot = interactive(self.plot_plots, frame=(0, max_frames), continuous_update=False) \n self.i_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False) \n\n # \"plot_size\" controls the size of the tab height, not the plot (rf. figsize for that)\n # NOTE: the Substrates Plot tab has an extra row of widgets at the top of it (cf. 
Cell Plots tab)\n svg_plot_size = '700px'\n svg_plot_size = '600px'\n svg_plot_size = '700px'\n svg_plot_size = '900px'\n self.i_plot.layout.width = svg_plot_size\n self.i_plot.layout.height = svg_plot_size\n\n self.fontsize = 20\n\n # description='# cell frames',\n self.max_frames = BoundedIntText(\n min=0, max=99999, value=max_frames,\n description='# frames',\n layout=Layout(width='160px'),\n )\n self.max_frames.observe(self.update_max_frames)\n\n # self.field_min_max = {'dummy': [0., 1., False]}\n # NOTE: manually setting these for now (vs. parsing them out of data/initial.xml)\n self.field_min_max = {'director signal':[0.,1.,False], 'cargo signal':[0.,1.,False] }\n # hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below\n # self.field_dict = {0:'dummy'}\n self.field_dict = {0:'director signal', 1:'cargo signal'}\n\n self.mcds_field = Dropdown(\n options={'director signal': 0, 'cargo signal':1},\n disabled=True,\n value=0,\n # description='Field',\n layout=Layout(width=constWidth)\n )\n # print(\"substrate __init__: self.mcds_field.value=\",self.mcds_field.value)\n# self.mcds_field.observe(self.mcds_field_cb)\n self.mcds_field.observe(self.mcds_field_changed_cb)\n\n self.field_cmap = Dropdown(\n options=['viridis', 'jet', 'YlOrRd'],\n value='YlOrRd',\n disabled=True,\n # description='Field',\n layout=Layout(width=constWidth)\n )\n# self.field_cmap.observe(self.plot_substrate)\n self.field_cmap.observe(self.mcds_field_cb)\n\n self.cmap_fixed_toggle = Checkbox(\n description='Fix',\n disabled=True,\n# layout=Layout(width=constWidth2),\n )\n self.cmap_fixed_toggle.observe(self.mcds_field_cb)\n\n# def cmap_fixed_toggle_cb(b):\n# # self.update()\n# # self.field_min_max = {'oxygen': [0., 30.,True], 'glucose': [0., 1.,False]}\n# field_name = self.field_dict[self.mcds_field.value]\n# if (self.cmap_fixed_toggle.value): \n# self.field_min_max[field_name][0] = self.cmap_min.value\n# self.field_min_max[field_name][1] = self.cmap_max.value\n# self.field_min_max[field_name][2] = True\n# else:\n# # self.field_min_max[field_name][0] = self.cmap_min.value\n# # self.field_min_max[field_name][1] = self.cmap_max.value\n# self.field_min_max[field_name][2] = False\n# self.i_plot.update()\n\n # self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)\n\n# self.save_min_max= Button(\n# description='Save', #style={'description_width': 'initial'},\n# button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n# tooltip='Save min/max for this substrate',\n# disabled=True,\n# layout=Layout(width='90px')\n# )\n\n# def save_min_max_cb(b):\n# # field_name = self.mcds_field.options[]\n# # field_name = next(key for key, value in self.mcds_field.options.items() if value == self.mcds_field.value)\n# field_name = self.field_dict[self.mcds_field.value]\n# # print(field_name)\n# # self.field_min_max = {'oxygen': [0., 30.], 'glucose': [0., 1.], 'H+ ions': [0., 1.], 'ECM': [0., 1.], 'NP1': [0., 1.], 'NP2': [0., 1.]}\n# self.field_min_max[field_name][0] = self.cmap_min.value\n# self.field_min_max[field_name][1] = self.cmap_max.value\n# # print(self.field_min_max)\n\n# self.save_min_max.on_click(save_min_max_cb)\n\n\n self.cmap_min = FloatText(\n description='Min',\n value=0,\n step = 0.1,\n disabled=True,\n layout=Layout(width=constWidth2),\n )\n self.cmap_min.observe(self.mcds_field_cb)\n\n self.cmap_max = FloatText(\n description='Max',\n value=38,\n step = 0.1,\n disabled=True,\n layout=Layout(width=constWidth2),\n )\n self.cmap_max.observe(self.mcds_field_cb)\n\n def 
cmap_fixed_toggle_cb(b):\n field_name = self.field_dict[self.mcds_field.value]\n # print(self.cmap_fixed_toggle.value)\n if (self.cmap_fixed_toggle.value): # toggle on fixed range\n self.cmap_min.disabled = False\n self.cmap_max.disabled = False\n self.field_min_max[field_name][0] = self.cmap_min.value\n self.field_min_max[field_name][1] = self.cmap_max.value\n self.field_min_max[field_name][2] = True\n # self.save_min_max.disabled = False\n else: # toggle off fixed range\n self.cmap_min.disabled = True\n self.cmap_max.disabled = True\n self.field_min_max[field_name][2] = False\n # self.save_min_max.disabled = True\n# self.mcds_field_cb()\n self.i_plot.update()\n\n self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)\n\n field_cmap_row2 = HBox([self.field_cmap, self.cmap_fixed_toggle])\n\n# field_cmap_row3 = HBox([self.save_min_max, self.cmap_min, self.cmap_max])\n items_auto = [\n # self.save_min_max, #layout=Layout(flex='3 1 auto', width='auto'),\n self.cmap_min, \n self.cmap_max, \n ]\n box_layout = Layout(display='flex',\n flex_flow='row',\n align_items='stretch',\n width='80%')\n field_cmap_row3 = Box(children=items_auto, layout=box_layout)\n\n # self.debug_str = Text(\n # value='debug info',\n # description='Debug:',\n # disabled=True,\n # layout=Layout(width='600px'), #constWidth = '180px'\n # )\n\n #---------------------\n self.cell_nucleus_toggle = Checkbox(\n description='nuclei',\n disabled=False,\n value = self.show_nucleus,\n# layout=Layout(width=constWidth2),\n )\n def cell_nucleus_toggle_cb(b):\n # self.update()\n if (self.cell_nucleus_toggle.value): \n self.show_nucleus = True\n else:\n self.show_nucleus = False\n self.i_plot.update()\n\n self.cell_nucleus_toggle.observe(cell_nucleus_toggle_cb)\n\n #----\n self.cell_edges_toggle = Checkbox(\n description='edges',\n disabled=False,\n value=self.show_edge,\n# layout=Layout(width=constWidth2),\n )\n def cell_edges_toggle_cb(b):\n # self.update()\n if (self.cell_edges_toggle.value): \n self.show_edge = True\n else:\n self.show_edge = False\n self.i_plot.update()\n\n self.cell_edges_toggle.observe(cell_edges_toggle_cb)\n\n self.cells_toggle = Checkbox(\n description='Cells',\n disabled=False,\n value=True,\n# layout=Layout(width=constWidth2),\n )\n def cells_toggle_cb(b):\n # self.update()\n self.i_plot.update()\n if (self.cells_toggle.value):\n self.cell_edges_toggle.disabled = False\n self.cell_nucleus_toggle.disabled = False\n else:\n self.cell_edges_toggle.disabled = True\n self.cell_nucleus_toggle.disabled = True\n\n self.cells_toggle.observe(cells_toggle_cb)\n\n #---------------------\n self.substrates_toggle = Checkbox(\n description='Substrates',\n disabled=True,\n value=False,\n# layout=Layout(width=constWidth2),\n )\n def substrates_toggle_cb(b):\n if (self.substrates_toggle.value): # seems bass-ackwards\n self.cmap_fixed_toggle.disabled = False\n self.cmap_min.disabled = False\n self.cmap_max.disabled = False\n self.mcds_field.disabled = False\n self.field_cmap.disabled = False\n else:\n self.cmap_fixed_toggle.disabled = True\n self.cmap_min.disabled = True\n self.cmap_max.disabled = True\n self.mcds_field.disabled = True\n self.field_cmap.disabled = True\n\n self.substrates_toggle.observe(substrates_toggle_cb)\n\n self.grid_toggle = Checkbox(\n description='grid',\n disabled=False,\n value=True,\n# layout=Layout(width=constWidth2),\n )\n def grid_toggle_cb(b):\n # self.update()\n self.i_plot.update()\n\n self.grid_toggle.observe(grid_toggle_cb)\n\n# field_cmap_row3 = Box([self.save_min_max, self.cmap_min, 
self.cmap_max])\n\n # mcds_tab = widgets.VBox([mcds_dir, mcds_plot, mcds_play], layout=tab_layout)\n # mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3, self.max_frames]) # mcds_dir\n# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3,]) # mcds_dir\n\n# self.tab = HBox([mcds_params, self.mcds_plot], layout=tab_layout)\n\n help_label = Label('select slider: drag or left/right arrows')\n # row1 = Box([help_label, Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='0px solid black',\n row1a = Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='1px solid black',\n width='50%',\n height='',\n align_items='stretch',\n flex_direction='row',\n display='flex')) \n row1b = Box( [self.cells_toggle, self.cell_nucleus_toggle, self.cell_edges_toggle], layout=Layout(border='1px solid black',\n width='50%',\n height='',\n align_items='stretch',\n flex_direction='row',\n display='flex')) \n row1 = HBox( [row1a, Label('.....'), row1b])\n\n row2a = Box([self.cmap_fixed_toggle, self.cmap_min, self.cmap_max], layout=Layout(border='1px solid black',\n width='50%',\n height='',\n align_items='stretch',\n flex_direction='row',\n display='flex'))\n # row2b = Box( [self.substrates_toggle, self.grid_toggle], layout=Layout(border='1px solid black',\n row2b = Box( [self.substrates_toggle, ], layout=Layout(border='1px solid black',\n width='50%',\n height='',\n align_items='stretch',\n flex_direction='row',\n display='flex')) \n # row2 = HBox( [row2a, self.substrates_toggle, self.grid_toggle])\n row2 = HBox( [row2a, Label('.....'), row2b])\n\n if (hublib_flag):\n self.fury_button= Button(\n description=\"Send current frame's 3D data to Fury\", #style={'description_width': 'initial'},\n button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Click to send data to the Fury GPU server',\n disabled=False,\n layout=Layout(width='280px')\n )\n self.fury_feedback_str = Label(value='')\n\n def send_to_fury_cb(b):\n self.fury_feedback_str.value = \"working...\"\n session_dir = os.getenv('SESSIONDIR')\n print('session_dir = ',session_dir)\n session_id = os.getenv('SESSION')\n print('session_id = ',session_id)\n user_id = os.getenv('USER')\n print('user_id = ',user_id)\n fury_data_path_str = \"/data/tools/shared/\" + user_id + \"/fury/\" + session_id\n\n # dummy to test locally\n # fury_data_path_str = \"/tmp/\" + user_id + \"/fury\" \n print(\"fury_data_path_str = \",fury_data_path_str)\n\n os.makedirs(fury_data_path_str, exist_ok=True)\n # data_file = \"output00000001_cells_physicell.mat\"\n data_file = \"output%08d_cells_physicell.mat\" % self.svg_frame\n # from the app's root directory\n from_file = \"tmpdir/\" + data_file\n print(\"from: \",from_file)\n to_file = fury_data_path_str + \"/\" + data_file\n print(\"to: \",to_file)\n copyfile(from_file, to_file)\n # copyfile(\"tmpdir/\" + data_file, fury_data_path_str + \"/\" + \"output00000001_cells_physicell.mat\")\n\n self.fury_feedback_str.value = \"\"\n\n self.fury_button.on_click(send_to_fury_cb)\n fury_row = HBox([self.fury_button, self.fury_feedback_str])\n\n# self.fury_button = Button(description='random_seed', disabled=True, layout=name_button_layout)\n# param_name1.style.button_color = 'lightgreen'\n\n self.download_button = Download('mcds.zip', style='warning', icon='cloud-download', \n tooltip='Download data', cb=self.download_cb)\n\n self.download_svg_button = Download('svg.zip', style='warning', icon='cloud-download', \n tooltip='You need to 
allow pop-ups in your browser', cb=self.download_svg_cb)\n download_row = HBox([self.download_button.w, self.download_svg_button.w, Label(\"Download all cell plots (browser must allow pop-ups).\")])\n\n # box_layout = Layout(border='0px solid')\n controls_box = VBox([row1, row2]) # ,width='50%', layout=box_layout)\n self.tab = VBox([controls_box, self.i_plot, fury_row, download_row])\n # self.tab = VBox([controls_box, self.debug_str, self.i_plot, download_row])\n else:\n # self.tab = VBox([row1, row2])\n self.tab = VBox([row1, row2, self.i_plot])\n\n #---------------------------------------------------\n def update_dropdown_fields(self, data_dir):\n # print('update_dropdown_fields called --------')\n self.output_dir = data_dir\n tree = None\n try:\n fname = os.path.join(self.output_dir, \"initial.xml\")\n tree = ET.parse(fname)\n xml_root = tree.getroot()\n except:\n print(\"Cannot open \",fname,\" to read info, e.g., names of substrate fields.\")\n return\n\n xml_root = tree.getroot()\n self.field_min_max = {}\n self.field_dict = {}\n dropdown_options = {}\n uep = xml_root.find('.//variables')\n comment_str = \"\"\n field_idx = 0\n if (uep):\n for elm in uep.findall('variable'):\n # print(\"-----> \",elm.attrib['name'])\n field_name = elm.attrib['name']\n self.field_min_max[field_name] = [0., 1., False]\n self.field_dict[field_idx] = field_name\n dropdown_options[field_name] = field_idx\n\n self.field_min_max[field_name][0] = 0 \n self.field_min_max[field_name][1] = 1\n\n # self.field_min_max[field_name][0] = field_idx #rwh: helps debug\n # self.field_min_max[field_name][1] = field_idx+1 \n self.field_min_max[field_name][2] = False\n field_idx += 1\n\n# constWidth = '180px'\n # print('options=',dropdown_options)\n # print(self.field_min_max) # debug\n self.mcds_field.value = 0\n self.mcds_field.options = dropdown_options\n# self.mcds_field = Dropdown(\n# # options={'oxygen': 0, 'glucose': 1},\n# options=dropdown_options,\n# value=0,\n# # description='Field',\n# layout=Layout(width=constWidth)\n# )\n\n # def update_max_frames_expected(self, value): # called when beginning an interactive Run\n # self.max_frames.value = value # assumes naming scheme: \"snapshot%08d.svg\"\n # self.mcds_plot.children[0].max = self.max_frames.value\n\n#------------------------------------------------------------------------------\n def update_params(self, config_tab, user_params_tab):\n # xml_root.find(\".//x_min\").text = str(self.xmin.value)\n # xml_root.find(\".//x_max\").text = str(self.xmax.value)\n # xml_root.find(\".//dx\").text = str(self.xdelta.value)\n # xml_root.find(\".//y_min\").text = str(self.ymin.value)\n # xml_root.find(\".//y_max\").text = str(self.ymax.value)\n # xml_root.find(\".//dy\").text = str(self.ydelta.value)\n # xml_root.find(\".//z_min\").text = str(self.zmin.value)\n # xml_root.find(\".//z_max\").text = str(self.zmax.value)\n # xml_root.find(\".//dz\").text = str(self.zdelta.value)\n\n self.xmin = config_tab.xmin.value \n self.xmax = config_tab.xmax.value \n self.x_range = self.xmax - self.xmin\n self.svg_xrange = self.xmax - self.xmin\n self.ymin = config_tab.ymin.value\n self.ymax = config_tab.ymax.value \n self.y_range = self.ymax - self.ymin\n\n self.numx = math.ceil( (self.xmax - self.xmin) / config_tab.xdelta.value)\n self.numy = math.ceil( (self.ymax - self.ymin) / config_tab.ydelta.value)\n\n if (self.x_range > self.y_range): \n ratio = self.y_range / self.x_range\n self.figsize_width_substrate = 15.0 # allow extra for colormap\n self.figsize_height_substrate = 12.5 * 
ratio\n self.figsize_width_svg = 12.0\n self.figsize_height_svg = 12.0 * ratio\n else: # x < y\n ratio = self.x_range / self.y_range\n self.figsize_width_substrate = 15.0 * ratio \n self.figsize_height_substrate = 12.5\n self.figsize_width_svg = 12.0 * ratio\n self.figsize_height_svg = 12.0 \n\n self.svg_flag = config_tab.toggle_svg.value\n self.substrates_flag = config_tab.toggle_mcds.value\n # print(\"substrates: update_params(): svg_flag, toggle=\",self.svg_flag,config_tab.toggle_svg.value) \n # print(\"substrates: update_params(): self.substrates_flag = \",self.substrates_flag)\n self.svg_delta_t = config_tab.svg_interval.value\n self.substrate_delta_t = config_tab.mcds_interval.value\n self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # print(\"substrates: update_params(): modulo=\",self.modulo) \n\n if self.customized_output_freq:\n# self.therapy_activation_time = user_params_tab.therapy_activation_time.value # NOTE: edit for user param name\n # print(\"substrates: update_params(): therapy_activation_time=\",self.therapy_activation_time)\n self.max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)\n self.max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)\n\n#------------------------------------------------------------------------------\n# def update(self, rdir):\n# Called from driver module (e.g., pc4*.py) (among other places?)\n def update(self, rdir=''):\n # with debug_view:\n # print(\"substrates: update rdir=\", rdir) \n # print(\"substrates: update rdir=\", rdir) \n\n if rdir:\n self.output_dir = rdir\n\n # print('update(): self.output_dir = ', self.output_dir)\n\n if self.first_time:\n # if True:\n self.first_time = False\n full_xml_filename = Path(os.path.join(self.output_dir, 'config.xml'))\n # print(\"substrates: update(), config.xml = \",full_xml_filename) \n # self.num_svgs = len(glob.glob(os.path.join(self.output_dir, 'snap*.svg')))\n # self.num_substrates = len(glob.glob(os.path.join(self.output_dir, 'output*.xml')))\n # print(\"substrates: num_svgs,num_substrates =\",self.num_svgs,self.num_substrates) \n # argh - no! 
If no files created, then denom = -1\n # self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))\n # print(\"substrates: update(): modulo=\",self.modulo) \n if full_xml_filename.is_file():\n tree = ET.parse(str(full_xml_filename)) # this file cannot be overwritten; part of tool distro\n xml_root = tree.getroot()\n self.svg_delta_t = float(xml_root.find(\".//SVG//interval\").text)\n self.substrate_delta_t = float(xml_root.find(\".//full_data//interval\").text)\n # print(\"substrates: svg,substrate delta_t values=\",self.svg_delta_t,self.substrate_delta_t) \n self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # print(\"substrates: update(): modulo=\",self.modulo) \n\n\n # all_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml'))) # if the substrates/MCDS\n\n all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snap*.svg'))) # if .svg\n if len(all_files) > 0:\n last_file = all_files[-1]\n self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: \"snapshot%08d.svg\"\n else:\n substrate_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))\n if len(substrate_files) > 0:\n last_file = substrate_files[-1]\n self.max_frames.value = int(last_file[-12:-4])\n\n def download_svg_cb(self):\n file_str = os.path.join(self.output_dir, '*.svg')\n # print('zip up all ',file_str)\n with zipfile.ZipFile('svg.zip', 'w') as myzip:\n for f in glob.glob(file_str):\n myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive\n\n def download_cb(self):\n file_xml = os.path.join(self.output_dir, '*.xml')\n file_mat = os.path.join(self.output_dir, '*.mat')\n # print('zip up all ',file_str)\n with zipfile.ZipFile('mcds.zip', 'w') as myzip:\n for f in glob.glob(file_xml):\n myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive\n for f in glob.glob(file_mat):\n myzip.write(f, os.path.basename(f))\n\n def update_max_frames(self,_b):\n self.i_plot.children[0].max = self.max_frames.value\n\n # called if user selected different substrate in dropdown\n def mcds_field_changed_cb(self, b):\n # print(\"mcds_field_changed_cb: self.mcds_field.value=\",self.mcds_field.value)\n if (self.mcds_field.value == None):\n return\n self.field_index = self.mcds_field.value + 4\n\n field_name = self.field_dict[self.mcds_field.value]\n # print('mcds_field_changed_cb: field_name='+ field_name)\n # print(self.field_min_max[field_name])\n # self.debug_str.value = 'mcds_field_changed_cb: '+ field_name + str(self.field_min_max[field_name])\n # self.debug_str.value = 'cb1: '+ str(self.field_min_max)\n\n # BEWARE of these triggering the mcds_field_cb() callback! 
Hence, the \"skip_cb\"\n self.skip_cb = True\n self.cmap_min.value = self.field_min_max[field_name][0]\n self.cmap_max.value = self.field_min_max[field_name][1]\n self.cmap_fixed_toggle.value = bool(self.field_min_max[field_name][2])\n self.skip_cb = False\n\n self.i_plot.update()\n\n # called if user provided different min/max values for colormap, or a different colormap\n def mcds_field_cb(self, b):\n if self.skip_cb:\n return\n\n self.field_index = self.mcds_field.value + 4\n\n field_name = self.field_dict[self.mcds_field.value]\n # print('mcds_field_cb: field_name='+ field_name)\n\n # print('mcds_field_cb: '+ field_name)\n self.field_min_max[field_name][0] = self.cmap_min.value \n self.field_min_max[field_name][1] = self.cmap_max.value\n self.field_min_max[field_name][2] = self.cmap_fixed_toggle.value\n # print(self.field_min_max[field_name])\n # self.debug_str.value = 'mcds_field_cb: ' + field_name + str(self.field_min_max[field_name])\n # self.debug_str.value = 'cb2: '+ str(self.field_min_max)\n # print('--- cb2: '+ str(self.field_min_max)) #rwh2\n # self.cmap_fixed_toggle.value = self.field_min_max[field_name][2]\n\n # field_name = self.mcds_field.options[self.mcds_field.value]\n # self.cmap_min.value = self.field_min_max[field_name][0] # oxygen, etc\n # self.cmap_max.value = self.field_min_max[field_name][1] # oxygen, etc\n\n# self.field_index = self.mcds_field.value + 4\n# print('field_index=',self.field_index)\n self.i_plot.update()\n\n\n #---------------------------------------------------------------------------\n def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):\n \"\"\"\n See https://gist.github.com/syrte/592a062c562cd2a98a83 \n\n Make a scatter plot of circles. \n Similar to plt.scatter, but the size of circles are in data scale.\n Parameters\n ----------\n x, y : scalar or array_like, shape (n, )\n Input data\n s : scalar or array_like, shape (n, ) \n Radius of circles.\n c : color or sequence of color, optional, default : 'b'\n `c` can be a single color format string, or a sequence of color\n specifications of length `N`, or a sequence of `N` numbers to be\n mapped to colors using the `cmap` and `norm` specified via kwargs.\n Note that `c` should not be a single numeric RGB or RGBA sequence \n because that is indistinguishable from an array of values\n to be colormapped. (If you insist, use `color` instead.) \n `c` can be a 2-D array in which the rows are RGB or RGBA, however. \n vmin, vmax : scalar, optional, default: None\n `vmin` and `vmax` are used in conjunction with `norm` to normalize\n luminance data. If either are `None`, the min and max of the\n color array is used.\n kwargs : `~matplotlib.collections.Collection` properties\n Eg. 
alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), \n norm, cmap, transform, etc.\n Returns\n -------\n paths : `~matplotlib.collections.PathCollection`\n Examples\n --------\n a = np.arange(11)\n circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')\n plt.colorbar()\n License\n --------\n This code is under [The BSD 3-Clause License]\n (http://opensource.org/licenses/BSD-3-Clause)\n \"\"\"\n\n if np.isscalar(c):\n kwargs.setdefault('color', c)\n c = None\n\n if 'fc' in kwargs:\n kwargs.setdefault('facecolor', kwargs.pop('fc'))\n if 'ec' in kwargs:\n kwargs.setdefault('edgecolor', kwargs.pop('ec'))\n if 'ls' in kwargs:\n kwargs.setdefault('linestyle', kwargs.pop('ls'))\n if 'lw' in kwargs:\n kwargs.setdefault('linewidth', kwargs.pop('lw'))\n # You can set `facecolor` with an array for each patch,\n # while you can only set `facecolors` with a value for all.\n\n zipped = np.broadcast(x, y, s)\n patches = [Circle((x_, y_), s_)\n for x_, y_, s_ in zipped]\n collection = PatchCollection(patches, **kwargs)\n if c is not None:\n c = np.broadcast_to(c, zipped.shape).ravel()\n collection.set_array(c)\n collection.set_clim(vmin, vmax)\n\n ax = plt.gca()\n ax.add_collection(collection)\n ax.autoscale_view()\n # plt.draw_if_interactive()\n if c is not None:\n plt.sci(collection)\n # return collection\n\n #------------------------------------------------------------\n # def plot_svg(self, frame, rdel=''):\n def plot_svg(self, frame):\n # global current_idx, axes_max\n global current_frame\n current_frame = frame\n fname = \"snapshot%08d.svg\" % frame\n full_fname = os.path.join(self.output_dir, fname)\n # with debug_view:\n # print(\"plot_svg:\", full_fname) \n # print(\"-- plot_svg:\", full_fname) \n if not os.path.isfile(full_fname):\n print(\"Once output files are generated, click the slider.\") \n return\n\n xlist = deque()\n ylist = deque()\n rlist = deque()\n rgb_list = deque()\n\n # print('\\n---- ' + fname + ':')\n# tree = ET.parse(fname)\n tree = ET.parse(full_fname)\n root = tree.getroot()\n # print('--- root.tag ---')\n # print(root.tag)\n # print('--- root.attrib ---')\n # print(root.attrib)\n # print('--- child.tag, child.attrib ---')\n numChildren = 0\n for child in root:\n # print(child.tag, child.attrib)\n # print(\"keys=\",child.attrib.keys())\n if self.use_defaults and ('width' in child.attrib.keys()):\n self.axes_max = float(child.attrib['width'])\n # print(\"debug> found width --> axes_max =\", axes_max)\n if child.text and \"Current time\" in child.text:\n svals = child.text.split()\n # remove the \".00\" on minutes\n self.title_str += \" cells: \" + svals[2] + \"d, \" + svals[4] + \"h, \" + svals[7][:-3] + \"m\"\n\n # self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])\n # self.title_str += \" cells: \" + str(self.cell_time_mins) + \"m\" # rwh\n\n # print(\"width \",child.attrib['width'])\n # print('attrib=',child.attrib)\n # if (child.attrib['id'] == 'tissue'):\n if ('id' in child.attrib.keys()):\n # print('-------- found tissue!!')\n tissue_parent = child\n break\n\n # print('------ search tissue')\n cells_parent = None\n\n for child in tissue_parent:\n # print('attrib=',child.attrib)\n if (child.attrib['id'] == 'cells'):\n # print('-------- found cells, setting cells_parent')\n cells_parent = child\n break\n numChildren += 1\n\n num_cells = 0\n # print('------ search cells')\n for child in cells_parent:\n # print(child.tag, child.attrib)\n # print('attrib=',child.attrib)\n for circle in child: # two circles in each child: outer + 
nucleus\n # circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}\n # print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])\n xval = float(circle.attrib['cx'])\n\n # map SVG coords into comp domain\n # xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin\n xval = xval/self.x_range * self.x_range + self.xmin\n\n s = circle.attrib['fill']\n # print(\"s=\",s)\n # print(\"type(s)=\",type(s))\n if (s[0:3] == \"rgb\"): # if an rgb string, e.g. \"rgb(175,175,80)\" \n rgb = list(map(int, s[4:-1].split(\",\"))) \n rgb[:] = [x / 255. for x in rgb]\n else: # otherwise, must be a color name\n rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple\n rgb = [x for x in rgb_tuple]\n\n # test for bogus x,y locations (rwh TODO: use max of domain?)\n too_large_val = 10000.\n if (np.fabs(xval) > too_large_val):\n print(\"bogus xval=\", xval)\n break\n yval = float(circle.attrib['cy'])\n # yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin\n yval = yval/self.y_range * self.y_range + self.ymin\n if (np.fabs(yval) > too_large_val):\n print(\"bogus xval=\", xval)\n break\n\n rval = float(circle.attrib['r'])\n # if (rgb[0] > rgb[1]):\n # print(num_cells,rgb, rval)\n xlist.append(xval)\n ylist.append(yval)\n rlist.append(rval)\n rgb_list.append(rgb)\n\n # For .svg files with cells that *have* a nucleus, there will be a 2nd\n if (not self.show_nucleus):\n #if (not self.show_nucleus):\n break\n\n num_cells += 1\n\n # if num_cells > 3: # for debugging\n # print(fname,': num_cells= ',num_cells,\" --- debug exit.\")\n # sys.exit(1)\n # break\n\n # print(fname,': num_cells= ',num_cells)\n\n xvals = np.array(xlist)\n yvals = np.array(ylist)\n rvals = np.array(rlist)\n rgbs = np.array(rgb_list)\n # print(\"xvals[0:5]=\",xvals[0:5])\n # print(\"rvals[0:5]=\",rvals[0:5])\n # print(\"rvals.min, max=\",rvals.min(),rvals.max())\n\n # rwh - is this where I change size of render window?? (YES - yipeee!)\n # plt.figure(figsize=(6, 6))\n # plt.cla()\n # if (self.substrates_toggle.value):\n self.title_str += \" (\" + str(num_cells) + \" agents)\"\n # title_str = \" (\" + str(num_cells) + \" agents)\"\n # else:\n # mins= round(int(float(root.find(\".//current_time\").text))) # TODO: check units = mins\n # hrs = int(mins/60)\n # days = int(hrs/24)\n # title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))\n plt.title(self.title_str)\n\n plt.xlim(self.xmin, self.xmax)\n plt.ylim(self.ymin, self.ymax)\n\n # plt.xlim(axes_min,axes_max)\n # plt.ylim(axes_min,axes_max)\n # plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)\n\n # TODO: make figsize a function of plot_size? What about non-square plots?\n # self.fig = plt.figure(figsize=(9, 9))\n\n# axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height\n# axx = fig.gca()\n# print('fig.dpi=',fig.dpi) # = 72\n\n # im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])\n # ax.xlim(axes_min,axes_max)\n # ax.ylim(axes_min,axes_max)\n\n # convert radii to radii in pixels\n # ax2 = self.fig.gca()\n # N = len(xvals)\n # rr_pix = (ax2.transData.transform(np.vstack([rvals, rvals]).T) -\n # ax2.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))\n # rpix, _ = rr_pix.T\n\n # markers_size = (144. 
* rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2\n # markers_size = markers_size/4000000.\n # print('max=',markers_size.max())\n\n #rwh - temp fix - Ah, error only occurs when \"edges\" is toggled on\n if (self.show_edge):\n try:\n # plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)\n self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)\n # cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)\n # plt.sci(cell_circles)\n except (ValueError):\n pass\n else:\n # plt.scatter(xvals,yvals, s=markers_size, c=rgbs)\n self.circles(xvals,yvals, s=rvals, color=rgbs)\n\n # if (self.show_tracks):\n # for key in self.trackd.keys():\n # xtracks = self.trackd[key][:,0]\n # ytracks = self.trackd[key][:,1]\n # plt.plot(xtracks[0:frame],ytracks[0:frame], linewidth=5)\n\n # plt.xlim(self.axes_min, self.axes_max)\n # plt.ylim(self.axes_min, self.axes_max)\n # ax.grid(False)\n# axx.set_title(title_str)\n # plt.title(title_str)\n\n #---------------------------------------------------------------------------\n # assume \"frame\" is cell frame #, unless Cells is togggled off, then it's the substrate frame #\n # def plot_substrate(self, frame, grid):\n def plot_substrate(self, frame):\n # global current_idx, axes_max, gFileId, field_index\n\n # print(\"plot_substrate(): frame*self.substrate_delta_t = \",frame*self.substrate_delta_t)\n # print(\"plot_substrate(): frame*self.svg_delta_t = \",frame*self.svg_delta_t)\n self.title_str = ''\n\n # Recall:\n # self.svg_delta_t = config_tab.svg_interval.value\n # self.substrate_delta_t = config_tab.mcds_interval.value\n # self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # self.therapy_activation_time = user_params_tab.therapy_activation_time.value\n\n # print(\"plot_substrate(): pre_therapy: max svg, substrate frames = \",max_svg_frame_pre_therapy, max_substrate_frame_pre_therapy)\n\n # Assume: # .svg files >= # substrate files\n# if (self.cells_toggle.value):\n\n # if (self.substrates_toggle.value and frame*self.substrate_delta_t <= self.svg_frame*self.svg_delta_t):\n # if (self.substrates_toggle.value and (frame % self.modulo == 0)):\n if (self.substrates_toggle.value):\n # self.fig = plt.figure(figsize=(14, 15.6))\n # self.fig = plt.figure(figsize=(15.0, 12.5))\n self.fig = plt.figure(figsize=(self.figsize_width_substrate, self.figsize_height_substrate))\n\n # rwh - funky way to figure out substrate frame for pc4cancerbots (due to user-defined \"save_interval*\")\n # self.cell_time_mins \n # self.substrate_frame = int(frame / self.modulo)\n if (self.customized_output_freq and (frame > self.max_svg_frame_pre_therapy)):\n # max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)\n # max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)\n self.substrate_frame = self.max_substrate_frame_pre_therapy + (frame - self.max_svg_frame_pre_therapy)\n else:\n self.substrate_frame = int(frame / self.modulo)\n\n # print(\"plot_substrate(): self.substrate_frame=\",self.substrate_frame) \n\n # if (self.substrate_frame > (self.num_substrates-1)):\n # self.substrate_frame = self.num_substrates-1\n\n # print('self.substrate_frame = ',self.substrate_frame)\n # if (self.cells_toggle.value):\n # self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))\n # self.substrate_frame = frame % self.modulo\n # else:\n # self.substrate_frame = frame \n fname = \"output%08d_microenvironment0.mat\" % 
self.substrate_frame\n xml_fname = \"output%08d.xml\" % self.substrate_frame\n # fullname = output_dir_str + fname\n\n # fullname = fname\n full_fname = os.path.join(self.output_dir, fname)\n # print(\"--- plot_substrate(): full_fname=\",full_fname)\n full_xml_fname = os.path.join(self.output_dir, xml_fname)\n # self.output_dir = '.'\n\n # if not os.path.isfile(fullname):\n if not os.path.isfile(full_fname):\n print(\"Once output files are generated, click the slider.\") # No: output00000000_microenvironment0.mat\n return\n\n # tree = ET.parse(xml_fname)\n tree = ET.parse(full_xml_fname)\n xml_root = tree.getroot()\n mins = round(int(float(xml_root.find(\".//current_time\").text))) # TODO: check units = mins\n self.substrate_mins= round(int(float(xml_root.find(\".//current_time\").text))) # TODO: check units = mins\n\n hrs = int(mins/60)\n days = int(hrs/24)\n self.title_str = 'substrate: %dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))\n # self.title_str = 'substrate: %dm' % (mins ) # rwh\n\n\n info_dict = {}\n # scipy.io.loadmat(fullname, info_dict)\n scipy.io.loadmat(full_fname, info_dict)\n M = info_dict['multiscale_microenvironment']\n # global_field_index = int(mcds_field.value)\n # print('plot_substrate: field_index =',field_index)\n f = M[self.field_index, :] # 4=tumor cells field, 5=blood vessel density, 6=growth substrate\n # plt.clf()\n # my_plot = plt.imshow(f.reshape(400,400), cmap='jet', extent=[0,20, 0,20])\n \n # self.fig = plt.figure(figsize=(18.0,15)) # this strange figsize results in a ~square contour plot\n\n # plt.subplot(grid[0:1, 0:1])\n # main_ax = self.fig.add_subplot(grid[0:1, 0:1]) # works, but tiny upper-left region\n #main_ax = self.fig.add_subplot(grid[0:2, 0:2])\n # main_ax = self.fig.add_subplot(grid[0:, 0:2])\n #main_ax = self.fig.add_subplot(grid[:-1, 0:]) # nrows, ncols\n #main_ax = self.fig.add_subplot(grid[0:, 0:]) # nrows, ncols\n #main_ax = self.fig.add_subplot(grid[0:4, 0:]) # nrows, ncols\n\n\n # main_ax = self.fig.add_subplot(grid[0:3, 0:]) # nrows, ncols\n # main_ax = self.fig.add_subplot(111) # nrows, ncols\n\n\n # plt.rc('font', size=10) # TODO: does this affect the Cell plots fonts too? YES. 
Not what we want.\n\n # fig.set_tight_layout(True)\n # ax = plt.axes([0, 0.05, 0.9, 0.9 ]) #left, bottom, width, height\n # ax = plt.axes([0, 0.0, 1, 1 ])\n # cmap = plt.cm.viridis # Blues, YlOrBr, ...\n # im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])\n # ax.grid(False)\n\n # print(\"substrates.py: ------- numx, numy = \", self.numx, self.numy )\n # if (self.numx == 0): # need to parse vals from the config.xml\n # # print(\"--- plot_substrate(): full_fname=\",full_fname)\n # fname = os.path.join(self.output_dir, \"config.xml\")\n # tree = ET.parse(fname)\n # xml_root = tree.getroot()\n # self.xmin = float(xml_root.find(\".//x_min\").text)\n # self.xmax = float(xml_root.find(\".//x_max\").text)\n # dx = float(xml_root.find(\".//dx\").text)\n # self.ymin = float(xml_root.find(\".//y_min\").text)\n # self.ymax = float(xml_root.find(\".//y_max\").text)\n # dy = float(xml_root.find(\".//dy\").text)\n # self.numx = math.ceil( (self.xmax - self.xmin) / dx)\n # self.numy = math.ceil( (self.ymax - self.ymin) / dy)\n\n try:\n xgrid = M[0, :].reshape(self.numy, self.numx)\n ygrid = M[1, :].reshape(self.numy, self.numx)\n except:\n print(\"substrates.py: mismatched mesh size for reshape: numx,numy=\",self.numx, self.numy)\n pass\n# xgrid = M[0, :].reshape(self.numy, self.numx)\n# ygrid = M[1, :].reshape(self.numy, self.numx)\n\n num_contours = 15\n levels = MaxNLocator(nbins=num_contours).tick_values(self.cmap_min.value, self.cmap_max.value)\n contour_ok = True\n if (self.cmap_fixed_toggle.value):\n try:\n # substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)\n substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)\n except:\n contour_ok = False\n # print('got error on contourf 1.')\n else: \n try:\n # substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)\n substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)\n except:\n contour_ok = False\n # print('got error on contourf 2.')\n\n if (contour_ok):\n # main_ax.set_title(self.title_str, fontsize=self.fontsize)\n plt.title(self.title_str, fontsize=self.fontsize)\n # main_ax.tick_params(labelsize=self.fontsize)\n # cbar = plt.colorbar(my_plot)\n # cbar = self.fig.colorbar(substrate_plot, ax=main_ax)\n cbar = self.fig.colorbar(substrate_plot)\n cbar.ax.tick_params(labelsize=self.fontsize)\n # cbar = main_ax.colorbar(my_plot)\n # cbar.ax.tick_params(labelsize=self.fontsize)\n # axes_min = 0\n # axes_max = 2000\n\n # main_ax.set_xlim([self.xmin, self.xmax])\n # main_ax.set_ylim([self.ymin, self.ymax])\n plt.xlim(self.xmin, self.xmax)\n plt.ylim(self.ymin, self.ymax)\n\n # if (frame == 0): # maybe allow substrate grid display later\n # xs = np.linspace(self.xmin,self.xmax,self.numx)\n # ys = np.linspace(self.ymin,self.ymax,self.numy)\n # hlines = np.column_stack(np.broadcast_arrays(xs[0], ys, xs[-1], ys))\n # vlines = np.column_stack(np.broadcast_arrays(xs, ys[0], xs, ys[-1]))\n # grid_lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)\n # line_collection = LineCollection(grid_lines, color=\"gray\", linewidths=0.5)\n # # ax = main_ax.gca()\n # 
main_ax.add_collection(line_collection)\n # # ax.set_xlim(xs[0], xs[-1])\n # # ax.set_ylim(ys[0], ys[-1])\n\n\n # Now plot the cells (possibly on top of the substrate)\n if (self.cells_toggle.value):\n if (not self.substrates_toggle.value):\n # self.fig = plt.figure(figsize=(12, 12))\n self.fig = plt.figure(figsize=(self.figsize_width_svg, self.figsize_height_svg))\n # self.plot_svg(frame)\n self.svg_frame = frame\n # print('plot_svg with frame=',self.svg_frame)\n self.plot_svg(self.svg_frame)\n\n # plt.subplot(grid[2, 0])\n # oxy_ax = self.fig.add_subplot(grid[2:, 0:1])\n #oxy_ax = self.fig.add_subplot(grid[:2, 2:])\n\n #oxy_ax = self.fig.add_subplot(grid[:-1, 0:2]) # nrows, ncols\n #oxy_ax = self.fig.add_subplot(grid[2:3, 0:1]) # nrows, ncols\n\n # oxy_ax = self.fig.add_subplot(grid[4:4, 0:1]) # invalid\n# main_ax = self.fig.add_subplot(grid[0:1, 0:1])\n\n # experiment with small plot of oxygen (or whatever)\n # oxy_ax = self.fig.add_subplot(grid[3:4, 0:1]) # nrows, ncols\n # x = np.linspace(0, 500)\n # oxy_ax.plot(x, 300*np.sin(x))\n\n #---------------------------------------------------------------------------\n # def plot_plots(self, frame):\n # # if (self.first_time):\n # # self.svg_delta_t = 1\n # # self.substrate_delta_t = 1\n # # self.first_time = False\n\n # if (self.substrates_toggle.value):\n # self.fig = plt.figure(figsize=(14, 15.6))\n # else: # only cells being displayed (maybe)\n # self.fig = plt.figure(figsize=(12, 12))\n # # grid = plt.GridSpec(4, 3, wspace=0.10, hspace=0.2) # (nrows, ncols)\n # # self.plot_substrate(frame, grid)\n # self.plot_substrate(frame)\n # # self.plot_svg(frame)\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.sci",
"matplotlib.pyplot.title",
"matplotlib.colors.to_rgb",
"matplotlib.pyplot.ylim",
"matplotlib.patches.Circle",
"numpy.broadcast",
"matplotlib.pyplot.xlim",
"numpy.broadcast_to",
"numpy.isscalar",
"matplotlib.ticker.MaxNLocator",
"numpy.array",
"numpy.fabs",
"matplotlib.pyplot.figure"
]
] |
daniel-s-cunha/probability
|
[
"4b9bdefc2466dace58f3109c117802ab7ad4929b"
] |
[
"tensorflow_probability/python/internal/distribution_util_test.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for distribution_utility functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import Categorical\nfrom tensorflow_probability.python.distributions import Mixture\nfrom tensorflow_probability.python.distributions import MixtureSameFamily\nfrom tensorflow_probability.python.distributions import MultivariateNormalDiag\nfrom tensorflow_probability.python.distributions import Normal\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\ndef _logit(x):\n x = np.asarray(x)\n return np.log(x) - np.log1p(-x)\n\n\ndef _powerset(x):\n s = list(x)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s) + 1))\n\n\ndef _matrix_diag(d):\n \"\"\"Batch version of np.diag.\"\"\"\n orig_shape = d.shape\n d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))\n diag_list = []\n for i in range(d.shape[0]):\n diag_list.append(np.diag(d[i, ...]))\n return np.reshape(diag_list, orig_shape + (d.shape[-1],))\n\n\ndef _make_tril_scale(\n loc=None,\n scale_tril=None,\n scale_diag=None,\n scale_identity_multiplier=None,\n shape_hint=None):\n if scale_tril is not None:\n scale_tril = np.tril(scale_tril)\n if scale_diag is not None:\n scale_tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))\n if scale_identity_multiplier is not None:\n scale_tril += (\n scale_identity_multiplier * _matrix_diag(np.ones(\n [scale_tril.shape[-1]], dtype=np.float32)))\n return scale_tril\n return _make_diag_scale(\n loc, scale_diag, scale_identity_multiplier, shape_hint)\n\n\ndef _make_diag_scale(\n loc=None,\n scale_diag=None,\n scale_identity_multiplier=None,\n shape_hint=None):\n if scale_diag is not None:\n scale_diag = np.asarray(scale_diag)\n if scale_identity_multiplier is not None:\n scale_diag += scale_identity_multiplier\n return _matrix_diag(scale_diag)\n\n if loc is None and shape_hint is None:\n return None\n\n if shape_hint is None:\n shape_hint = loc.shape[-1]\n if scale_identity_multiplier is None:\n scale_identity_multiplier = 1.\n return scale_identity_multiplier * np.diag(np.ones(shape_hint))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MakeTrilScaleTest(test_case.TestCase):\n\n def _testLegalInputs(\n self, loc=None, shape_hint=None, scale_params=None):\n for args in _powerset(scale_params.items()):\n args = dict(args)\n\n scale_args = dict({\n 'loc': loc,\n 
'shape_hint': shape_hint}, **args)\n expected_scale = _make_tril_scale(**scale_args)\n if expected_scale is None:\n # Not enough shape information was specified.\n with self.assertRaisesRegexp(ValueError, ('is specified.')):\n scale = distribution_util.make_tril_scale(**scale_args)\n self.evaluate(scale.to_dense())\n else:\n scale = distribution_util.make_tril_scale(**scale_args)\n self.assertAllClose(expected_scale, self.evaluate(scale.to_dense()))\n\n def testLegalInputs(self):\n self._testLegalInputs(\n loc=np.array([-1., -1.], dtype=np.float32),\n shape_hint=2,\n scale_params={\n 'scale_identity_multiplier': 2.,\n 'scale_diag': [2., 3.],\n 'scale_tril': [[1., 0.],\n [-3., 3.]],\n })\n\n def testLegalInputsMultidimensional(self):\n self._testLegalInputs(\n loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),\n shape_hint=3,\n scale_params={\n 'scale_identity_multiplier': 2.,\n 'scale_diag': [[[2., 3., 4.], [3., 4., 5.]]],\n 'scale_tril': [[[[1., 0., 0.],\n [-3., 3., 0.],\n [1., -2., 1.]],\n [[2., 1., 0.],\n [-4., 7., 0.],\n [1., -1., 1.]]]]\n })\n\n def testZeroTriU(self):\n scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])\n self.assertAllClose([[1., 0], [1., 1.]], self.evaluate(scale.to_dense()))\n\n def testValidateArgs(self):\n with self.assertRaisesOpError('diagonal part must be non-zero'):\n scale = distribution_util.make_tril_scale(\n scale_tril=[[0., 1], [1., 1.]], validate_args=True)\n self.evaluate(scale.to_dense())\n\n def testAssertPositive(self):\n with self.assertRaisesOpError('diagonal part must be positive'):\n scale = distribution_util.make_tril_scale(\n scale_tril=[[-1., 1], [1., 1.]],\n validate_args=True,\n assert_positive=True)\n self.evaluate(scale.to_dense())\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MakeDiagScaleTest(test_case.TestCase):\n\n def _testLegalInputs(\n self, loc=None, shape_hint=None, scale_params=None):\n for args in _powerset(scale_params.items()):\n args = dict(args)\n\n scale_args = dict({\n 'loc': loc,\n 'shape_hint': shape_hint}, **args)\n expected_scale = _make_diag_scale(**scale_args)\n if expected_scale is None:\n # Not enough shape information was specified.\n with self.assertRaisesRegexp(ValueError, ('is specified.')):\n scale = distribution_util.make_diag_scale(**scale_args)\n self.evaluate(scale.to_dense())\n else:\n scale = distribution_util.make_diag_scale(**scale_args)\n self.assertAllClose(expected_scale, self.evaluate(scale.to_dense()))\n\n def testLegalInputs(self):\n self._testLegalInputs(\n loc=np.array([-1., -1.], dtype=np.float32),\n shape_hint=2,\n scale_params={\n 'scale_identity_multiplier': 2.,\n 'scale_diag': [2., 3.]\n })\n\n def testLegalInputsMultidimensional(self):\n self._testLegalInputs(\n loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),\n shape_hint=3,\n scale_params={\n 'scale_identity_multiplier': 2.,\n 'scale_diag': [[[2., 3., 4.], [3., 4., 5.]]]\n })\n\n def testValidateArgs(self):\n with self.assertRaisesOpError('diagonal part must be non-zero'):\n scale = distribution_util.make_diag_scale(\n scale_diag=[[0., 1], [1., 1.]], validate_args=True)\n self.evaluate(scale.to_dense())\n\n def testAssertPositive(self):\n with self.assertRaisesOpError('diagonal part must be positive'):\n scale = distribution_util.make_diag_scale(\n scale_diag=[[-1., 1], [1., 1.]],\n validate_args=True,\n assert_positive=True)\n self.evaluate(scale.to_dense())\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ShapesFromLocAndScaleTest(test_case.TestCase):\n\n 
def test_static_loc_static_scale_non_matching_event_size_raises(self):\n loc = tf.zeros([2, 4])\n diag = tf.ones([5, 1, 3])\n with self.assertRaisesRegexp(ValueError, 'could not be broadcast'):\n distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n def test_static_loc_static_scale(self):\n loc = tf.zeros([2, 3])\n diag = tf.ones([5, 1, 3])\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n self.assertAllEqual([5, 2], tf.get_static_value(batch_shape))\n self.assertAllEqual([3], tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 2], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n def test_static_loc_dynamic_scale(self):\n loc = tf.zeros([2, 3])\n diag = tf1.placeholder_with_default(np.ones([5, 1, 3]), shape=None)\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n # batch_shape depends on both args, and so is dynamic. Since loc did not\n # have static shape, we inferred event shape entirely from scale, and this\n # is available statically.\n self.assertIsNone(tf.get_static_value(batch_shape))\n self.assertAllEqual([3], tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 2], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n def test_dynamic_loc_static_scale(self):\n loc = tf1.placeholder_with_default(np.zeros([2, 3]), shape=None)\n diag = tf.ones([5, 2, 3])\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n # batch_shape depends on both args, and so is dynamic. 
Since loc did not\n # have static shape, we inferred event shape entirely from scale, and this\n # is available statically.\n self.assertIsNone(tf.get_static_value(batch_shape))\n self.assertAllEqual([3], tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 2], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n def test_dynamic_loc_dynamic_scale(self):\n loc = tf1.placeholder_with_default(np.ones([2, 3]), shape=None)\n diag = tf1.placeholder_with_default(np.ones([5, 2, 3]), shape=None)\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n self.assertIsNone(tf.get_static_value(batch_shape))\n self.assertIsNone(tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 2], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n def test_none_loc_static_scale(self):\n loc = None\n diag = tf.ones([5, 1, 3])\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n self.assertAllEqual([5, 1], tf.get_static_value(batch_shape))\n self.assertAllEqual([3], tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 1], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n def test_none_loc_dynamic_scale(self):\n loc = None\n diag = tf1.placeholder_with_default(np.ones([5, 1, 3]), shape=None)\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, tf.linalg.LinearOperatorDiag(diag))\n\n if not tf.executing_eagerly():\n self.assertIsNone(tf.get_static_value(batch_shape))\n self.assertIsNone(tf.get_static_value(event_shape))\n\n batch_shape_, event_shape_ = self.evaluate([batch_shape, event_shape])\n self.assertAllEqual([5, 1], batch_shape_)\n self.assertAllEqual([3], event_shape_)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass GetBroadcastShapeTest(test_case.TestCase):\n\n def test_all_static_shapes_work(self):\n x = tf.ones((2, 1, 3))\n y = tf.ones((1, 5, 3))\n z = tf.ones(())\n self.assertAllEqual([2, 5, 3],\n distribution_util.get_broadcast_shape(x, y, z))\n\n def test_with_some_dynamic_shapes_works(self):\n if tf.executing_eagerly(): return\n x = tf.ones([2, 1, 3])\n y = tf1.placeholder_with_default(\n np.ones([1, 5, 3], dtype=np.float32),\n shape=None)\n z = tf.ones([])\n bcast_shape = self.evaluate(distribution_util.get_broadcast_shape(x, y, z))\n self.assertAllEqual([2, 5, 3], bcast_shape)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MixtureStddevTest(test_case.TestCase):\n\n def test_mixture_dev(self):\n mixture_weights = np.array([\n [1.0/3, 1.0/3, 1.0/3],\n [0.750, 0.250, 0.000]\n ])\n component_means = np.array([\n [1.0, 1.0, 1.0],\n [-5, 0, 1.25]\n ])\n component_devs = np.array([\n [1.0, 1.0, 1.0],\n [0.01, 2.0, 0.1]\n ])\n\n # The first case should trivially have a standard deviation of 1.0 because\n # all components are identical and have that standard deviation.\n # The second case was computed by hand.\n expected_devs = np.array([\n 1.0,\n 2.3848637277\n ])\n\n weights_tf = tf.constant(mixture_weights)\n means_tf = tf.constant(component_means)\n sigmas_tf = tf.constant(component_devs)\n mix_dev = distribution_util.mixture_stddev(weights_tf,\n means_tf,\n sigmas_tf)\n\n self.assertAllClose(expected_devs, 
self.evaluate(mix_dev))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PadMixtureDimensionsTest(test_case.TestCase):\n\n def test_pad_mixture_dimensions_mixture(self):\n gm = Mixture(\n cat=Categorical(probs=[[0.3, 0.7]]),\n components=[\n Normal(loc=[-1.0], scale=[1.0]),\n Normal(loc=[1.0], scale=[0.5])\n ])\n\n x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n x_pad = distribution_util.pad_mixture_dimensions(\n x, gm, gm.cat, tensorshape_util.rank(gm.event_shape))\n x_out, x_pad_out = self.evaluate([x, x_pad])\n\n self.assertAllEqual(x_pad_out.shape, [2, 2])\n self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))\n\n def test_pad_mixture_dimensions_mixture_same_family(self):\n gm = MixtureSameFamily(\n mixture_distribution=Categorical(probs=[0.3, 0.7]),\n components_distribution=MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1.0, 0.5]))\n\n x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n x_pad = distribution_util.pad_mixture_dimensions(\n x, gm, gm.mixture_distribution, tensorshape_util.rank(gm.event_shape))\n x_out, x_pad_out = self.evaluate([x, x_pad])\n\n self.assertAllEqual(x_pad_out.shape, [2, 2, 1])\n self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))\n\n\nclass _PadTest(object):\n\n def testNegAxisCorrectness(self):\n x_ = np.float32([[1., 2, 3],\n [4, 5, 6]])\n value_ = np.float32(0.25)\n count_ = np.int32(2)\n\n x = tf1.placeholder_with_default(\n x_, shape=x_.shape if self.is_static_shape else None)\n value = (\n tf.constant(value_) if self.is_static_shape else\n tf1.placeholder_with_default(value_, shape=None))\n count = (\n tf.constant(count_) if self.is_static_shape else\n tf1.placeholder_with_default(count_, shape=None))\n\n x0_front = distribution_util.pad(\n x, axis=-2, value=value, count=count, front=True)\n x0_back = distribution_util.pad(\n x, axis=-2, count=count, back=True)\n x0_both = distribution_util.pad(\n x, axis=-2, value=value, front=True, back=True)\n\n if self.is_static_shape:\n self.assertAllEqual([4, 3], x0_front.shape)\n self.assertAllEqual([4, 3], x0_back.shape)\n self.assertAllEqual([4, 3], x0_both.shape)\n\n [x0_front_, x0_back_, x0_both_] = self.evaluate([\n x0_front, x0_back, x0_both])\n\n self.assertAllClose(\n np.float32([[value_]*3,\n [value_]*3,\n [1, 2, 3],\n [4, 5, 6]]),\n x0_front_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[1, 2, 3],\n [4, 5, 6],\n [0.]*3,\n [0.]*3]),\n x0_back_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[value_]*3,\n [1, 2, 3],\n [4, 5, 6],\n [value_]*3]),\n x0_both_, atol=0., rtol=1e-6)\n\n def testPosAxisCorrectness(self):\n x_ = np.float32([[1., 2, 3],\n [4, 5, 6]])\n value_ = np.float32(0.25)\n count_ = np.int32(2)\n x = tf1.placeholder_with_default(\n x_, shape=x_.shape if self.is_static_shape else None)\n value = (\n tf.constant(value_) if self.is_static_shape else\n tf1.placeholder_with_default(value_, shape=None))\n count = (\n tf.constant(count_) if self.is_static_shape else\n tf1.placeholder_with_default(count_, shape=None))\n\n x1_front = distribution_util.pad(\n x, axis=1, value=value, count=count, front=True)\n x1_back = distribution_util.pad(\n x, axis=1, count=count, back=True)\n x1_both = distribution_util.pad(\n x, axis=1, value=value, front=True, back=True)\n\n if self.is_static_shape:\n self.assertAllEqual([2, 5], x1_front.shape)\n self.assertAllEqual([2, 5], x1_back.shape)\n self.assertAllEqual([2, 5], x1_both.shape)\n\n [x1_front_, x1_back_, x1_both_] = self.evaluate([\n x1_front, x1_back, x1_both])\n\n 
self.assertAllClose(\n np.float32([[value_]*2 + [1, 2, 3],\n [value_]*2 + [4, 5, 6]]),\n x1_front_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[1, 2, 3] + [0.]*2,\n [4, 5, 6] + [0.]*2]),\n x1_back_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[value_, 1, 2, 3, value_],\n [value_, 4, 5, 6, value_]]),\n x1_both_, atol=0., rtol=1e-6)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PadStaticTest(_PadTest, test_case.TestCase):\n\n @property\n def is_static_shape(self):\n return True\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PadDynamicTest(_PadTest, test_case.TestCase):\n\n @property\n def is_static_shape(self):\n return False\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PickScalarConditionTest(test_case.TestCase):\n\n def test_pick_scalar_condition_static(self):\n\n pos = np.exp(np.random.randn(3, 2, 4)).astype(np.float32)\n neg = -np.exp(np.random.randn(3, 2, 4)).astype(np.float32)\n\n # Python static cond\n self.assertAllEqual(\n distribution_util.pick_scalar_condition(True, pos, neg), pos)\n self.assertAllEqual(\n distribution_util.pick_scalar_condition(False, pos, neg), neg)\n\n # TF static cond\n self.assertAllEqual(distribution_util.pick_scalar_condition(\n tf.constant(True), pos, neg), pos)\n self.assertAllEqual(distribution_util.pick_scalar_condition(\n tf.constant(False), pos, neg), neg)\n\n # Dynamic tests don't need to (/can't) run in Eager mode.\n def test_pick_scalar_condition_dynamic(self):\n pos = np.exp(np.random.randn(3, 2, 4)).astype(np.float32)\n neg = -np.exp(np.random.randn(3, 2, 4)).astype(np.float32)\n\n # TF dynamic cond\n dynamic_true = tf1.placeholder_with_default(input=True, shape=None)\n dynamic_false = tf1.placeholder_with_default(\n input=False, shape=None)\n pos_ = self.evaluate(distribution_util.pick_scalar_condition(\n dynamic_true, pos, neg))\n neg_ = self.evaluate(distribution_util.pick_scalar_condition(\n dynamic_false, pos, neg))\n self.assertAllEqual(pos_, pos)\n self.assertAllEqual(neg_, neg)\n\n # TF dynamic everything\n pos_dynamic = tf1.placeholder_with_default(input=pos, shape=None)\n neg_dynamic = tf1.placeholder_with_default(input=neg, shape=None)\n pos_ = self.evaluate(distribution_util.pick_scalar_condition(\n dynamic_true, pos_dynamic, neg_dynamic))\n neg_ = self.evaluate(distribution_util.pick_scalar_condition(\n dynamic_false, pos_dynamic, neg_dynamic))\n self.assertAllEqual(pos_, pos)\n self.assertAllEqual(neg_, neg)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass TestMoveDimension(test_case.TestCase):\n\n def test_move_dimension_static_shape(self):\n\n x = tf.random.normal(shape=[200, 30, 4, 1, 6])\n\n x_perm = distribution_util.move_dimension(x, 1, 1)\n self.assertAllEqual(\n tensorshape_util.as_list(x_perm.shape), [200, 30, 4, 1, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, 3)\n self.assertAllEqual(\n tensorshape_util.as_list(x_perm.shape), [30, 4, 1, 200, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, -2)\n self.assertAllEqual(\n tensorshape_util.as_list(x_perm.shape), [30, 4, 1, 200, 6])\n\n x_perm = distribution_util.move_dimension(x, 4, 2)\n self.assertAllEqual(\n tensorshape_util.as_list(x_perm.shape), [200, 30, 6, 4, 1])\n\n def test_move_dimension_dynamic_shape(self):\n\n x_ = tf.random.normal(shape=[200, 30, 4, 1, 6])\n x = tf1.placeholder_with_default(input=x_, shape=None)\n\n x_perm1 = distribution_util.move_dimension(x, 1, 1)\n x_perm2 = distribution_util.move_dimension(x, 0, 3)\n x_perm3 = distribution_util.move_dimension(x, 0, -2)\n x_perm4 = 
distribution_util.move_dimension(x, 4, 2)\n x_perm5 = distribution_util.move_dimension(x, -1, 2)\n\n x_perm1_, x_perm2_, x_perm3_, x_perm4_, x_perm5_ = self.evaluate([\n tf.shape(input=x_perm1),\n tf.shape(input=x_perm2),\n tf.shape(input=x_perm3),\n tf.shape(input=x_perm4),\n tf.shape(input=x_perm5)\n ])\n\n self.assertAllEqual(x_perm1_, [200, 30, 4, 1, 6])\n\n self.assertAllEqual(x_perm2_, [30, 4, 1, 200, 6])\n\n self.assertAllEqual(x_perm3_, [30, 4, 1, 200, 6])\n\n self.assertAllEqual(x_perm4_, [200, 30, 6, 4, 1])\n\n self.assertAllEqual(x_perm5_, [200, 30, 6, 4, 1])\n\n def test_move_dimension_dynamic_indices(self):\n\n x_ = tf.random.normal(shape=[200, 30, 4, 1, 6])\n x = tf1.placeholder_with_default(input=x_, shape=None)\n\n x_perm1 = distribution_util.move_dimension(\n x, tf1.placeholder_with_default(input=1, shape=[]),\n tf1.placeholder_with_default(input=1, shape=[]))\n\n x_perm2 = distribution_util.move_dimension(\n x, tf1.placeholder_with_default(input=0, shape=[]),\n tf1.placeholder_with_default(input=3, shape=[]))\n\n x_perm3 = distribution_util.move_dimension(\n x, tf1.placeholder_with_default(input=0, shape=[]),\n tf1.placeholder_with_default(input=-2, shape=[]))\n\n x_perm4 = distribution_util.move_dimension(\n x, tf1.placeholder_with_default(input=4, shape=[]),\n tf1.placeholder_with_default(input=2, shape=[]))\n\n x_perm5 = distribution_util.move_dimension(\n x, tf1.placeholder_with_default(input=-1, shape=[]),\n tf1.placeholder_with_default(input=2, shape=[]))\n\n x_perm1_, x_perm2_, x_perm3_, x_perm4_, x_perm5_ = self.evaluate([\n tf.shape(input=x_perm1),\n tf.shape(input=x_perm2),\n tf.shape(input=x_perm3),\n tf.shape(input=x_perm4),\n tf.shape(input=x_perm5)\n ])\n\n self.assertAllEqual(x_perm1_, [200, 30, 4, 1, 6])\n\n self.assertAllEqual(x_perm2_, [30, 4, 1, 200, 6])\n\n self.assertAllEqual(x_perm3_, [30, 4, 1, 200, 6])\n\n self.assertAllEqual(x_perm4_, [200, 30, 6, 4, 1])\n\n self.assertAllEqual(x_perm5_, [200, 30, 6, 4, 1])\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass AssertCloseTest(test_case.TestCase):\n\n def testAssertIntegerForm(self):\n # This should only be detected as an integer.\n x = tf1.placeholder_with_default(\n np.array([1., 5, 10, 15, 20], dtype=np.float32), shape=None)\n y = tf1.placeholder_with_default(\n np.array([1.1, 5, 10, 15, 20], dtype=np.float32), shape=None)\n # First component isn't less than float32.eps = 1e-7\n z = tf1.placeholder_with_default(\n np.array([1.0001, 5, 10, 15, 20], dtype=np.float32), shape=None)\n # This shouldn't be detected as an integer.\n w = tf1.placeholder_with_default(\n np.array([1e-8, 5, 10, 15, 20], dtype=np.float32), shape=None)\n\n with tf.control_dependencies([distribution_util.assert_integer_form(x)]):\n self.evaluate(tf.identity(x))\n\n with self.assertRaisesOpError('has non-integer components'):\n with tf.control_dependencies(\n [distribution_util.assert_integer_form(y)]):\n self.evaluate(tf.identity(y))\n\n with self.assertRaisesOpError('has non-integer components'):\n with tf.control_dependencies(\n [distribution_util.assert_integer_form(z)]):\n self.evaluate(tf.identity(z))\n\n with self.assertRaisesOpError('has non-integer components'):\n with tf.control_dependencies(\n [distribution_util.assert_integer_form(w)]):\n self.evaluate(tf.identity(w))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MaybeGetStaticTest(test_case.TestCase):\n\n def testGetStaticInt(self):\n x = 2\n self.assertEqual(x, distribution_util.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.),\n 
distribution_util.maybe_get_static_value(x, dtype=np.float64))\n\n def testGetStaticNumpyArray(self):\n x = np.array(2, dtype=np.int32)\n self.assertEqual(x, distribution_util.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.),\n distribution_util.maybe_get_static_value(x, dtype=np.float64))\n\n def testGetStaticConstant(self):\n x = tf.constant(2, dtype=tf.int32)\n self.assertEqual(np.array(2, dtype=np.int32),\n distribution_util.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.),\n distribution_util.maybe_get_static_value(x, dtype=np.float64))\n\n def testGetStaticPlaceholder(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array([2.], dtype=np.int32), shape=[1])\n self.assertEqual(None, distribution_util.maybe_get_static_value(x))\n self.assertEqual(\n None, distribution_util.maybe_get_static_value(x, dtype=np.float64))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass GetLogitsAndProbsTest(test_case.TestCase):\n\n def testImproperArguments(self):\n with self.assertRaises(ValueError):\n distribution_util.get_logits_and_probs(logits=None, probs=None)\n\n with self.assertRaises(ValueError):\n distribution_util.get_logits_and_probs(logits=[0.1], probs=[0.1])\n\n def testLogits(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n logits = _logit(p)\n\n new_logits, new_p = distribution_util.get_logits_and_probs(\n logits=logits, validate_args=True)\n\n self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)\n self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)\n\n def testLogitsMultidimensional(self):\n p = np.array([0.2, 0.3, 0.5], dtype=np.float32)\n logits = np.log(p)\n\n new_logits, new_p = distribution_util.get_logits_and_probs(\n logits=logits, multidimensional=True, validate_args=True)\n\n self.assertAllClose(self.evaluate(new_p), p)\n self.assertAllClose(self.evaluate(new_logits), logits)\n\n def testProbability(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n\n new_logits, new_p = distribution_util.get_logits_and_probs(\n probs=p, validate_args=True)\n\n self.assertAllClose(_logit(p), self.evaluate(new_logits))\n self.assertAllClose(p, self.evaluate(new_p))\n\n def testProbabilityMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n new_logits, new_p = distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n self.assertAllClose(np.log(p), self.evaluate(new_logits))\n self.assertAllClose(p, self.evaluate(new_p))\n\n def testProbabilityValidateArgs(self):\n p = [0.01, 0.2, 0.5, 0.7, .99]\n # Component less than 0.\n p2 = [-1, 0.2, 0.5, 0.3, .2]\n # Component greater than 1.\n p3 = [2, 0.2, 0.5, 0.3, .2]\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p, validate_args=True)\n self.evaluate(prob)\n\n with self.assertRaisesOpError('Condition x >= 0'):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, validate_args=True)\n self.evaluate(prob)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError('probs has components greater than 1'):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, validate_args=True)\n self.evaluate(prob)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, validate_args=False)\n self.evaluate(prob)\n\n def testProbabilityValidateArgsMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], 
dtype=np.float32)\n # Component less than 0. Still sums to 1.\n p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Component greater than 1. Does not sum to 1.\n p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Does not sum to 1.\n p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True)\n self.evaluate(prob)\n\n with self.assertRaisesOpError('Condition x >= 0'):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\n '(probs has components greater than 1|probs does not sum to 1)'):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError('probs does not sum to 1'):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n def testProbsMultidimShape(self):\n with self.assertRaises(ValueError):\n p = tf.ones([int(2**11+1)], dtype=tf.float16)\n distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n if tf.executing_eagerly(): return\n\n with self.assertRaisesOpError(\n 'Number of classes exceeds `dtype` precision'):\n p = np.ones([int(2**11+1)], dtype=np.float16)\n p = tf1.placeholder_with_default(p, shape=None)\n self.evaluate(distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True))\n\n def testLogitsMultidimShape(self):\n with self.assertRaises(ValueError):\n l = tf.ones([int(2**11+1)], dtype=tf.float16)\n distribution_util.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n\n if tf.executing_eagerly(): return\n\n with self.assertRaisesOpError(\n 'Number of classes exceeds `dtype` precision'):\n l = np.ones([int(2**11+1)], dtype=np.float16)\n l = tf1.placeholder_with_default(l, shape=None)\n logit, _ = distribution_util.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n self.evaluate(logit)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass EmbedCheckCategoricalEventShapeTest(test_case.TestCase):\n\n def testTooSmall(self):\n with self.assertRaises(ValueError):\n param = tf.ones([1], dtype=np.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n\n if tf.executing_eagerly(): return\n with self.assertRaisesOpError(\n 'must have at least 2 events'):\n param = tf1.placeholder_with_default(\n np.ones([1], dtype=np.float16), shape=None)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n self.evaluate(checked_param)\n\n def testTooLarge(self):\n with self.assertRaises(ValueError):\n param = tf.ones([int(2**11+1)], dtype=tf.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n\n if tf.executing_eagerly(): return\n with self.assertRaisesOpError(\n 'Number of classes exceeds `dtype` precision'):\n param = tf1.placeholder_with_default(\n 
np.ones([int(2**11+1)], dtype=np.float16), shape=None)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n self.evaluate(checked_param)\n\n def testUnsupportedDtype(self):\n param = tf.convert_to_tensor(\n value=np.ones([2**11 + 1]).astype(tf.qint16.as_numpy_dtype),\n dtype=tf.qint16)\n with self.assertRaises(TypeError):\n distribution_util.embed_check_categorical_event_shape(param)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass EmbedCheckIntegerCastingClosedTest(test_case.TestCase):\n\n def testCorrectlyAssertsNonnegative(self):\n with self.assertRaisesOpError('Elements must be non-negative'):\n x = tf1.placeholder_with_default(\n np.array([1, -1], dtype=np.float16), shape=None)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=tf.int16)\n self.evaluate(x_checked)\n\n def testCorrectlyAssertsPositive(self):\n with self.assertRaisesOpError('Elements must be positive'):\n x = tf1.placeholder_with_default(\n np.array([1, 0], dtype=np.float16), shape=None)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=tf.int16, assert_positive=True)\n self.evaluate(x_checked)\n\n def testCorrectlyAssersIntegerForm(self):\n with self.assertRaisesOpError('Elements must be int16-equivalent.'):\n x = tf1.placeholder_with_default(\n np.array([1, 1.5], dtype=np.float16), shape=None)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=tf.int16)\n self.evaluate(x_checked)\n\n def testCorrectlyAssertsLargestPossibleInteger(self):\n with self.assertRaisesOpError('Elements cannot exceed 32767.'):\n x = tf1.placeholder_with_default(\n np.array([1, 2**15], dtype=np.int32), shape=None)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=tf.int16)\n self.evaluate(x_checked)\n\n def testCorrectlyAssertsSmallestPossibleInteger(self):\n with self.assertRaisesOpError('Elements cannot be smaller than 0.'):\n x = tf1.placeholder_with_default(\n np.array([1, -1], dtype=np.int32), shape=None)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=tf.uint16, assert_nonnegative=False)\n self.evaluate(x_checked)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DynamicShapeTest(test_case.TestCase):\n\n def testSameDynamicShape(self):\n scalar = tf.constant(2.)\n scalar1 = tf1.placeholder_with_default(\n np.array(2., dtype=np.float32), shape=None)\n\n vector = tf.constant([0.3, 0.4, 0.5])\n vector1 = tf1.placeholder_with_default(\n np.array([2., 3., 4.], dtype=np.float32), shape=[None])\n vector2 = tf1.placeholder_with_default(\n np.array([2., 3.5, 6.], dtype=np.float32), shape=[None])\n\n multidimensional = tf.constant([[0.3, 0.4], [0.2, 0.6]])\n multidimensional1 = tf1.placeholder_with_default(\n np.array([[2., 3.], [3., 4.]], dtype=np.float32),\n shape=[None, None])\n multidimensional2 = tf1.placeholder_with_default(\n np.array([[1., 3.5], [6.3, 2.3]], dtype=np.float32),\n shape=[None, None])\n multidimensional3 = tf1.placeholder_with_default(\n np.array([[1., 3.5, 5.], [6.3, 2.3, 7.1]], dtype=np.float32),\n shape=[None, None])\n\n # Scalar\n self.assertTrue(self.evaluate(\n distribution_util.same_dynamic_shape(scalar, scalar1)))\n\n # Vector\n self.assertTrue(self.evaluate(\n distribution_util.same_dynamic_shape(vector, vector1)))\n self.assertTrue(self.evaluate(\n distribution_util.same_dynamic_shape(vector1, vector2)))\n\n # Multidimensional\n self.assertTrue(self.evaluate(\n 
distribution_util.same_dynamic_shape(\n multidimensional, multidimensional1)))\n self.assertTrue(self.evaluate(\n distribution_util.same_dynamic_shape(\n multidimensional1, multidimensional2)))\n\n # Scalar, X\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(scalar, vector1)))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(scalar1, vector1)))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(scalar, multidimensional1)))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(scalar1, multidimensional1)))\n\n # Vector, X\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(vector, vector1[:2])))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(vector1, vector2[-1:])))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(vector, multidimensional1)))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(vector1, multidimensional1)))\n\n # Multidimensional, X\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(\n multidimensional, multidimensional3)))\n self.assertFalse(self.evaluate(\n distribution_util.same_dynamic_shape(\n multidimensional1, multidimensional3)))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RotateTransposeTest(test_case.TestCase):\n\n def _np_rotate_transpose(self, x, shift):\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))\n\n def testRollStatic(self):\n if tf.executing_eagerly():\n error_message = r'Attempt to convert a value \\(None\\)'\n else:\n error_message = 'None values not supported.'\n with self.assertRaisesRegexp(ValueError, error_message):\n distribution_util.rotate_transpose(None, 1)\n for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):\n for shift in np.arange(-5, 5):\n y = distribution_util.rotate_transpose(x, shift)\n self.assertAllEqual(\n self._np_rotate_transpose(x, shift), self.evaluate(y))\n self.assertAllEqual(\n np.roll(x.shape, shift), tensorshape_util.as_list(y.shape))\n\n def testRollDynamic(self):\n for x_value in (np.ones(1, dtype=np.float32),\n np.ones([2, 1], dtype=np.float32),\n np.ones([3, 2, 1], dtype=np.float32)):\n for shift_value in np.arange(-5, 5).astype(np.int32):\n x = tf1.placeholder_with_default(x_value, shape=None)\n shift = tf1.placeholder_with_default(shift_value, shape=None)\n self.assertAllEqual(\n self._np_rotate_transpose(x_value, shift_value),\n self.evaluate(distribution_util.rotate_transpose(x, shift)))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PickVectorTest(test_case.TestCase):\n\n def testCorrectlyPicksVector(self):\n x = np.arange(10, 12)\n y = np.arange(15, 18)\n self.assertAllEqual(\n x, self.evaluate(distribution_util.pick_vector(tf.less(0, 5), x, y)))\n self.assertAllEqual(\n y, self.evaluate(distribution_util.pick_vector(tf.less(5, 0), x, y)))\n self.assertAllEqual(x,\n distribution_util.pick_vector(\n tf.constant(True), x, y)) # No eval.\n self.assertAllEqual(y,\n distribution_util.pick_vector(\n tf.constant(False), x, y)) # No eval.\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PreferStaticRankTest(test_case.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = tf.zeros([2, 3, 4])\n rank = distribution_util.prefer_static_rank(x)\n if not tf.executing_eagerly():\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(3, rank)\n\n def testEmptyConstantTensor(self):\n x = tf.constant([])\n rank = 
distribution_util.prefer_static_rank(x)\n if not tf.executing_eagerly():\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(1, rank)\n\n def testScalarTensor(self):\n x = tf.constant(1.)\n rank = distribution_util.prefer_static_rank(x)\n if not tf.executing_eagerly():\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(0, rank)\n\n def testDynamicRankEndsUpBeingNonEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.zeros([2, 3], dtype=np.float64), shape=None)\n rank = distribution_util.prefer_static_rank(x)\n self.assertAllEqual(2, self.evaluate(rank))\n\n def testDynamicRankEndsUpBeingEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array([], dtype=np.int32), shape=None)\n rank = distribution_util.prefer_static_rank(x)\n self.assertAllEqual(1, self.evaluate(rank))\n\n def testDynamicRankEndsUpBeingScalar(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array(1, dtype=np.int32), shape=None)\n rank = distribution_util.prefer_static_rank(x)\n self.assertAllEqual(0, self.evaluate(rank))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PreferStaticShapeTest(test_case.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = tf.zeros((2, 3, 4))\n shape = distribution_util.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual([2, 3, 4], shape)\n\n def testEmptyConstantTensor(self):\n x = tf.constant([])\n shape = distribution_util.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual([0], shape)\n\n def testScalarTensor(self):\n x = tf.constant(1.)\n shape = distribution_util.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual([], shape)\n\n def testDynamicShapeEndsUpBeingNonEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.zeros([2, 3], dtype=np.float64), shape=None)\n shape = distribution_util.prefer_static_shape(x)\n self.assertAllEqual([2, 3], self.evaluate(shape))\n\n def testDynamicShapeEndsUpBeingEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array([], dtype=np.int32), shape=None)\n shape = distribution_util.prefer_static_shape(x)\n self.assertAllEqual([0], self.evaluate(shape))\n\n def testDynamicShapeEndsUpBeingScalar(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array(1, dtype=np.int32), shape=None)\n shape = distribution_util.prefer_static_shape(x)\n self.assertAllEqual([], self.evaluate(shape))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PreferStaticValueTest(test_case.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = tf.zeros((2, 3, 4))\n value = distribution_util.prefer_static_value(x)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.zeros((2, 3, 4)), value)\n\n def testEmptyConstantTensor(self):\n x = tf.constant([])\n value = distribution_util.prefer_static_value(x)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.array([]), value)\n\n def testScalarTensor(self):\n x = tf.constant(1.)\n value = distribution_util.prefer_static_value(x)\n if not tf.executing_eagerly():\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.array(1.), value)\n\n def testDynamicValueEndsUpBeingNonEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.zeros((2, 3), dtype=np.float64), shape=None)\n value = 
distribution_util.prefer_static_value(x)\n self.assertAllEqual(np.zeros((2, 3)),\n self.evaluate(value))\n\n def testDynamicValueEndsUpBeingEmpty(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array([], dtype=np.int32), shape=None)\n value = distribution_util.prefer_static_value(x)\n self.assertAllEqual(np.array([]), self.evaluate(value))\n\n def testDynamicValueEndsUpBeingScalar(self):\n if tf.executing_eagerly(): return\n x = tf1.placeholder_with_default(\n np.array(1, dtype=np.int32), shape=None)\n value = distribution_util.prefer_static_value(x)\n self.assertAllEqual(np.array(1), self.evaluate(value))\n\n\n# No need for eager tests; this function doesn't depend on TF.\nclass GenNewSeedTest(test_case.TestCase):\n\n def testOnlyNoneReturnsNone(self):\n self.assertIsNotNone(distribution_util.gen_new_seed(0, 'salt'))\n self.assertIsNone(distribution_util.gen_new_seed(None, 'salt'))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ArgumentsTest(test_case.TestCase):\n\n def testNoArguments(self):\n def foo():\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({}, foo())\n\n def testPositionalArguments(self):\n def foo(a, b, c, d): # pylint: disable=unused-argument\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({'a': 1, 'b': 2, 'c': 3, 'd': 4}, foo(1, 2, 3, 4))\n\n # Tests that it does not matter where this function is called, and\n # no other local variables are returned back.\n def bar(a, b, c):\n unused_x = a * b\n unused_y = c * 3\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, bar(1, 2, 3))\n\n def testOverloadedArgumentValues(self):\n def foo(a, b, c): # pylint: disable=unused-argument\n a = 42\n b = 31\n c = 42\n return distribution_util.parent_frame_arguments()\n self.assertEqual({'a': 42, 'b': 31, 'c': 42}, foo(1, 2, 3))\n\n def testKeywordArguments(self):\n def foo(**kwargs): # pylint: disable=unused-argument\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({'a': 1, 'b': 2, 'c': 3, 'd': 4}, foo(a=1, b=2, c=3, d=4))\n\n def testPositionalKeywordArgs(self):\n def foo(a, b, c, **kwargs): # pylint: disable=unused-argument\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, foo(a=1, b=2, c=3))\n self.assertEqual({'a': 1, 'b': 2, 'c': 3, 'unicorn': None},\n foo(a=1, b=2, c=3, unicorn=None))\n\n def testNoVarargs(self):\n def foo(a, b, c, *varargs, **kwargs): # pylint: disable=unused-argument\n return distribution_util.parent_frame_arguments()\n\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, foo(a=1, b=2, c=3))\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, foo(1, 2, 3, *[1, 2, 3]))\n self.assertEqual({'a': 1, 'b': 2, 'c': 3, 'unicorn': None},\n foo(1, 2, 3, unicorn=None))\n self.assertEqual({'a': 1, 'b': 2, 'c': 3, 'unicorn': None},\n foo(1, 2, 3, *[1, 2, 3], unicorn=None))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ExpandToVectorTest(test_case.TestCase):\n\n def _check_static(self, expected, actual, dtype=np.int32):\n const_actual = tf.get_static_value(actual)\n self.assertAllEqual(expected, const_actual)\n self.assertEqual(dtype, const_actual.dtype)\n\n def _check(self, expected, actual, expected_dtype=np.int32):\n self.assertAllEqual(expected, actual)\n self.assertEquals(expected_dtype, actual.dtype)\n\n def test_expand_to_vector_on_literals(self):\n self._check_static([1], distribution_util.expand_to_vector(1))\n self._check_static(\n [3.5], 
distribution_util.expand_to_vector(3.5), dtype=np.float32)\n\n self._check_static([3], distribution_util.expand_to_vector((3,)))\n self._check_static([0, 0], distribution_util.expand_to_vector((0, 0)))\n self._check_static(\n [1.25, 2.75, 3.0],\n distribution_util.expand_to_vector((1.25, 2.75, 3.0)),\n dtype=np.float32)\n\n self._check_static([3], distribution_util.expand_to_vector([3,]))\n self._check_static([0, 0], distribution_util.expand_to_vector([0, 0]))\n self._check_static(\n [1.25, 2.75, 3.0],\n distribution_util.expand_to_vector([1.25, 2.75, 3.0]),\n dtype=np.float32)\n\n # Empty lists and tuples are converted to `tf.float32`.\n self._check_static(\n [], distribution_util.expand_to_vector(()), dtype=np.float32)\n self._check_static(\n [], distribution_util.expand_to_vector([]), dtype=np.float32)\n\n # Test for error on input with rank >= 2.\n with self.assertRaises(ValueError):\n distribution_util.expand_to_vector([[1, 2], [3, 4]])\n\n def test_expand_to_vector_on_constants(self):\n # Helper to construct a const Tensor and call expand_to_tensor on it.\n def _expand_tensor(x, dtype=tf.int32):\n return distribution_util.expand_to_vector(\n tf.convert_to_tensor(value=x, dtype=dtype), op_name='test')\n\n self._check_static([], _expand_tensor([]))\n self._check_static([], _expand_tensor(()))\n\n self._check_static([17], _expand_tensor(17))\n self._check_static([1.125], _expand_tensor(1.125, np.float32), np.float32)\n\n self._check_static([314], _expand_tensor([314]))\n self._check_static(\n [3.75, 0], _expand_tensor([3.75, 0], np.float64), np.float64)\n self._check_static([1, 2, 3], _expand_tensor([1, 2, 3], np.int64), np.int64)\n\n # Test for error on input with rank >= 2.\n with self.assertRaises(ValueError):\n _expand_tensor([[[]]], tf.float32)\n\n def test_expand_to_vector_on_tensors(self):\n # Helper to construct a placeholder and call expand_to_tensor on it.\n def _expand_tensor(x, shape=None, dtype=np.int32, validate_args=False):\n return distribution_util.expand_to_vector(\n tf1.placeholder_with_default(\n np.array(x, dtype=dtype), shape=shape),\n tensor_name='name_for_tensor',\n validate_args=validate_args)\n\n for dtype in [np.int64, np.float32, np.float64, np.int64]:\n\n self._check([], _expand_tensor([], shape=[0], dtype=dtype), dtype)\n self._check([], _expand_tensor([], shape=[None], dtype=dtype), dtype)\n self._check([], _expand_tensor([], shape=None, dtype=dtype), dtype)\n\n self._check([7], _expand_tensor(7, shape=[], dtype=dtype), dtype)\n\n self._check(\n [1, 2, 3], _expand_tensor([1, 2, 3], shape=[3], dtype=dtype), dtype)\n self._check(\n [1, 2, 3],\n _expand_tensor([1, 2, 3], shape=[None], dtype=dtype), dtype)\n self._check(\n [1, 2, 3], _expand_tensor([1, 2, 3], shape=None, dtype=dtype), dtype)\n\n # Test for error on input with rank >= 2.\n with self.assertRaises(ValueError):\n _expand_tensor([[1, 2]], shape=[1, 2])\n with self.assertRaises(ValueError):\n _expand_tensor([[1, 2]], shape=[None, None])\n if tf.executing_eagerly():\n with self.assertRaises(ValueError):\n _expand_tensor([[1, 2]], shape=None)\n else:\n with self.assertRaises(tf.errors.InvalidArgumentError):\n self.evaluate(_expand_tensor([[1, 2]], shape=None, validate_args=True))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass WithDependenciesTestCase(test_util.TensorFlowTestCase):\n\n def testTupleDependencies(self):\n counter = tf.Variable(0, name='my_counter')\n const_with_dep = distribution_util.with_dependencies(\n (tf1.assign_add(counter, 1), tf.constant(42)),\n tf.constant(7))\n\n 
self.evaluate(tf1.global_variables_initializer())\n self.assertEqual(1 if tf.executing_eagerly() else 0,\n self.evaluate(counter))\n self.assertEqual(7, self.evaluate(const_with_dep))\n self.assertEqual(1, self.evaluate(counter))\n\n def testListDependencies(self):\n counter = tf.Variable(0, name='my_counter')\n const_with_dep = distribution_util.with_dependencies(\n [tf1.assign_add(counter, 1), tf.constant(42)],\n tf.constant(7))\n\n self.evaluate(tf1.global_variables_initializer())\n self.assertEqual(1 if tf.executing_eagerly() else 0,\n self.evaluate(counter))\n self.assertEqual(7, self.evaluate(const_with_dep))\n self.assertEqual(1, self.evaluate(counter))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"numpy.diag",
"tensorflow.compat.v2.executing_eagerly",
"numpy.asarray",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.identity",
"numpy.random.randn",
"tensorflow.compat.v1.placeholder_with_default",
"numpy.roll",
"numpy.tril",
"numpy.reshape",
"numpy.arange",
"tensorflow.compat.v2.zeros",
"numpy.float32",
"numpy.log1p",
"numpy.zeros",
"numpy.log",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.less",
"tensorflow.compat.v2.constant",
"numpy.array",
"tensorflow.compat.v2.random.normal",
"tensorflow.compat.v2.get_static_value",
"numpy.int32",
"numpy.ones",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.assign_add",
"numpy.prod",
"tensorflow.compat.v2.linalg.LinearOperatorDiag"
]
] |
HieuLsw/blobjob.editor
|
[
"c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81"
] |
[
"libs/cocos/particle.py"
] |
[
"# ----------------------------------------------------------------------------\n# cocos2d\n# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of cocos2d nor the names of its\n# contributors may be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n# ----------------------------------------------------------------------------\n'''Particle system engine'''\n\nimport random\nimport pyglet\nfrom pyglet.gl import *\nimport math\nimport copy\nimport numpy\nimport ctypes\n\nfrom cocosnode import CocosNode\nfrom euclid import Point2\n\nrand = lambda: random.random() * 2 - 1\n\n# PointerToNumpy by Gary Herron\n# from pyglet's user list\ndef PointerToNumpy(a, ptype=ctypes.c_float):\n a = numpy.ascontiguousarray(a) # Probably a NO-OP, but perhaps not\n return a.ctypes.data_as(ctypes.POINTER(ptype)) # Ugly and undocumented! \n\nclass Color( object ):\n def __init__( self, r,g,b,a ):\n self.r = r\n self.g = g\n self.b = b\n self.a = a\n\n def to_array(self):\n return self.r, self.g, self.b, self.a\n\n\nclass ParticleSystem( CocosNode ):\n\n # type of particle\n POSITION_FREE, POSITION_GROUPED = range(2)\n\n #: is the particle system active ?\n active = True\n\n #: duration in seconds of the system. 
-1 is infinity\n duration = 0\n\n #: time elapsed since the start of the system (in seconds)\n elapsed = 0\n\n #: Gravity of the particles\n gravity = Point2(0.0, 0.0)\n\n #: position is from \"superclass\" CocosNode\n #: Position variance\n pos_var = Point2(0.0, 0.0)\n\n #: The angle (direction) of the particles measured in degrees\n angle = 0.0\n #: Angle variance measured in degrees;\n angle_var = 0.0\n\n #: The speed the particles will have.\n speed = 0.0\n #: The speed variance\n speed_var = 0.0\n\n #: Tangential acceleration\n tangential_accel = 0.0\n #: Tangential acceleration variance\n tangential_accel_var = 0.0\n\n #: Radial acceleration\n radial_accel = 0.0\n #: Radial acceleration variance\n radial_accel_var = 0.0\n\n #: Size of the particles\n size = 0.0\n #: Size variance\n size_var = 0.0\n\n #: How many seconds will the particle live\n life = 0\n #: Life variance\n life_var = 0\n\n #: Start color of the particles\n start_color = Color(0.0,0.0,0.0,0.0)\n #: Start color variance\n start_color_var = Color(0.0,0.0,0.0,0.0)\n #: End color of the particles\n end_color = Color(0.0,0.0,0.0,0.0)\n #: End color variance\n end_color_var = Color(0.0,0.0,0.0,0.0)\n\n #: Maximum particles\n total_particles = 0\n\n #:texture of the particles\n texture = pyglet.resource.image('fire.png').texture\n\n #:blend additive\n blend_additive = False\n\n #:color modulate\n color_modulate = True\n\n # position type\n position_type = POSITION_GROUPED\n\n def __init__(self):\n super(ParticleSystem,self).__init__()\n\n # particles\n # position x 2\n self.particle_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )\n # direction x 2\n self.particle_dir = numpy.zeros( (self.total_particles, 2), numpy.float32 )\n # rad accel x 1\n self.particle_rad = numpy.zeros( (self.total_particles, 1), numpy.float32 )\n # tan accel x 1\n self.particle_tan = numpy.zeros( (self.total_particles, 1), numpy.float32 )\n # gravity x 2\n self.particle_grav = numpy.zeros( (self.total_particles, 2), numpy.float32 )\n # colors x 4\n self.particle_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )\n # delta colors x 4\n self.particle_delta_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )\n # life x 1\n self.particle_life = numpy.zeros( (self.total_particles, 1), numpy.float32 )\n self.particle_life.fill(-1.0)\n # size x 1\n self.particle_size = numpy.zeros( (self.total_particles, 1), numpy.float32 )\n # start position\n self.start_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )\n\n #: How many particles can be emitted per second\n self.emit_counter = 0\n \n #: Count of particles\n self.particle_count = 0\n\n #: auto remove when particle finishes\n self.auto_remove_on_finish = False\n\n self.schedule( self.step )\n\n def on_enter( self ):\n super( ParticleSystem, self).on_enter()\n self.add_particle()\n\n def draw( self ):\n glPushMatrix()\n self.transform()\n\n glPointSize( self.size )\n\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture.id )\n\n glEnable(GL_POINT_SPRITE)\n glTexEnvi( GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE )\n\n\n glEnableClientState(GL_VERTEX_ARRAY)\n vertex_ptr = PointerToNumpy( self.particle_pos )\n glVertexPointer(2,GL_FLOAT,0,vertex_ptr);\n\n glEnableClientState(GL_COLOR_ARRAY)\n color_ptr = PointerToNumpy( self.particle_color)\n glColorPointer(4,GL_FLOAT,0,color_ptr);\n\n glPushAttrib(GL_COLOR_BUFFER_BIT)\n glEnable(GL_BLEND)\n if self.blend_additive:\n glBlendFunc(GL_SRC_ALPHA, GL_ONE);\n else:\n glBlendFunc(GL_SRC_ALPHA, 
GL_ONE_MINUS_SRC_ALPHA);\n\n# mode = GLint()\n# glTexEnviv( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode )\n#\n# if self.color_modulate:\n# glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE )\n# else:\n# glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE )\n\n\n glDrawArrays(GL_POINTS, 0, self.total_particles);\n\n # un -blend\n glPopAttrib()\n\n# # restore env mode\n# glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode)\n\n # disable states\n glDisableClientState(GL_COLOR_ARRAY);\n glDisableClientState(GL_VERTEX_ARRAY);\n glDisable(GL_POINT_SPRITE);\n glDisable(GL_TEXTURE_2D);\n\n glPopMatrix()\n\n\n def step( self, delta ):\n\n # update particle count\n self.particle_count = numpy.sum( self.particle_life >= 0 )\n\n if self.active:\n rate = 1.0 / self.emission_rate\n self.emit_counter += delta\n\n# if random.random() < 0.01:\n# delta += 0.5\n\n while self.particle_count < self.total_particles and self.emit_counter > rate:\n self.add_particle()\n self.emit_counter -= rate\n\n self.elapsed += delta\n\n if self.duration != -1 and self.duration < self.elapsed:\n self.stop_system()\n\n self.update_particles( delta )\n\n if self.particle_count == 0 and self.auto_remove_on_finish == True:\n self.unschedule( self.step )\n self.parent.remove( self )\n\n def add_particle( self ):\n self.init_particle()\n self.particle_count += 1\n\n def stop_system( self ):\n self.active = False\n self.elapsed= self.duration\n self.emit_counter = 0\n\n def reset_system( self ):\n self.elapsed= self.duration\n self.emit_counter = 0\n\n def update_particles( self, delta ):\n # radial: posx + posy\n norm = numpy.sqrt( self.particle_pos[:,0] ** 2 + self.particle_pos[:,1] ** 2 )\n # XXX prevent div by 0\n norm = numpy.select( [norm==0], [0.0000001], default=norm )\n posx = self.particle_pos[:,0] / norm\n posy = self.particle_pos[:,1] / norm\n\n radial = numpy.array( [posx, posy] )\n tangential = numpy.array( [-posy, posx] )\n\n # update dir\n radial = numpy.swapaxes(radial,0,1)\n radial *= self.particle_rad\n tangential = numpy.swapaxes(tangential,0,1)\n tangential *= self.particle_tan\n\n self.particle_dir += (tangential + radial + self.particle_grav) * delta\n\n # update pos with updated dir\n self.particle_pos += self.particle_dir * delta\n\n # life\n self.particle_life -= delta\n\n\n # position: free or grouped\n if self.position_type == self.POSITION_FREE:\n tuple = numpy.array( [self.x, self.y] )\n tmp = tuple - self.start_pos\n self.particle_pos -= tmp\n\n\n # color\n self.particle_color += self.particle_delta_color * delta\n\n # if life < 0, set alpha in 0\n self.particle_color[:,3] = numpy.select( [self.particle_life[:,0] < 0], [0], default=self.particle_color[:,3] )\n\n# print self.particles[0]\n# print self.pas[0,0:4]\n\n def init_particle( self ):\n # position\n# p=self.particles[idx]\n\n a = self.particle_life < 0\n idxs = a.nonzero()\n\n idx = -1\n\n if len(idxs[0]) > 0:\n idx = idxs[0][0] \n else:\n raise Exception(\"No empty particle\")\n\n # position\n self.particle_pos[idx][0] = self.pos_var.x * rand()\n self.particle_pos[idx][1] = self.pos_var.y * rand()\n\n # start position\n self.start_pos[idx][0] = self.x\n self.start_pos[idx][1] = self.y\n\n a = math.radians( self.angle + self.angle_var * rand() )\n v = Point2( math.cos( a ), math.sin( a ) )\n s = self.speed + self.speed_var * rand()\n\n dir = v * s\n\n # direction\n self.particle_dir[idx][0] = dir.x\n self.particle_dir[idx][1] = dir.y\n\n # radial accel\n self.particle_rad[idx] = self.radial_accel + self.radial_accel_var * 
rand()\n\n\n # tangential accel\n self.particle_tan[idx] = self.tangential_accel + self.tangential_accel_var * rand()\n \n # life\n life = self.particle_life[idx] = self.life + self.life_var * rand()\n\n # Color\n # start\n sr = self.start_color.r + self.start_color_var.r * rand()\n sg = self.start_color.g + self.start_color_var.g * rand()\n sb = self.start_color.b + self.start_color_var.b * rand()\n sa = self.start_color.a + self.start_color_var.a * rand()\n\n self.particle_color[idx][0] = sr\n self.particle_color[idx][1] = sg\n self.particle_color[idx][2] = sb\n self.particle_color[idx][3] = sa\n\n # end\n er = self.end_color.r + self.end_color_var.r * rand()\n eg = self.end_color.g + self.end_color_var.g * rand()\n eb = self.end_color.b + self.end_color_var.b * rand()\n ea = self.end_color.a + self.end_color_var.a * rand()\n\n delta_color_r = (er - sr) / life\n delta_color_g = (eg - sg) / life\n delta_color_b = (eb - sb) / life\n delta_color_a = (ea - sa) / life\n\n self.particle_delta_color[idx][0] = delta_color_r\n self.particle_delta_color[idx][1] = delta_color_g\n self.particle_delta_color[idx][2] = delta_color_b\n self.particle_delta_color[idx][3] = delta_color_a\n\n # size\n self.particle_size[idx] = self.size + self.size_var * rand()\n\n # gravity\n self.particle_grav[idx][0] = self.gravity.x\n self.particle_grav[idx][1] = self.gravity.y\n"
] |
[
[
"numpy.swapaxes",
"numpy.sqrt",
"numpy.ascontiguousarray",
"numpy.select",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
stefanvdlugt/markedgraphs
|
[
"1eee51fdaf4add9064d1e53ecc3a811bcea82f06"
] |
[
"markedgraphs/sequences.py"
] |
[
"import numpy as np\n\ndef integer_sequences(L, S, nondecr=False, m=None, M=None):\n \"\"\"\n Generate sequences of non-negative integers.\n \n Parameters:\n L: the length of the sequences\n S: the sum of the integers in each sequence\n \n Optional parameters:\n nondecr: (boolean) return only non-decreasing sequences (default: False)\n m: tuple of length L; gives lower bounds for coefficients of list (default: None)\n M: tuple of length L; gives upper bounds for coefficients of list (default: None)\n \"\"\"\n \n # If M and m are not given, use the following defaults.\n if M is None:\n M = (S,)*L\n if m is None:\n m = (0,)*L\n \n # If length=0 and sum=0 then yield the empty tuple.\n # Otherwise, yield nothing.\n if L==0:\n if S==0:\n yield tuple()\n # If length=1 and sum lies within given boundaries, yield 1-tuple.\n elif L==1:\n if m[0]<=S<=M[0]: \n yield (S,)\n # If length>1, then loop through possible values for first coefficient,\n # and recursively generate (L-1)-tuples that give the remaining coefficients.\n elif L>1:\n for first in range(m[0], min(S,M[0])+1):\n m_next = m[1:] if nondecr==False else (first,)+m[2:]\n for tail in integer_sequences(L=L-1, S=S-first, nondecr=nondecr, m=m_next, M=M[1:]):\n yield (first,)+tail\n\ndef matrices_from_degree_sequence(deg_seq=tuple()):\n \"\"\"\n Generate all matrices that can occur as adjacency matrices\n of r-marked graphs whose degree_sequence() equals deg_seq.\n \n These are symmetric (r×r)-matrices with non-negative integer coefficients\n with even coefficients on the diagonal, whose row sum equals deg_seq.\n \"\"\"\n L = len(deg_seq)\n \n M = np.zeros((L,L), dtype=int)\n if L==0:\n yield M\n elif L>0:\n # Range over possible values for top left entries:\n for top_left in range(0,deg_seq[0]+1,2):\n M[0,0] = top_left\n # Range over sequences that make up the rest of the first row:\n for top_row_rest in integer_sequences(L=L-1, S=deg_seq[0]-top_left, M=deg_seq[1:]):\n M[0,1:] = M[1:,0] = top_row_rest\n # Compute the row sum of the remaining bottom right square\n row_sum_rem = tuple(deg_seq[i] - M[0,i] for i in range(1,L))\n \n # Loop over all possible bottom right squares:\n for BRS in matrices_from_degree_sequence(deg_seq=row_sum_rem):\n M[1:,1:]=BRS\n yield M.copy()\n\ndef increasing_injections(r,s,m=1):\n \"\"\"\n Generates all increasing tuples of length r\n with values in {1, ..., s}.\n \n Optional argument:\n m: minimum value of first entry (default=1, used for recursion)\n \"\"\"\n if r==0:\n yield tuple()\n elif r==1:\n for i in range(m,s+1):\n yield (i,)\n else:\n for first in range(m,s-r+2):\n for tail in increasing_injections(r-1,s,m=first+1):\n yield (first,)+tail\n "
] |
[
[
"numpy.zeros"
]
] |
swaldow/pycaret
|
[
"f46a388ad8db6896a6a4252ac21bfd582b803fa1"
] |
[
"pycaret/internal/preprocess.py"
] |
[
"# Module: Preprocess\n# Author: Fahad Akbar <m.akbar@queensu.ca>\n# License: MIT\n\nimport pandas as pd\nimport numpy as np\nimport ipywidgets as wg\nfrom IPython.display import display\nfrom ipywidgets import Layout\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nfrom sklearn.impute._base import _BaseImputer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import MaxAbsScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import IncrementalPCA\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom pyod.models.knn import KNN\nfrom pyod.models.iforest import IForest\nfrom pyod.models.pca import PCA as PCA_od\nfrom sklearn import cluster\nfrom scipy import stats\nfrom sklearn.ensemble import RandomForestClassifier as rfc\nfrom sklearn.ensemble import RandomForestRegressor as rfr\nfrom lightgbm import LGBMClassifier as lgbmc\nfrom lightgbm import LGBMRegressor as lgbmr\nimport sys\nimport gc\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom datetime import datetime\nimport calendar\nfrom sklearn.preprocessing import LabelEncoder\nfrom collections import defaultdict\nfrom typing import Optional, Union\nfrom pycaret.internal.logging import get_logger\nfrom pycaret.internal.utils import infer_ml_usecase\n\nfrom sklearn.utils.validation import check_is_fitted, check_random_state\n\npd.set_option(\"display.max_columns\", 500)\npd.set_option(\"display.max_rows\", 500)\n\nSKLEARN_EMPTY_STEP = \"passthrough\"\n\n# _____________________________________________________________________________________________________________________________\n\n\ndef str_if_not_null(x):\n if pd.isnull(x) or (x is None) or pd.isna(x) or (x is not x):\n return x\n return str(x)\n\n\ndef find_id_columns(data, target, numerical_features):\n # some times we have id column in the data set, we will try to find it and then will drop it if found\n len_samples = len(data)\n id_columns = []\n for i in data.select_dtypes(\n include=[\"object\", \"int64\", \"float64\", \"float32\"]\n ).columns:\n col = data[i]\n if i not in numerical_features and i != target:\n if sum(col.isnull()) == 0:\n try:\n col = col.astype(\"int64\")\n except:\n continue\n if col.nunique() == len_samples:\n # we extract column and sort it\n features = col.sort_values()\n # no we subtract i+1-th value from i-th (calculating increments)\n increments = features.diff()[1:]\n # if all increments are 1 (with float tolerance), then the column is ID column\n if sum(np.abs(increments - 1) < 1e-7) == len_samples - 1:\n id_columns.append(i)\n return id_columns\n\n\nclass DataTypes_Auto_infer(BaseEstimator, TransformerMixin):\n \"\"\"\n - This will try to infer data types automatically, option to override learent data types is also available.\n - This alos automatically delets duplicate columns (values or same colume name), removes rows where target variable is null and \n remove columns and rows where all the records are null\n \"\"\"\n\n def __init__(\n self,\n target,\n ml_usecase,\n 
categorical_features=[],\n numerical_features=[],\n time_features=[],\n features_todrop=[],\n id_columns=[],\n display_types=True,\n ): # nothing to define\n \"\"\"\n User to define the target (y) variable\n args:\n target: string, name of the target variable\n ml_usecase: string , 'regresson' or 'classification . For now, only supports two class classification\n - this is useful in case target variable is an object / string . it will replace the strings with integers\n categorical_features: list of categorical features, default None, when None best guess will be used to identify categorical features\n numerical_features: list of numerical features, default None, when None best guess will be used to identify numerical features\n time_features: list of date/time features, default None, when None best guess will be used to identify date/time features \n \"\"\"\n self.target = target\n self.ml_usecase = ml_usecase\n self.features_todrop = [str(x) for x in features_todrop]\n self.categorical_features = [\n x for x in categorical_features if x not in self.features_todrop\n ]\n self.numerical_features = [\n x for x in numerical_features if x not in self.features_todrop\n ]\n self.time_features = [x for x in time_features if x not in self.features_todrop]\n self.display_types = display_types\n self.id_columns = id_columns\n\n def fit(self, dataset, y=None): # learning data types of all the columns\n \"\"\"\n Args: \n data: accepts a pandas data frame\n Returns:\n Panda Data Frame\n \"\"\"\n\n data = dataset.copy()\n\n # also make sure that all the column names are string\n data.columns = [str(i) for i in data.columns]\n\n # drop any columns that were asked to drop\n data.drop(columns=self.features_todrop, errors=\"ignore\", inplace=True)\n\n # remove sepcial char from column names\n # data.columns= data.columns.str.replace('[,]','')\n\n # we will take float as numberic, object as categorical from the begning\n # fir int64, we will check to see what is the proportion of unique counts to the total lenght of the data\n # if proportion is lower, then it is probabaly categorical\n # however, proportion can be lower / disturebed due to samller denominator (total lenghth / number of samples)\n # so we will take the following chart\n # 0-50 samples, threshold is 24%\n # 50-100 samples, th is 12%\n # 50-250 samples , th is 4.8%\n # 250-500 samples, th is 2.4%\n # 500 and above 2% or belwo\n\n # if there are inf or -inf then replace them with NaN\n data.replace([np.inf, -np.inf], np.NaN, inplace=True)\n\n # we canc check if somehow everything is object, we can try converting them in float\n for i in data.select_dtypes(include=[\"object\"]).columns:\n try:\n data[i] = data[i].astype(\"int64\")\n except:\n None\n\n for i in (\n data.select_dtypes(include=[\"object\"])\n .drop(self.target, axis=1, errors=\"ignore\")\n .columns\n ):\n try:\n data[i] = pd.to_datetime(\n data[i], infer_datetime_format=True, utc=False, errors=\"raise\"\n )\n except:\n continue\n\n # if data type is bool or pandas Categorical , convert to categorical\n for i in data.select_dtypes(include=[\"bool\", \"category\"]).columns:\n data[i] = data[i].astype(\"object\")\n\n # wiith csv , if we have any null in a colum that was int , panda will read it as float.\n # so first we need to convert any such floats that have NaN and unique values are lower than 20\n for i in data.select_dtypes(include=[\"float64\"]).columns:\n data[i] = data[i].astype(\"float32\")\n # count how many Nas are there\n na_count = sum(data[i].isnull())\n # count how 
many digits are there that have decimiles\n count_float = np.nansum(\n [False if r.is_integer() else True for r in data[i]]\n )\n # total decimiels digits\n count_float = (\n count_float - na_count\n ) # reducing it because we know NaN is counted as a float digit\n # now if there isnt any float digit , & unique levales are less than 20 and there are Na's then convert it to object\n if (count_float == 0) & (data[i].nunique() <= 20) & (na_count > 0):\n data[i] = data[i].astype(\"object\")\n\n # should really be an absolute number say 20\n # length = len(data.iloc[:,0])\n # if length in range(0,51):\n # th=.25\n # elif length in range(51,101):\n # th=.12\n # elif length in range(101,251):\n # th=.048\n # elif length in range(251,501):\n # th=.024\n # elif length > 500:\n # th=.02\n\n # if column is int and unique counts are more than two, then: (exclude target)\n for i in data.select_dtypes(include=[\"int64\"]).columns:\n if i != self.target:\n if data[i].nunique() <= 20: # hard coded\n data[i] = data[i].apply(str_if_not_null)\n else:\n data[i] = data[i].astype(\"float32\")\n # # if colum is objfloat and only have two unique counts , this is probabaly one hot encoded\n # # make it object\n for i in data.select_dtypes(include=[\"float32\"]).columns:\n if data[i].nunique() == 2:\n data[i] = data[i].apply(str_if_not_null)\n\n # for time & dates\n # self.drop_time = [] # for now we are deleting time columns\n\n # now in case we were given any specific columns dtypes in advance , we will over ride theos\n for i in self.categorical_features:\n try:\n data[i] = data[i].apply(str_if_not_null)\n except:\n data[i] = dataset[i].apply(str_if_not_null)\n\n for i in self.numerical_features:\n try:\n data[i] = data[i].astype(\"float32\")\n except:\n data[i] = dataset[i].astype(\"float32\")\n\n for i in self.time_features:\n try:\n data[i] = pd.to_datetime(\n data[i], infer_datetime_format=True, utc=False, errors=\"raise\"\n )\n except:\n data[i] = pd.to_datetime(\n dataset[i], infer_datetime_format=True, utc=False, errors=\"raise\"\n )\n\n for i in data.select_dtypes(\n include=[\"datetime64\", \"datetime64[ns, UTC]\"]\n ).columns:\n data[i] = data[i].astype(\"datetime64[ns]\")\n\n # table of learent types\n self.learned_dtypes = data.dtypes\n # self.training_columns = data.drop(self.target,axis=1).columns\n\n # if there are inf or -inf then replace them with NaN\n data = data.replace([np.inf, -np.inf], np.NaN).astype(self.learned_dtypes)\n\n # lets remove duplicates\n # remove duplicate columns (columns with same values)\n # (too expensive on bigger data sets)\n # data_c = data.T.drop_duplicates()\n # data = data_c.T\n # remove columns with duplicate name\n data = data.loc[:, ~data.columns.duplicated()]\n # Remove NAs\n data.dropna(axis=0, how=\"all\", inplace=True)\n data.dropna(axis=1, how=\"all\", inplace=True)\n # remove the row if target column has NA\n try:\n data.dropna(subset=[self.target], inplace=True)\n except KeyError:\n pass\n\n # self.training_columns = data.drop(self.target,axis=1).columns\n\n # since due to transpose , all data types have changed, lets change the dtypes to original---- not required any more since not transposing any more\n # for i in data.columns: # we are taking all the columns in test , so we dot have to worry about droping target column\n # data[i] = data[i].astype(self.learned_dtypes[self.learned_dtypes.index==i])\n\n if self.display_types == True:\n display(\n wg.Text(\n value=\"Following data types have been inferred automatically, if they are correct press enter to 
continue or type 'quit' otherwise.\",\n layout=Layout(width=\"100%\"),\n ),\n display_id=\"m1\",\n )\n\n dt_print_out = pd.DataFrame(\n self.learned_dtypes, columns=[\"Feature_Type\"]\n ).drop(\"UNSUPERVISED_DUMMY_TARGET\", errors=\"ignore\")\n dt_print_out[\"Data Type\"] = \"\"\n\n for i in dt_print_out.index:\n if i != self.target:\n if i in self.id_columns:\n dt_print_out.loc[i, \"Data Type\"] = \"ID Column\"\n elif dt_print_out.loc[i, \"Feature_Type\"] == \"object\":\n dt_print_out.loc[i, \"Data Type\"] = \"Categorical\"\n elif dt_print_out.loc[i, \"Feature_Type\"] == \"float32\":\n dt_print_out.loc[i, \"Data Type\"] = \"Numeric\"\n elif dt_print_out.loc[i, \"Feature_Type\"] == \"datetime64[ns]\":\n dt_print_out.loc[i, \"Data Type\"] = \"Date\"\n # elif dt_print_out.loc[i,'Feature_Type'] == 'int64':\n # dt_print_out.loc[i,'Data Type'] = 'Categorical'\n else:\n dt_print_out.loc[i, \"Data Type\"] = \"Label\"\n\n # if we added the dummy target column , then drop it\n dt_print_out.drop(index=\"dummy_target\", errors=\"ignore\", inplace=True)\n\n # increase maximum displayed rows to 1000\n pd.set_option(\"display.max_rows\", 1000)\n\n display(dt_print_out[[\"Data Type\"]])\n\n # reset pandas option\n pd.reset_option(\"display.max_rows\")\n \n self.response = input()\n\n if self.response in [\n \"quit\",\n \"Quit\",\n \"exit\",\n \"EXIT\",\n \"q\",\n \"Q\",\n \"e\",\n \"E\",\n \"QUIT\",\n \"Exit\",\n ]:\n sys.exit(\n \"Read the documentation of setup to learn how to overwrite data types over the inferred types. setup function must run again before you continue modeling.\"\n )\n\n # drop time columns\n # data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)\n\n # drop id columns\n data.drop(self.id_columns, axis=1, errors=\"ignore\", inplace=True)\n\n return data\n\n def transform(self, dataset, y=None):\n \"\"\"\n Args: \n data: accepts a pandas data frame\n Returns:\n Panda Data Frame\n \"\"\"\n\n data = dataset.copy()\n\n # also make sure that all the column names are string\n data.columns = [str(i) for i in data.columns]\n\n # drop any columns that were asked to drop\n data.drop(columns=self.features_todrop, errors=\"ignore\", inplace=True)\n data = data[self.final_training_columns]\n\n # also make sure that all the column names are string\n data.columns = [str(i) for i in data.columns]\n\n # if there are inf or -inf then replace them with NaN\n data.replace([np.inf, -np.inf], np.NaN, inplace=True)\n\n try:\n data.dropna(subset=[self.target], inplace=True)\n except KeyError:\n pass\n\n # remove sepcial char from column names\n # data.columns= data.columns.str.replace('[,]','')\n\n # very first thing we need to so is to check if the training and test data hace same columns\n\n for i in self.final_training_columns:\n if i not in data.columns:\n raise TypeError(\n f\"test data does not have column {i} which was used for training.\"\n )\n\n # just keep picking the data and keep applying to the test data set (be mindful of target variable)\n for (\n i\n ) in (\n data.columns\n ): # we are taking all the columns in test , so we dot have to worry about droping target column\n if i == self.target and (\n (self.ml_usecase == \"classification\")\n and (self.learned_dtypes[self.target] == \"object\")\n ):\n data[i] = self.le.transform(data[i].apply(str).astype(\"object\"))\n data[i] = data[i].astype(\"int64\")\n else:\n if self.learned_dtypes[i].name == \"datetime64[ns]\":\n data[i] = pd.to_datetime(\n data[i], infer_datetime_format=True, utc=False, errors=\"coerce\"\n )\n data[i] = 
data[i].astype(self.learned_dtypes[i])\n\n # drop time columns\n # data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)\n\n # drop id columns\n data.drop(self.id_columns, axis=1, errors=\"ignore\", inplace=True)\n\n return data\n\n # fit_transform\n def fit_transform(self, dataset, y=None):\n\n data = dataset\n\n # since this is for training , we dont nees any transformation since it has already been transformed in fit\n data = self.fit(data)\n\n # additionally we just need to treat the target variable\n # for ml use ase\n if (self.ml_usecase == \"classification\") & (\n data[self.target].dtype == \"object\"\n ):\n self.le = LabelEncoder()\n data[self.target] = self.le.fit_transform(\n data[self.target].apply(str).astype(\"object\")\n )\n self.replacement = _get_labelencoder_reverse_dict(self.le)\n\n # self.u = list(pd.unique(data[self.target]))\n # self.replacement = np.arange(0,len(self.u))\n # data[self.target]= data[self.target].replace(self.u,self.replacement)\n # data[self.target] = data[self.target].astype('int64')\n # self.replacement = pd.DataFrame(dict(target_variable=self.u,replaced_with=self.replacement))\n\n # drop time columns\n # data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)\n\n # drop id columns\n data.drop(self.id_columns, axis=1, errors=\"ignore\", inplace=True)\n # finally save a list of columns that we would need from test data set\n self.final_training_columns = data.columns.to_list()\n self.final_training_columns.remove(self.target)\n\n return data\n\n\n# _______________________________________________________________________________________________________________________\n# Imputation\n\n\nclass Simple_Imputer(_BaseImputer):\n \"\"\"\n Imputes all type of data (numerical,categorical & Time).\n Highly recommended to run Define_dataTypes class first\n Numerical values can be imputed with mean or median or filled with zeros\n categorical missing values will be replaced with \"Other\"\n Time values are imputed with the most frequesnt value\n Ignores target (y) variable \n Args: \n Numeric_strategy: string , all possible values {'mean','median','zero'}\n categorical_strategy: string , all possible values {'not_available','most frequent'}\n target: string , name of the target variable\n\n \"\"\"\n\n _numeric_strategies = {\n \"mean\": \"mean\",\n \"median\": \"median\",\n \"most frequent\": \"most_frequent\",\n \"most_frequent\": \"most_frequent\",\n \"zero\": \"constant\",\n }\n _categorical_strategies = {\n \"most frequent\": \"most_frequent\",\n \"most_frequent\": \"most_frequent\",\n \"not_available\": \"constant\",\n }\n\n def __init__(\n self,\n numeric_strategy,\n categorical_strategy,\n target_variable,\n fill_value_numerical=0,\n fill_value_categorical=\"not_available\",\n ):\n if numeric_strategy not in self._numeric_strategies:\n numeric_strategy = \"zero\"\n self.numeric_strategy = numeric_strategy\n self.target = target_variable\n if categorical_strategy not in self._categorical_strategies:\n categorical_strategy = \"most_frequent\"\n self.categorical_strategy = categorical_strategy\n self.numeric_imputer = SimpleImputer(\n strategy=self._numeric_strategies[self.numeric_strategy],\n fill_value=fill_value_numerical,\n )\n self.categorical_imputer = SimpleImputer(\n strategy=self._categorical_strategies[self.categorical_strategy],\n fill_value=fill_value_categorical,\n )\n self.most_frequent_time = []\n\n def fit(self, dataset, y=None): #\n try:\n data = dataset.drop(self.target, axis=1)\n except:\n data = dataset\n self.numeric_columns 
= data.select_dtypes(include=[\"float32\", \"int64\"]).columns\n self.categorical_columns = data.select_dtypes(include=[\"object\"]).columns\n self.time_columns = data.select_dtypes(include=[\"datetime64[ns]\"]).columns\n\n statistics = []\n\n if not self.numeric_columns.empty:\n self.numeric_imputer.fit(data[self.numeric_columns])\n statistics.append((self.numeric_imputer.statistics_, self.numeric_columns))\n if not self.categorical_columns.empty:\n self.categorical_imputer.fit(data[self.categorical_columns])\n statistics.append(\n (self.categorical_imputer.statistics_, self.categorical_columns)\n )\n if not self.time_columns.empty:\n self.most_frequent_time = []\n for col in self.time_columns:\n self.most_frequent_time.append(data[col].mode()[0])\n statistics.append((self.most_frequent_time, self.time_columns))\n\n self.statistics_ = np.zeros(shape=len(data.columns), dtype=object)\n columns = list(data.columns)\n for s, index in statistics:\n for i, j in enumerate(index):\n self.statistics_[columns.index(j)] = s[i]\n\n return\n\n def transform(self, dataset, y=None):\n data = dataset\n imputed_data = []\n if not self.numeric_columns.empty:\n numeric_data = pd.DataFrame(\n self.numeric_imputer.transform(data[self.numeric_columns]),\n columns=self.numeric_columns,\n index=data.index,\n )\n imputed_data.append(numeric_data)\n if not self.categorical_columns.empty:\n categorical_data = pd.DataFrame(\n self.categorical_imputer.transform(data[self.categorical_columns]),\n columns=self.categorical_columns,\n index=data.index,\n )\n for col in categorical_data.columns:\n categorical_data[col] = categorical_data[col].apply(str)\n imputed_data.append(categorical_data)\n if not self.time_columns.empty:\n time_data = data[self.time_columns]\n for i, col in enumerate(time_data.columns):\n time_data[col].fillna(self.most_frequent_time[i])\n imputed_data.append(time_data)\n\n if imputed_data:\n data.update(pd.concat(imputed_data, axis=1))\n data.astype(dataset.dtypes)\n\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n self.fit(data)\n return self.transform(data)\n\n\n# _______________________________________________________________________________________________________________________\n# Imputation with surrogate columns\nclass Surrogate_Imputer(_BaseImputer):\n \"\"\"\n Imputes feature with surrogate column (numerical,categorical & Time).\n - Highly recommended to run Define_dataTypes class first\n - it is also recommended to only apply this to features where it makes business sense to creat surrogate column\n - feature name has to be provided\n - only able to handle one feature at a time\n - Numerical values can be imputed with mean or median or filled with zeros\n - categorical missing values will be replaced with \"Other\"\n - Time values are imputed with the most frequesnt value\n - Ignores target (y) variable \n Args: \n feature_name: string, provide features name\n feature_type: string , all possible values {'numeric','categorical','date'}\n strategy: string ,all possible values {'mean','median','zero','not_available','most frequent'}\n target: string , name of the target variable\n\n \"\"\"\n\n def __init__(self, numeric_strategy, categorical_strategy, target_variable):\n self.numeric_strategy = numeric_strategy\n self.target = target_variable\n self.categorical_strategy = categorical_strategy\n\n def fit(self, dataset, y=None): #\n def zeros(x):\n return 0\n\n data = dataset\n # make a table for numerical variable with strategy stats\n if self.numeric_strategy 
== \"mean\":\n self.numeric_stats = (\n data.drop(self.target, axis=1)\n .select_dtypes(include=[\"float32\", \"int64\"])\n .apply(np.nanmean)\n )\n elif self.numeric_strategy == \"median\":\n self.numeric_stats = (\n data.drop(self.target, axis=1)\n .select_dtypes(include=[\"float32\", \"int64\"])\n .apply(np.nanmedian)\n )\n else:\n self.numeric_stats = (\n data.drop(self.target, axis=1)\n .select_dtypes(include=[\"float32\", \"int64\"])\n .apply(zeros)\n )\n\n self.numeric_columns = (\n data.drop(self.target, axis=1)\n .select_dtypes(include=[\"float32\", \"int64\"])\n .columns\n )\n # also need to learn if any columns had NA in training\n self.numeric_na = pd.DataFrame(columns=self.numeric_columns)\n for i in self.numeric_columns:\n if data[i].isnull().any() == True:\n self.numeric_na.loc[0, i] = True\n else:\n self.numeric_na.loc[0, i] = False\n\n # for Catgorical ,\n if self.categorical_strategy == \"most frequent\":\n self.categorical_columns = (\n data.drop(self.target, axis=1).select_dtypes(include=[\"object\"]).columns\n )\n self.categorical_stats = pd.DataFrame(\n columns=self.categorical_columns\n ) # place holder\n for i in self.categorical_stats.columns:\n self.categorical_stats.loc[0, i] = data[i].value_counts().index[0]\n # also need to learn if any columns had NA in training, but this is only valid if strategy is \"most frequent\"\n self.categorical_na = pd.DataFrame(columns=self.categorical_columns)\n for i in self.categorical_columns:\n if sum(data[i].isnull()) > 0:\n self.categorical_na.loc[0, i] = True\n else:\n self.categorical_na.loc[0, i] = False\n else:\n self.categorical_columns = (\n data.drop(self.target, axis=1).select_dtypes(include=[\"object\"]).columns\n )\n self.categorical_na = pd.DataFrame(columns=self.categorical_columns)\n self.categorical_na.loc[\n 0, :\n ] = False # (in this situation we are not making any surrogate column)\n\n # for time, there is only one way, pick up the most frequent one\n self.time_columns = (\n data.drop(self.target, axis=1)\n .select_dtypes(include=[\"datetime64[ns]\"])\n .columns\n )\n self.time_stats = pd.DataFrame(columns=self.time_columns) # place holder\n self.time_na = pd.DataFrame(columns=self.time_columns)\n for i in self.time_columns:\n self.time_stats.loc[0, i] = data[i].value_counts().index[0]\n\n # learn if time columns were NA\n for i in self.time_columns:\n if data[i].isnull().any() == True:\n self.time_na.loc[0, i] = True\n else:\n self.time_na.loc[0, i] = False\n\n return data # nothing to return\n\n def transform(self, dataset, y=None):\n data = dataset\n # for numeric columns\n for i, s in zip(data[self.numeric_columns].columns, self.numeric_stats):\n array = data[i].isnull()\n data[i].fillna(s, inplace=True)\n # make a surrogate column if there was any\n if self.numeric_na.loc[0, i] == True:\n data[i + \"_surrogate\"] = array\n # make it string\n data[i + \"_surrogate\"] = data[i + \"_surrogate\"].apply(str)\n\n # for categorical columns\n if self.categorical_strategy == \"most frequent\":\n for i in self.categorical_stats.columns:\n # data[i].fillna(self.categorical_stats.loc[0,i],inplace=True)\n array = data[i].isnull()\n data[i] = data[i].fillna(self.categorical_stats.loc[0, i])\n data[i] = data[i].apply(str)\n # make surrogate column\n if self.categorical_na.loc[0, i] == True:\n data[i + \"_surrogate\"] = array\n # make it string\n data[i + \"_surrogate\"] = data[i + \"_surrogate\"].apply(str)\n else: # this means replace na with \"not_available\"\n for i in self.categorical_columns:\n 
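# replace missing values with the fixed sentinel level, then cast the column to str\n                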
data[i].fillna(\"not_available\", inplace=True)\n data[i] = data[i].apply(str)\n # no need to make surrogate since not_available is itself a new colum\n\n # for time\n for i in self.time_stats.columns:\n array = data[i].isnull()\n data[i].fillna(self.time_stats.loc[0, i], inplace=True)\n # make surrogate column\n if self.time_na.loc[0, i] == True:\n data[i + \"_surrogate\"] = array\n # make it string\n data[i + \"_surrogate\"] = data[i + \"_surrogate\"].apply(str)\n\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n data = self.fit(data)\n return self.transform(data)\n\n\nclass Iterative_Imputer(_BaseImputer):\n def __init__(\n self,\n regressor: BaseEstimator,\n classifier: BaseEstimator,\n *,\n target=None,\n missing_values=np.nan,\n initial_strategy_numeric: str = \"mean\",\n initial_strategy_categorical: str = \"most_frequent\",\n ordinal_columns: Optional[list] = None,\n max_iter: int = 10,\n warm_start: bool = False,\n imputation_order: str = \"ascending\",\n verbose: int = 0,\n random_state: int = None,\n add_indicator: bool = False,\n ):\n super().__init__(missing_values=missing_values, add_indicator=add_indicator)\n\n self.regressor = regressor\n self.classifier = classifier\n self.initial_strategy_numeric = initial_strategy_numeric\n self.initial_strategy_categorical = initial_strategy_categorical\n self.max_iter = max_iter\n self.warm_start = warm_start\n self.imputation_order = imputation_order\n self.verbose = verbose\n self.random_state = random_state\n self.target = target\n if ordinal_columns is None:\n ordinal_columns = []\n self.ordinal_columns = list(ordinal_columns)\n self._column_cleaner = Clean_Colum_Names()\n\n def _initial_imputation(self, X):\n if self.initial_imputer_ is None:\n self.initial_imputer_ = Simple_Imputer(\n target_variable=\"__TARGET__\", # dummy value, we don't actually want to drop anything\n numeric_strategy=self.initial_strategy_numeric,\n categorical_strategy=self.initial_strategy_categorical,\n )\n X_filled = self.initial_imputer_.fit_transform(X)\n else:\n X_filled = self.initial_imputer_.transform(X)\n\n return X_filled\n\n def _impute_one_feature(self, X, column, X_na_mask, fit):\n if not fit:\n check_is_fitted(self)\n is_classification = (\n X[column].dtype.name == \"object\" or column in self.ordinal_columns\n )\n if is_classification:\n if column in self.classifiers_:\n time, dummy, le, estimator = self.classifiers_[column]\n elif not fit:\n return X\n else:\n estimator = clone(self._classifier)\n time = Make_Time_Features()\n dummy = Dummify(column)\n le = LabelEncoder()\n else:\n if column in self.regressors_:\n time, dummy, le, estimator = self.regressors_[column]\n elif not fit:\n return X\n else:\n estimator = clone(self._regressor)\n time = Make_Time_Features()\n dummy = Dummify(column)\n le = None\n\n if fit:\n fit_kwargs = {}\n X_train = X[~X_na_mask[column]]\n y_train = X_train[column]\n # catboost handles categoricals itself\n if \"catboost\" not in str(type(estimator)).lower():\n X_train = time.fit_transform(X_train)\n X_train = dummy.fit_transform(X_train)\n X_train.drop(column, axis=1, inplace=True)\n else:\n X_train.drop(column, axis=1, inplace=True)\n fit_kwargs[\"cat_features\"] = []\n for i, col in enumerate(X_train.columns):\n if X_train[col].dtype.name == \"object\":\n X_train[col] = pd.Categorical(\n X_train[col], ordered=column in self.ordinal_columns\n )\n fit_kwargs[\"cat_features\"].append(i)\n fit_kwargs[\"cat_features\"] = np.array(\n fit_kwargs[\"cat_features\"], dtype=int\n )\n 
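# strip unsupported characters from the training column names before fitting the estimator\n            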
X_train = self._column_cleaner.fit_transform(X_train)\n\n if le:\n y_train = le.fit_transform(y_train)\n\n try:\n assert self.warm_start\n estimator.partial_fit(X_train, y_train)\n except:\n estimator.fit(X_train, y_train, **fit_kwargs)\n\n X_test = X.drop(column, axis=1)[X_na_mask[column]]\n X_test = time.transform(X_test)\n # catboost handles categoricals itself\n if \"catboost\" not in str(type(estimator)).lower():\n X_test = dummy.transform(X_test)\n else:\n for col in X_test.select_dtypes(\"object\").columns:\n X_test[col] = pd.Categorical(\n X_test[col], ordered=column in self.ordinal_columns\n )\n result = estimator.predict(X_test)\n if le:\n result = le.inverse_transform(result)\n\n if fit:\n if is_classification:\n self.classifiers_[column] = (time, dummy, le, estimator)\n else:\n self.regressors_[column] = (time, dummy, le, estimator)\n\n if result.dtype.name == \"float64\":\n result = result.astype(\"float32\")\n\n X_test[column] = result\n X.update(X_test[column])\n\n gc.collect()\n\n return X\n\n def _impute(self, X, fit: bool):\n if self.target in X.columns:\n target_column = X[self.target]\n X = X.drop(self.target, axis=1)\n else:\n target_column = None\n\n original_columns = X.columns\n original_index = X.index\n\n X = X.reset_index(drop=True)\n X = self._column_cleaner.fit_transform(X)\n\n self.imputation_sequence_ = (\n X.isnull().sum().sort_values(ascending=self.imputation_order == \"ascending\")\n )\n self.imputation_sequence_ = [\n col\n for col in self.imputation_sequence_[self.imputation_sequence_ > 0].index\n if X[col].dtype.name != \"datetime64[ns]\"\n ]\n\n X_na_mask = X.isnull()\n\n X_imputed = self._initial_imputation(X.copy())\n\n for i in range(self.max_iter if fit else 1):\n for feature in self.imputation_sequence_:\n get_logger().info(f\"Iterative Imputation: {i+1} cycle | {feature}\")\n X_imputed = self._impute_one_feature(X_imputed, feature, X_na_mask, fit)\n\n X_imputed.columns = original_columns\n X_imputed.index = original_index\n\n if target_column is not None:\n X_imputed[self.target] = target_column\n return X_imputed\n\n def transform(self, X, y=None, **fit_params):\n return self._impute(X, fit=False)\n\n def fit_transform(self, X, y=None, **fit_params):\n self.random_state_ = getattr(\n self, \"random_state_\", check_random_state(self.random_state)\n )\n if self.regressor is None:\n raise ValueError(\"No regressor provided\")\n else:\n self._regressor = clone(self.regressor)\n try:\n self._regressor.set_param(random_state=self.random_state_)\n except:\n pass\n if self.classifier is None:\n raise ValueError(\"No classifier provided\")\n else:\n self._classifier = clone(self.classifier)\n try:\n self._classifier.set_param(random_state=self.random_state_)\n except:\n pass\n\n self.classifiers_ = {}\n self.regressors_ = {}\n\n self.initial_imputer_ = None\n\n return self._impute(X, fit=True)\n\n def fit(self, X, y=None, **fit_params):\n self.fit_transform(X, y=y, **fit_params)\n\n return self\n\n\n# _______________________________________________________________________________________________________________________\n# Zero and Near Zero Variance\nclass Zroe_NearZero_Variance(BaseEstimator, TransformerMixin):\n \"\"\"\n - it eliminates the features having zero variance\n - it eliminates the features haveing near zero variance\n - Near zero variance is determined by \n -1) Count of unique points divided by the total length of the feature has to be lower than a pre sepcified threshold \n -2) Most common point(count) divided by the second most common 
point(count) in the feature is greater than a pre specified threshold\n Once both conditions are met , the feature is dropped \n -Ignores target variable\n \n Args: \n threshold_1: float (between 0.0 to 1.0) , default is .10 \n threshold_2: int (between 1 to 100), default is 20 \n tatget variable : string, name of the target variable\n\n \"\"\"\n\n def __init__(self, target, threshold_1=0.1, threshold_2=20):\n self.threshold_1 = threshold_1\n self.threshold_2 = threshold_2\n self.target = target\n\n def fit(\n self, dataset, y=None\n ): # from training data set we are going to learn what columns to drop\n data = dataset\n self.to_drop = []\n sampl_len = len(data[self.target])\n for i in data.drop(self.target, axis=1).columns:\n # get the number of unique counts\n u = pd.DataFrame(data[i].value_counts()).sort_values(\n by=i, ascending=False, inplace=False\n )\n # take len of u and divided it by the total sample numbers, so this will check the 1st rule , has to be low say 10%\n # import pdb; pdb.set_trace()\n first = len(u) / sampl_len\n # then check if most common divided by 2nd most common ratio is 20 or more\n if (\n len(u[i]) == 1\n ): # this means that if column is non variance , automatically make the number big to drop it\n second = 100\n else:\n second = u.iloc[0, 0] / u.iloc[1, 0]\n # if both conditions are true then drop the column, however, we dont want to alter column that indicate NA's\n if (first <= self.threshold_1) and (second >= self.threshold_2) and (i[-10:] != \"_surrogate\"):\n self.to_drop.append(i)\n # now drop if the column has zero variance\n if (second == 100) and (i[-10:] != \"_surrogate\"):\n self.to_drop.append(i)\n\n def transform(\n self, dataset, y=None\n ): # since it is only for training data set , nothing here\n data = dataset.drop(self.to_drop, axis=1)\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n self.fit(data)\n return self.transform(data)\n\n\n# ____________________________________________________________________________________________________________________________\n# rare catagorical variables\nclass Catagorical_variables_With_Rare_levels(BaseEstimator, TransformerMixin):\n \"\"\"\n -Merges levels in catagorical features with more frequent level if they appear less than a threshold count \n e.g. 
Col=[a,a,a,a,b,b,c,c]\n if threshold is set to 2 , then c will be mrged with b because both are below threshold\n There has to be atleast two levels belwo threshold for this to work \n the process will keep going until all the levels have atleast 2(threshold) counts\n -Only handles catagorical features\n -It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first\n -Ignores target variable \n Args: \n threshold: int , default 10\n target: string , name of the target variable\n new_level_name: string , name given to the new level generated, default 'others'\n\n \"\"\"\n\n def __init__(self, target, new_level_name=\"others_infrequent\", threshold=0.05):\n self.threshold = threshold\n self.target = target\n self.new_level_name = new_level_name\n\n def fit(\n self, dataset, y=None\n ): # we will learn for what columnns what are the level to merge as others\n # every level of the catagorical feature has to be more than threshols, if not they will be clubed togather as \"others\"\n # in order to apply, there should be atleast two levels belwo the threshold !\n # creat a place holder\n data = dataset\n self.ph = pd.DataFrame(\n columns=data.drop(self.target, axis=1)\n .select_dtypes(include=\"object\")\n .columns\n )\n # ph.columns = df.columns# catagorical only\n for i in data[self.ph.columns].columns:\n # determine the infrequebt count\n v_c = data[i].value_counts()\n count_th = round(v_c.quantile(self.threshold))\n a = np.sum(\n pd.DataFrame(data[i].value_counts().sort_values())[i] <= count_th\n )\n if a >= 2: # rare levels has to be atleast two\n count = pd.DataFrame(data[i].value_counts().sort_values())\n count.columns = [\"fre\"]\n count = count[count[\"fre\"] <= count_th]\n to_club = list(count.index)\n self.ph.loc[0, i] = to_club\n else:\n self.ph.loc[0, i] = []\n # # also need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others\n # self.ph_level = pd.DataFrame(columns=data.drop(self.target,axis=1).select_dtypes(include=\"object\").columns)\n # for i in self.ph_level.columns:\n # self.ph_level.loc[0,i] = list(data[i].value_counts().sort_values().index)\n\n def transform(self, dataset, y=None): #\n # transorm\n data = dataset\n for i in data[self.ph.columns].columns:\n t_replace = self.ph.loc[0, i]\n data[i].replace(\n to_replace=t_replace, value=self.new_level_name, inplace=True\n )\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n self.fit(data)\n return self.transform(data)\n\n\n# _______________________________________________________________________________________________________________________\n# new catagorical level in test\nclass New_Catagorical_Levels_in_TestData(BaseEstimator, TransformerMixin):\n \"\"\"\n -This treats if a new level appears in the test dataset catagorical's feature (i.e a level on whihc model was not trained previously) \n -It simply replaces the new level in test data set with the most frequent or least frequent level in the same feature in the training data set\n -It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first\n -Ignores target variable \n Args: \n target: string , name of the target variable\n replacement_strategy:string , 'raise exception', 'least frequent' or 'most frequent' (default 'most frequent' )\n\n \"\"\"\n\n def __init__(self, target, replacement_strategy=\"most frequent\"):\n self.target = target\n self.replacement_strategy = replacement_strategy\n\n def fit(self, data, y=None):\n # need to 
make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others\n self.ph_train_level = pd.DataFrame(\n columns=data.drop(self.target, axis=1)\n .select_dtypes(include=\"object\")\n .columns\n )\n for i in self.ph_train_level.columns:\n if self.replacement_strategy == \"least frequent\":\n self.ph_train_level.loc[0, i] = list(\n data[i].value_counts().sort_values().index\n )\n else:\n self.ph_train_level.loc[0, i] = list(data[i].value_counts().index)\n\n def transform(self, data, y=None): #\n # transorm\n # we need to learn the same for test data , and then we will compare to check what levels are new in there\n self.ph_test_level = pd.DataFrame(\n columns=data.drop(self.target, axis=1, errors=\"ignore\")\n .select_dtypes(include=\"object\")\n .columns\n )\n for i in self.ph_test_level.columns:\n self.ph_test_level.loc[0, i] = list(\n data[i].value_counts().sort_values().index\n )\n\n # new we have levels for both test and train, we will start comparing and replacing levels in test set (Only if test set has new levels)\n for i in self.ph_test_level.columns:\n new = list(\n (set(self.ph_test_level.loc[0, i]) - set(self.ph_train_level.loc[0, i]))\n )\n # now if there is a difference , only then replace it\n if len(new) > 0:\n if self.replacement_strategy == \"raise exception\":\n raise ValueError(\n f\"Column '{i}' contains levels '{new}' which were not present in train data.\"\n )\n data[i].replace(new, self.ph_train_level.loc[0, i][0], inplace=True)\n\n return data\n\n def fit_transform(\n self, data, y=None\n ): # There is no transformation happening in training data set, its all about test\n self.fit(data)\n return data\n\n\n# _______________________________________________________________________________________________________________________\n# Group akin features\nclass Group_Similar_Features(BaseEstimator, TransformerMixin):\n \"\"\"\n - Given a list of features , it creates aggregate features \n - features created are Min, Max, Mean, Median, Mode & Std\n - Only works on numerical features\n Args: \n list_of_similar_features: list of list, string , e.g. 
[['col',col2],['col3','col4']]\n group_name: list, group name/names to be added as prefix to aggregate features, e.g ['gorup1','group2']\n \"\"\"\n\n def __init__(self, group_name=[], list_of_grouped_features=[[]]):\n self.list_of_similar_features = list_of_grouped_features\n self.group_name = group_name\n # if list of list not given\n try:\n np.array(self.list_of_similar_features).shape[0]\n except:\n raise (\n \"Group_Similar_Features: list_of_grouped_features is not provided as list of list\"\n )\n\n def fit(self, data, y=None):\n # nothing to learn\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n # # only going to process if there is an actual missing value in training data set\n if len(self.list_of_similar_features) > 0:\n for f, g in zip(self.list_of_similar_features, self.group_name):\n data[g + \"_Min\"] = data[f].apply(np.min, 1)\n data[g + \"_Max\"] = data[f].apply(np.max, 1)\n data[g + \"_Mean\"] = data[f].apply(np.mean, 1)\n data[g + \"_Median\"] = data[f].apply(np.median, 1)\n data[g + \"_Mode\"] = stats.mode(data[f], 1)[0]\n data[g + \"_Std\"] = data[f].apply(np.std, 1)\n\n return data\n else:\n return data\n\n def fit_transform(self, data, y=None):\n return self.transform(data)\n\n\n# ____________________________________________________________________________________________________________________________________________________________________\n# Binning for Continious\nclass Binning(BaseEstimator, TransformerMixin):\n \"\"\"\n - Converts numerical variables to catagorical variable through binning\n - Number of binns are automitically determined through Sturges method\n - Once discretize, original feature will be dropped\n Args:\n features_to_discretize: list of featur names to be binned\n\n \"\"\"\n\n def __init__(self, features_to_discretize):\n self.features_to_discretize = features_to_discretize\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n # only do if features are provided\n if len(self.features_to_discretize) > 0:\n data_t = self.disc.transform(\n np.array(data[self.features_to_discretize]).reshape(\n -1, self.len_columns\n )\n )\n # make pandas data frame\n data_t = pd.DataFrame(\n data_t, columns=self.features_to_discretize, index=data.index\n )\n # all these columns are catagorical\n data_t = data_t.astype(str)\n # drop original columns\n data.drop(self.features_to_discretize, axis=1, inplace=True)\n # add newly created columns\n data = pd.concat((data, data_t), axis=1)\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n # only do if features are given\n\n if len(self.features_to_discretize) > 0:\n\n # place holder for all the features for their binns\n self.binns = []\n for i in self.features_to_discretize:\n # get numbr of binns\n hist, _ = np.histogram(data[i], bins=\"sturges\")\n self.binns.append(len(hist))\n\n # how many colums to deal with\n self.len_columns = len(self.features_to_discretize)\n # now do fit transform\n self.disc = KBinsDiscretizer(\n n_bins=self.binns, encode=\"ordinal\", strategy=\"kmeans\"\n )\n data_t = self.disc.fit_transform(\n np.array(data[self.features_to_discretize]).reshape(\n -1, self.len_columns\n )\n )\n # make pandas data frame\n data_t = pd.DataFrame(\n data_t, columns=self.features_to_discretize, index=data.index\n )\n # all these columns are catagorical\n data_t = data_t.astype(str)\n # drop original columns\n data.drop(self.features_to_discretize, axis=1, inplace=True)\n # add 
newly created columns\n data = pd.concat((data, data_t), axis=1)\n\n return data\n\n\n# ______________________________________________________________________________________________________________________\n# Scaling & Power Transform\nclass Scaling_and_Power_transformation(BaseEstimator, TransformerMixin):\n \"\"\"\n -Given a data set, applies Min Max, Standar Scaler or Power Transformation (yeo-johnson)\n -it is recommended to run Define_dataTypes first\n - ignores target variable \n Args: \n target: string , name of the target variable\n function_to_apply: string , default 'zscore' (standard scaler), all other {'minmaxm','yj','quantile','robust','maxabs'} ( min max,yeo-johnson & quantile power transformation, robust and MaxAbs scaler )\n\n \"\"\"\n\n def __init__(self, target, function_to_apply=\"zscore\", random_state_quantile=42):\n self.target = target\n self.function_to_apply = function_to_apply\n self.random_state_quantile = random_state_quantile\n # self.transform_target = transform_target\n # self.ml_usecase = ml_usecase\n\n def fit(self, dataset, y=None):\n\n data = dataset\n # we only want to apply if there are numeric columns\n self.numeric_features = (\n data.drop(self.target, axis=1, errors=\"ignore\")\n .select_dtypes(include=[\"float32\", \"int64\"])\n .columns\n )\n if len(self.numeric_features) > 0:\n if self.function_to_apply == \"zscore\":\n self.scale_and_power = StandardScaler()\n self.scale_and_power.fit(data[self.numeric_features])\n elif self.function_to_apply == \"minmax\":\n self.scale_and_power = MinMaxScaler()\n self.scale_and_power.fit(data[self.numeric_features])\n elif self.function_to_apply == \"yj\":\n self.scale_and_power = PowerTransformer(\n method=\"yeo-johnson\", standardize=True\n )\n self.scale_and_power.fit(data[self.numeric_features])\n elif self.function_to_apply == \"quantile\":\n self.scale_and_power = QuantileTransformer(\n random_state=self.random_state_quantile,\n output_distribution=\"normal\",\n )\n self.scale_and_power.fit(data[self.numeric_features])\n elif self.function_to_apply == \"robust\":\n self.scale_and_power = RobustScaler()\n self.scale_and_power.fit(data[self.numeric_features])\n elif self.function_to_apply == \"maxabs\":\n self.scale_and_power = MaxAbsScaler()\n self.scale_and_power.fit(data[self.numeric_features])\n\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n\n if len(self.numeric_features) > 0:\n self.data_t = pd.DataFrame(\n self.scale_and_power.transform(data[self.numeric_features])\n )\n # we need to set the same index as original data\n self.data_t.index = data.index\n self.data_t.columns = self.numeric_features\n for i in self.numeric_features:\n data[i] = self.data_t[i]\n return data\n\n else:\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n self.fit(data)\n # convert target if appropriate\n # default behavious is quantile transformer\n # if ((self.ml_usecase == 'regression') and (self.transform_target == True)):\n # self.scale_and_power_target = QuantileTransformer(random_state=self.random_state_quantile,output_distribution='normal')\n # data[self.target]=self.scale_and_power_target.fit_transform(np.array(data[self.target]).reshape(-1,1))\n\n return self.transform(data)\n\n\n# ______________________________________________________________________________________________________________________\n# Scaling & Power Transform\nclass Target_Transformation(BaseEstimator, TransformerMixin):\n \"\"\"\n - Applies Power Transformation (yeo-johnson , Box-Cox) to 
target variable (Applicable to Regression only)\n - 'bc' for Box_Coc & 'yj' for yeo-johnson, default is Box-Cox\n - if target containes negtive / zero values , yeo-johnson is automatically selected \n \n \"\"\"\n\n def __init__(self, target, function_to_apply=\"bc\"):\n self.target = target\n if function_to_apply == \"bc\":\n function_to_apply = \"box-cox\"\n else:\n function_to_apply = \"yeo-johnson\"\n self.function_to_apply = function_to_apply\n\n def inverse_transform(self, dataset, y=None):\n data = self.p_transform_target.inverse_transform(\n np.array(dataset).reshape(-1, 1)\n )\n return data\n\n def fit(self, dataset, y=None):\n self.fit_transform(dataset, y=y)\n\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n if self.target in dataset.columns:\n # apply transformation\n data[self.target] = self.p_transform_target.transform(\n np.array(data[self.target]).reshape(-1, 1)\n )\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n # if target has zero or negative values use yj instead\n if any(data[self.target] <= 0):\n self.function_to_apply = \"yeo-johnson\"\n # apply transformation\n self.p_transform_target = PowerTransformer(method=self.function_to_apply)\n data[self.target] = self.p_transform_target.fit_transform(\n np.array(data[self.target]).reshape(-1, 1)\n )\n\n return data\n\n\n# __________________________________________________________________________________________________________________________\n# Time feature extractor\nclass Make_Time_Features(BaseEstimator, TransformerMixin):\n \"\"\"\n -Given a time feature , it extracts more features\n - Only accepts / works where feature / data type is datetime64[ns]\n - full list of features is:\n ['month','weekday',is_month_end','is_month_start','hour']\n - all extracted features are defined as string / object\n -it is recommended to run Define_dataTypes first\n Args: \n time_feature: list of feature names as datetime64[ns] , default empty/none , if empty/None , it will try to pickup dates automatically where data type is datetime64[ns]\n list_of_features: list of required features , default value ['month','weekday','is_month_end','is_month_start','hour']\n\n \"\"\"\n\n def __init__(\n self,\n time_feature=None,\n list_of_features=[\"month\", \"weekday\", \"is_month_end\", \"is_month_start\", \"hour\"],\n ):\n self.time_feature = time_feature\n self.list_of_features_o = set(list_of_features)\n\n def fit(self, data, y=None):\n if self.time_feature is None:\n self.time_feature = data.select_dtypes(include=[\"datetime64[ns]\"]).columns\n self.has_hour_ = set()\n for i in self.time_feature:\n if \"hour\" in self.list_of_features_o:\n if any(x.hour for x in data[i]):\n self.has_hour_.add(i)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset.copy()\n # run fit transform first\n\n def get_time_features(r):\n features = []\n if \"month\" in self.list_of_features_o:\n features.append((\"_month\", str(r.month)))\n if \"weekday\" in self.list_of_features_o:\n features.append((\"_weekday\", str(r.weekday())))\n if \"is_month_end\" in self.list_of_features_o:\n features.append(\n (\n \"_is_month_end\",\n \"1\"\n if calendar.monthrange(r.year, r.month)[1] == r.day\n else \"0\",\n )\n )\n if \"is_month_start\" in self.list_of_features_o:\n features.append((\"_is_month_start\", \"1\" if r.day == 1 else \"0\"))\n return tuple(features)\n\n # start making features for every column in the time list\n for i in self.time_feature:\n list_of_features = [get_time_features(r) for r 
in data[i]]\n\n fd = defaultdict(list)\n for x in list_of_features:\n for k, v in x:\n fd[k].append(v)\n\n for k, v in fd.items():\n data[i + k] = v\n\n # make hour column if choosen\n if \"hour\" in self.list_of_features_o and i in self.has_hour_:\n h = [r.hour for r in data[i]]\n data[f\"{i}_hour\"] = h\n data[f\"{i}_hour\"] = data[f\"{i}_hour\"].apply(str)\n\n # we dont need time columns any more\n data.drop(self.time_feature, axis=1, inplace=True)\n\n return data\n\n def fit_transform(self, dataset, y=None):\n # if no columns names are given , then pick datetime columns\n self.fit(dataset, y=y)\n\n return self.transform(dataset, y=y)\n\n\n# ____________________________________________________________________________________________________________________________________________________________________\n# Ordinal transformer\nclass Ordinal(BaseEstimator, TransformerMixin):\n \"\"\"\n - converts categorical features into ordinal values \n - takes a dataframe , and information about column names and ordered categories as dict\n - returns float panda data frame\n \"\"\"\n\n def __init__(self, info_as_dict):\n self.info_as_dict = info_as_dict\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n new_data_test = pd.DataFrame(\n self.enc.transform(data[self.info_as_dict.keys()]),\n columns=self.info_as_dict.keys(),\n index=data.index,\n )\n for i in self.info_as_dict.keys():\n data[i] = new_data_test[i]\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n # creat categories from given keys in the data set\n cat_list = []\n for i in self.info_as_dict.values():\n i = [np.array(i)]\n cat_list = cat_list + i\n\n # now do fit transform\n self.enc = OrdinalEncoder(categories=cat_list)\n new_data_train = pd.DataFrame(\n self.enc.fit_transform(data.loc[:, self.info_as_dict.keys()]),\n columns=self.info_as_dict,\n index=data.index,\n )\n # new_data = pd.DataFrame(self.enc.fit_transform(data.loc[:,self.info_as_dict.keys()]))\n for i in self.info_as_dict.keys():\n data[i] = new_data_train[i]\n\n return data\n\n\n# _______________________________________________________________________________________________________________________\n\n# make dummy variables\nclass Dummify(BaseEstimator, TransformerMixin):\n \"\"\"\n - makes one hot encoded variables for dummy variable\n - it is HIGHLY recommended to run the Select_Data_Type class first\n - Ignores target variable\n\n Args: \n target: string , name of the target variable\n \"\"\"\n\n def __init__(self, target):\n self.target = target\n\n # creat ohe object\n self.ohe = OneHotEncoder(handle_unknown=\"ignore\", dtype=np.float32)\n\n def fit(self, dataset, y=None):\n data = dataset\n # will only do this if there are categorical variables\n if len(data.select_dtypes(include=(\"object\")).columns) > 0:\n # we need to learn the column names once the training data set is dummify\n # save non categorical data\n self.data_nonc = data.drop(\n self.target, axis=1, errors=\"ignore\"\n ).select_dtypes(exclude=(\"object\"))\n self.target_column = data[[self.target]]\n # # plus we will only take object data types\n categorical_data = data.drop(\n self.target, axis=1, errors=\"ignore\"\n ).select_dtypes(include=(\"object\"))\n # # now fit the trainin column\n self.ohe.fit(categorical_data)\n self.data_columns = self.ohe.get_feature_names(categorical_data.columns)\n\n return self\n\n def transform(self, dataset, y=None):\n data = dataset.copy()\n # will only 
do this if there are categorical variables\n if len(data.select_dtypes(include=(\"object\")).columns) > 0:\n # only for test data\n self.data_nonc = data.drop(\n self.target, axis=1, errors=\"ignore\"\n ).select_dtypes(exclude=(\"object\"))\n # fit without target and only categorical columns\n array = self.ohe.transform(\n data.drop(self.target, axis=1, errors=\"ignore\").select_dtypes(\n include=(\"object\")\n )\n ).toarray()\n data_dummies = pd.DataFrame(array, columns=self.data_columns)\n data_dummies.index = self.data_nonc.index\n if self.target in data.columns:\n target_column = data[[self.target]]\n else:\n target_column = None\n # now put target , numerical and categorical variables back togather\n data = pd.concat((target_column, self.data_nonc, data_dummies), axis=1)\n del self.data_nonc\n return data\n else:\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset.copy()\n # will only do this if there are categorical variables\n if len(data.select_dtypes(include=(\"object\")).columns) > 0:\n self.fit(data)\n # fit without target and only categorical columns\n array = self.ohe.transform(\n data.drop(self.target, axis=1, errors=\"ignore\").select_dtypes(\n include=(\"object\")\n )\n ).toarray()\n data_dummies = pd.DataFrame(array, columns=self.data_columns)\n data_dummies.index = self.data_nonc.index\n # now put target , numerical and categorical variables back togather\n data = pd.concat((self.target_column, self.data_nonc, data_dummies), axis=1)\n # remove unwanted attributes\n del (self.target_column, self.data_nonc)\n return data\n else:\n return data\n\n\n# _______________________________________________________________________________________________________________________\n# Outlier\nclass Outlier(BaseEstimator, TransformerMixin):\n \"\"\"\n - Removes outlier using ABOD,KNN,IFO,PCA & HOBS using hard voting\n - Only takes numerical / One Hot Encoded features\n \"\"\"\n\n def __init__(\n self, target, contamination=0.20, random_state=42, methods=[\"knn\", \"iso\", \"pca\"]\n ):\n self.target = target\n self.contamination = contamination\n self.random_state = random_state\n self.methods = methods\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, data, y=None):\n return data\n\n def fit_transform(self, dataset, y=None):\n\n # dummify if there are any obects\n if len(dataset.select_dtypes(include=\"object\").columns) > 0:\n self.dummy = Dummify(self.target)\n data = self.dummy.fit_transform(dataset)\n else:\n data = dataset\n\n data_without_target = data.drop(self.target, axis=1)\n\n if \"knn\" in self.methods:\n self.knn = KNN(contamination=self.contamination)\n self.knn.fit(data_without_target)\n knn_predict = self.knn.predict(data_without_target)\n data_without_target[\"knn\"] = knn_predict\n\n if \"iso\" in self.methods:\n self.iso = IForest(\n contamination=self.contamination,\n random_state=self.random_state,\n behaviour=\"new\",\n )\n self.iso.fit(data_without_target)\n iso_predict = self.iso.predict(data_without_target)\n data_without_target[\"iso\"] = iso_predict\n\n if \"pca\" in self.methods:\n self.pca = PCA_od(\n contamination=self.contamination, random_state=self.random_state\n )\n self.pca.fit(data_without_target)\n pca_predict = self.pca.predict(data_without_target)\n data_without_target[\"pca\"] = pca_predict\n\n data_without_target[\"vote_outlier\"] = data_without_target[self.methods].sum(\n axis=1\n )\n\n self.outliers = data_without_target[\n data_without_target[\"vote_outlier\"] == 
len(self.methods)\n ].index\n\n return dataset[~dataset.index.isin(self.outliers)]\n\n\n# ____________________________________________________________________________________________________________________________________________________________________\n# Column Name cleaner transformer\nclass Clean_Colum_Names(BaseEstimator, TransformerMixin):\n \"\"\"\n - Cleans special chars that are not supported by jason format\n \"\"\"\n\n def fit(self, data, y=None):\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n data.columns = data.columns.str.replace(r\"[\\,\\}\\{\\]\\[\\:\\\"\\']\", \"\")\n return data\n\n def fit_transform(self, dataset, y=None):\n return self.transform(dataset, y=y)\n\n\n# __________________________________________________________________________________________________________________________________________________________________________\n# Clustering entire data\nclass Cluster_Entire_Data(BaseEstimator, TransformerMixin):\n \"\"\"\n - Applies kmeans clustering to the entire data set and produce clusters\n - Highly recommended to run the DataTypes_Auto_infer class first\n Args:\n target_variable: target variable (integer or numerical only)\n check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters\n \"\"\"\n\n def __init__(self, target_variable, check_clusters_upto=20, random_state=42):\n self.target = target_variable\n self.check_clusters = check_clusters_upto + 1\n self.random_state = random_state\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n data = data.drop(self.target, axis=1, errors=\"ignore\")\n # first convert to dummy\n if len(data.select_dtypes(include=\"object\").columns) > 0:\n data_t1 = self.dummy.transform(data)\n else:\n data_t1 = data\n\n # # # now make PLS\n # # data_t1 = self.pls.transform(data_t1)\n # # data_t1 = self.pca.transform(data_t1)\n # # now predict with the clustes\n predict = pd.DataFrame(self.k_object.predict(data_t1), index=data.index)\n data[\"data_cluster\"] = predict\n data[\"data_cluster\"] = data[\"data_cluster\"].astype(\"object\")\n if self.target in dataset.columns:\n data[self.target] = dataset[self.target]\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset.copy()\n # first convert to dummy (if there are objects in data set)\n if len(data.select_dtypes(include=\"object\").columns) > 0:\n self.dummy = Dummify(self.target)\n data_t1 = self.dummy.fit_transform(data)\n data_t1 = data_t1.drop(self.target, axis=1)\n else:\n data_t1 = data.drop(self.target, axis=1)\n\n # now make PLS\n # self.pls = PLSRegression(n_components=len(data_t1.columns)-1)\n # data_t1 = self.pls.fit_transform(data_t1.drop(self.target,axis=1),data_t1[self.target])[0]\n # self.pca = PCA(n_components=len(data_t1.columns)-1)\n # data_t1 = self.pca.fit_transform(data_t1.drop(self.target,axis=1))\n\n # we are goign to make a place holder , for 2 to 20 clusters\n self.ph = pd.DataFrame(\n np.arange(2, self.check_clusters, 1), columns=[\"clusters\"]\n )\n self.ph[\"Silhouette\"] = float(0)\n self.ph[\"calinski\"] = float(0)\n\n # Now start making clusters\n for k in self.ph.index:\n c = self.ph[\"clusters\"][k]\n self.k_object = cluster.KMeans(\n n_clusters=c,\n init=\"k-means++\",\n precompute_distances=\"auto\",\n n_init=10,\n random_state=self.random_state,\n )\n self.k_object.fit(data_t1)\n self.ph.iloc[k, 1] = metrics.silhouette_score(\n data_t1, self.k_object.labels_\n )\n 
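# also record the Calinski-Harabasz score for this candidate cluster count\n            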
self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(\n data_t1, self.k_object.labels_\n )\n\n # now standardize the scores and make a total column\n m = MinMaxScaler((-1, 1))\n self.ph[\"calinski\"] = m.fit_transform(\n np.array(self.ph[\"calinski\"]).reshape(-1, 1)\n )\n self.ph[\"Silhouette\"] = m.fit_transform(\n np.array(self.ph[\"Silhouette\"]).reshape(-1, 1)\n )\n self.ph[\"total\"] = self.ph[\"Silhouette\"] + self.ph[\"calinski\"]\n # sort it by total column and take the first row column 0 , that would represent the optimal clusters\n try:\n self.clusters = int(\n self.ph[self.ph[\"total\"] == max(self.ph[\"total\"])][\"clusters\"]\n )\n except: # in case there isnt a decisive measure , take calinski as yeard stick\n self.clusters = int(\n self.ph[self.ph[\"calinski\"] == max(self.ph[\"calinski\"])][\"clusters\"]\n )\n # Now make the final cluster object\n self.k_object = cluster.KMeans(\n n_clusters=self.clusters,\n init=\"k-means++\",\n precompute_distances=\"auto\",\n n_init=10,\n random_state=self.random_state,\n )\n # now do fit predict\n predict = pd.DataFrame(self.k_object.fit_predict(data_t1), index=data.index)\n data[\"data_cluster\"] = predict\n data[\"data_cluster\"] = data[\"data_cluster\"].astype(\"object\")\n\n if self.target in dataset.columns:\n data[self.target] = dataset[self.target]\n\n return data\n\n\n# __________________________________________________________________________________________________________________________________________\n# Clustering catagorical data\nclass Reduce_Cardinality_with_Clustering(BaseEstimator, TransformerMixin):\n \"\"\"\n - Reduces the level of catagorical column / cardinality through clustering \n - Highly recommended to run the DataTypes_Auto_infer class first\n Args:\n target_variable: target variable (integer or numerical only)\n catagorical_feature: list of features on which clustering is to be applied / cardinality to be reduced\n check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters\n \"\"\"\n\n def __init__(\n self,\n target_variable,\n catagorical_feature=[],\n check_clusters_upto=30,\n random_state=42,\n ):\n self.target = target_variable\n self.feature = catagorical_feature\n self.check_clusters = check_clusters_upto + 1\n self.random = random_state\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n # we already know which leval belongs to whihc cluster , so all w need is to replace levels with clusters we already have from training data set\n for i, z in zip(self.feature, self.ph_data):\n data[i] = data[i].replace(list(z[\"levels\"]), z[\"cluster\"])\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset.copy()\n # first convert to dummy\n if len(data.select_dtypes(include=\"object\").columns) > 0:\n self.dummy = Dummify(self.target)\n data_t = self.dummy.fit_transform(data.drop(self.feature, axis=1))\n # data_t1 = data_t1.drop(self.target,axis=1)\n else:\n data_t = data.drop(self.feature, axis=1)\n\n # now make PLS\n self.pls = PLSRegression(\n n_components=2\n ) # since we are only using two componenets to group #PLSRegression(n_components=len(data_t1.columns)-1)\n data_pls = self.pls.fit_transform(\n data_t.drop(self.target, axis=1), data_t[self.target]\n )[0]\n\n # # now we will take one component and then we calculate mean, median, min, max and sd of that one component grouped by the catagorical levels\n self.ph_data = []\n self.ph_clusters = []\n for i 
in self.feature:\n data_t1 = pd.DataFrame(\n dict(levels=data[i], comp1=data_pls[:, 0], comp2=data_pls[:, 1]),\n index=data.index,\n )\n # now group by feature\n data_t1 = data_t1.groupby(\"levels\")\n data_t1 = data_t1[[\"comp1\", \"comp2\"]].agg(\n [\"mean\", \"median\", \"min\", \"max\", \"std\"]\n ) # this gives us a df with only numeric columns (min , max ) and level as index\n # some time if a level has only one record its std will come up as NaN, so convert NaN to 1\n data_t1.fillna(1, inplace=True)\n\n # now number of clusters cant be more than the number of samples in aggregated data , so\n self.check_clusters = min(self.check_clusters, len(data_t1))\n\n # # we are goign to make a place holder , for 2 to 20 clusters\n self.ph = pd.DataFrame(\n np.arange(2, self.check_clusters, 1), columns=[\"clusters\"]\n )\n self.ph[\"Silhouette\"] = float(0)\n self.ph[\"calinski\"] = float(0)\n\n # Now start making clusters\n for k in self.ph.index:\n c = self.ph[\"clusters\"][k]\n self.k_object = cluster.KMeans(\n n_clusters=c,\n init=\"k-means++\",\n precompute_distances=\"auto\",\n n_init=10,\n random_state=self.random,\n )\n self.k_object.fit(data_t1)\n self.ph.iloc[k, 1] = metrics.silhouette_score(\n data_t1, self.k_object.labels_\n )\n self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(\n data_t1, self.k_object.labels_\n )\n\n # now standardize the scores and make a total column\n m = MinMaxScaler((-1, 1))\n self.ph[\"calinski\"] = m.fit_transform(\n np.array(self.ph[\"calinski\"]).reshape(-1, 1)\n )\n self.ph[\"Silhouette\"] = m.fit_transform(\n np.array(self.ph[\"Silhouette\"]).reshape(-1, 1)\n )\n self.ph[\"total\"] = self.ph[\"Silhouette\"] + self.ph[\"calinski\"]\n # sort it by total column and take the first row column 0 , that would represent the optimal clusters\n try:\n self.clusters = int(\n self.ph[self.ph[\"total\"] == max(self.ph[\"total\"])][\"clusters\"]\n )\n except: # in case there isnt a decisive measure , take calinski as yeard stick\n self.clusters = int(\n self.ph[self.ph[\"calinski\"] == max(self.ph[\"calinski\"])][\"clusters\"]\n )\n self.ph_clusters.append(self.ph)\n # Now make the final cluster object\n self.k_object = cluster.KMeans(\n n_clusters=self.clusters,\n init=\"k-means++\",\n precompute_distances=\"auto\",\n n_init=10,\n random_state=self.random,\n )\n # now do fit predict\n predict = self.k_object.fit_predict(data_t1)\n # put it back with the group by aggregate columns\n data_t1[\"cluster\"] = predict\n data_t1[\"cluster\"] = data_t1[\"cluster\"].apply(str)\n # now we dont need all the columns, only the cluster column is required along with the index (index also has a name , we groupy as \"levels\")\n data_t1 = data_t1[[\"cluster\"]]\n # now convert index ot the column\n data_t1.reset_index(\n level=0, inplace=True\n ) # this table now only contains every level and its cluster\n # self.data_t1= data_t1\n # we can now replace cluster with the original level in the original data frame\n data[i] = data[i].replace(list(data_t1[\"levels\"]), data_t1[\"cluster\"])\n self.ph_data.append(data_t1)\n\n if self.target in dataset.columns:\n data[self.target] = dataset[self.target]\n return data\n\n\n# ____________________________________________________________________________________________________________________________________________\n# Clustering catagorical data\nclass Reduce_Cardinality_with_Counts(BaseEstimator, TransformerMixin):\n \"\"\"\n - Reduces the level of catagorical column by replacing levels with their count & converting objects into float\n 
Args:\n catagorical_feature: list of features on which clustering is to be applied\n \"\"\"\n\n def __init__(self, catagorical_feature=[]):\n self.feature = catagorical_feature\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n # we already know level counts\n for i, z, k in zip(self.feature, self.ph_data, self.ph_u):\n data[i] = data[i].replace(k, z[\"counts\"])\n data[i] = data[i].astype(\"float32\")\n\n return data\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n #\n self.ph_data = []\n self.ph_u = []\n for i in self.feature:\n data_t1 = pd.DataFrame(\n dict(\n levels=data[i].groupby(data[i], sort=False).count().index,\n counts=data[i].groupby(data[i], sort=False).count().values,\n )\n )\n u = data[i].unique()\n # replace levels with counts\n data[i].replace(u, data_t1[\"counts\"], inplace=True)\n data[i] = data[i].astype(\"float32\")\n self.ph_data.append(data_t1)\n self.ph_u.append(u)\n\n return data\n\n\n# ____________________________________________________________________________________________________________________________________________\n# take noneliner transformations\nclass Make_NonLiner_Features(BaseEstimator, TransformerMixin):\n \"\"\"\n - convert numerical features into polynomial features\n - it is HIGHLY recommended to run the Autoinfer_Data_Type class first\n - Ignores target variable\n - it picks up data type float32 as numerical \n - for multiclass classification problem , set subclass arg to 'multi'\n\n Args: \n target: string , name of the target variable\n Polynomial_degree: int ,default 2 \n \"\"\"\n\n def __init__(\n self,\n target,\n ml_usecase=\"classification\",\n Polynomial_degree=2,\n other_nonliner_features=[\"sin\", \"cos\", \"tan\"],\n top_features_to_pick=0.20,\n random_state=42,\n subclass=\"ignore\",\n n_jobs=1,\n ):\n self.target = target\n self.Polynomial_degree = Polynomial_degree\n self.ml_usecase = ml_usecase\n self.other_nonliner_features = other_nonliner_features\n self.top_features_to_pick = top_features_to_pick\n self.random_state = random_state\n self.subclass = subclass\n self.n_jobs = n_jobs\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None): # same application for test and train\n data = dataset\n\n self.numeric_columns = (\n data.drop(self.target, axis=1, errors=\"ignore\")\n .select_dtypes(include=\"float32\")\n .columns\n )\n if self.Polynomial_degree >= 2: # dont run anything if powr is les than 2\n # self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include=\"float32\").columns\n # start taking powers\n for i in range(2, self.Polynomial_degree + 1):\n ddc_power = np.power(data[self.numeric_columns], i)\n ddc_col = list(ddc_power.columns)\n ii = str(i)\n ddc_col = [ddc_col + \"_Power\" + ii for ddc_col in ddc_col]\n ddc_power.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_power),axis=1)\n else:\n ddc_power = pd.DataFrame()\n\n # take sin:\n if \"sin\" in self.other_nonliner_features:\n ddc_sin = np.sin(data[self.numeric_columns])\n ddc_col = list(ddc_sin.columns)\n ddc_col = [\"sin(\" + i + \")\" for i in ddc_col]\n ddc_sin.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_sin),axis=1)\n else:\n ddc_sin = pd.DataFrame()\n\n # take cos:\n if \"cos\" in self.other_nonliner_features:\n ddc_cos = np.cos(data[self.numeric_columns])\n ddc_col = list(ddc_cos.columns)\n 
ddc_col = [\"cos(\" + i + \")\" for i in ddc_col]\n ddc_cos.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_cos),axis=1)\n else:\n ddc_cos = pd.DataFrame()\n\n # take tan:\n if \"tan\" in self.other_nonliner_features:\n ddc_tan = np.tan(data[self.numeric_columns])\n ddc_col = list(ddc_tan.columns)\n ddc_col = [\"tan(\" + i + \")\" for i in ddc_col]\n ddc_tan.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_tan),axis=1)\n else:\n ddc_tan = pd.DataFrame()\n\n # dummy_all\n dummy_all = pd.concat((data, ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1)\n # we can select top features using RF\n # # and we only want to do this if the dummy all have more than 50 features\n # if len(dummy_all.columns) > 71:\n\n dummy_all = dummy_all[self.columns_to_keep]\n if self.target in dataset.columns:\n dummy_all[self.target] = dataset[self.target]\n return dummy_all\n\n def fit_transform(self, dataset, y=None):\n\n data = dataset\n\n self.numeric_columns = (\n data.drop(self.target, axis=1, errors=\"ignore\")\n .select_dtypes(include=\"float32\")\n .columns\n )\n if self.Polynomial_degree >= 2: # dont run anything if powr is les than 2\n # self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include=\"float32\").columns\n # start taking powers\n for i in range(2, self.Polynomial_degree + 1):\n ddc_power = np.power(data[self.numeric_columns], i)\n ddc_col = list(ddc_power.columns)\n ii = str(i)\n ddc_col = [ddc_col + \"_Power\" + ii for ddc_col in ddc_col]\n ddc_power.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_power),axis=1)\n else:\n ddc_power = pd.DataFrame()\n\n # take sin:\n if \"sin\" in self.other_nonliner_features:\n ddc_sin = np.sin(data[self.numeric_columns])\n ddc_col = list(ddc_sin.columns)\n ddc_col = [\"sin(\" + i + \")\" for i in ddc_col]\n ddc_sin.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_sin),axis=1)\n else:\n ddc_sin = pd.DataFrame()\n\n # take cos:\n if \"cos\" in self.other_nonliner_features:\n ddc_cos = np.cos(data[self.numeric_columns])\n ddc_col = list(ddc_cos.columns)\n ddc_col = [\"cos(\" + i + \")\" for i in ddc_col]\n ddc_cos.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_cos),axis=1)\n else:\n ddc_cos = pd.DataFrame()\n\n # take tan:\n if \"tan\" in self.other_nonliner_features:\n ddc_tan = np.tan(data[self.numeric_columns])\n ddc_col = list(ddc_tan.columns)\n ddc_col = [\"tan(\" + i + \")\" for i in ddc_col]\n ddc_tan.columns = ddc_col\n # put it back with data dummy\n # data = pd.concat((data,ddc_tan),axis=1)\n else:\n ddc_tan = pd.DataFrame()\n\n # dummy_all\n dummy_all = pd.concat(\n (data[[self.target]], ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1\n )\n # we can select top features using our Feature Selection Classic transformer\n afs = Advanced_Feature_Selection_Classic(\n target=self.target,\n ml_usecase=self.ml_usecase,\n top_features_to_pick=self.top_features_to_pick,\n random_state=self.random_state,\n subclass=self.subclass,\n n_jobs=self.n_jobs,\n )\n dummy_all_t = afs.fit_transform(dummy_all)\n\n data = pd.concat((data, dummy_all_t), axis=1)\n # # making sure no duplicated columns are there\n data = data.loc[:, ~data.columns.duplicated()]\n self.columns_to_keep = data.drop(self.target, axis=1).columns\n return data\n\n\n# 
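Illustrative usage sketch for Make_NonLiner_Features above (assumptions: train_df / test_df are pandas DataFrames with float32 numeric columns and a numeric target column \"y\"):\n# nl = Make_NonLiner_Features(target=\"y\", ml_usecase=\"regression\", Polynomial_degree=2)\n# train_fe = nl.fit_transform(train_df)  # learns columns_to_keep via Advanced_Feature_Selection_Classic below\n# test_fe = nl.transform(test_df)  # reapplies only the selected polynomial / trigonometric columns\n# 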
______________________________________________________________________________________________________________________________________________________\n# Feature Selection\nclass Advanced_Feature_Selection_Classic(BaseEstimator, TransformerMixin):\n \"\"\"\n - Selects important features and reduces the feature space. Feature selection is based on Random Forest , Light GBM and Correlation\n - to run on multiclass classification , set the subclass argument to 'multi'\n \"\"\"\n\n def __init__(\n self,\n target,\n ml_usecase=\"classification\",\n top_features_to_pick=0.10,\n random_state=42,\n subclass=\"ignore\",\n n_jobs=1,\n ):\n self.target = target\n self.ml_usecase = ml_usecase\n self.top_features_to_pick = 1 - top_features_to_pick\n self.random_state = random_state\n self.subclass = subclass\n self.n_jobs = n_jobs\n\n def fit(self, dataset, y=None):\n self.fit_transform(dataset, y=y)\n return self\n\n def transform(self, dataset, y=None):\n # return the data with onlys specific columns\n data = dataset\n # self.selected_columns.remove(self.target)\n data = data[self.selected_columns_test]\n if self.target in dataset.columns:\n data[self.target] = dataset[self.target]\n return data\n\n def fit_transform(self, dataset, y=None):\n\n dummy_all = dataset.copy()\n dummy_all[self.target] = dummy_all[self.target].astype(\"float32\")\n\n # Random Forest\n max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))\n max_sa = min(1000, int(np.sqrt(len(dummy_all))))\n\n if self.ml_usecase == \"classification\":\n m = rfc(\n 100,\n max_depth=5,\n max_features=max_fe,\n n_jobs=self.n_jobs,\n max_samples=max_sa,\n random_state=self.random_state,\n )\n else:\n m = rfr(\n 100,\n max_depth=5,\n max_features=max_fe,\n n_jobs=self.n_jobs,\n max_samples=max_sa,\n random_state=self.random_state,\n )\n\n m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])\n # self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)\n self.fe_imp_table = pd.DataFrame(\n m.feature_importances_,\n columns=[\"Importance\"],\n index=dummy_all.drop(self.target, axis=1).columns,\n )\n self.fe_imp_table = self.fe_imp_table[\n self.fe_imp_table[\"Importance\"]\n >= self.fe_imp_table.quantile(self.top_features_to_pick)[0]\n ]\n top = self.fe_imp_table.index\n dummy_all_columns_RF = dummy_all[top].columns\n\n # LightGBM\n max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))\n max_sa = min(\n float(1000 / len(dummy_all)),\n float(np.sqrt(len(dummy_all) / len(dummy_all))),\n )\n\n if self.ml_usecase == \"classification\":\n m = lgbmc(\n n_estimators=100,\n max_depth=5,\n n_jobs=self.n_jobs,\n subsample=max_sa,\n random_state=self.random_state,\n )\n else:\n m = lgbmr(\n n_estimators=100,\n max_depth=5,\n n_jobs=self.n_jobs,\n subsample=max_sa,\n random_state=self.random_state,\n )\n m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])\n # self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)\n self.fe_imp_table = pd.DataFrame(\n m.feature_importances_,\n columns=[\"Importance\"],\n index=dummy_all.drop(self.target, axis=1).columns,\n )\n self.fe_imp_table = self.fe_imp_table[\n self.fe_imp_table[\"Importance\"]\n >= self.fe_imp_table.quantile(self.top_features_to_pick)[0]\n ]\n top = self.fe_imp_table.index\n dummy_all_columns_LGBM = dummy_all[top].columns\n\n # we can now 
select top correlated feature\n if self.subclass != \"multi\":\n corr = pd.DataFrame(np.corrcoef(dummy_all.T))\n corr.columns = dummy_all.columns\n corr.index = dummy_all.columns\n # corr = corr[self.target].abs().sort_values(ascending=False)[0:self.top_features_to_pick+1]\n corr = corr[self.target].abs()\n corr = corr[corr.index != self.target] # drop the target column\n corr = corr[corr >= corr.quantile(self.top_features_to_pick)]\n corr = pd.DataFrame(dict(features=corr.index, value=corr)).reset_index(\n drop=True\n )\n corr = corr.drop_duplicates(subset=\"value\")\n corr = corr[\"features\"]\n # corr = pd.DataFrame(dict(features=corr.index,value=corr)).reset_index(drop=True)\n # corr = corr.drop_duplicates(subset='value')[0:self.top_features_to_pick+1]\n # corr = corr['features']\n else:\n corr = list()\n\n self.dummy_all_columns_RF = dummy_all_columns_RF\n self.dummy_all_columns_LGBM = dummy_all_columns_LGBM\n self.corr = corr\n\n self.selected_columns = list(\n set(\n [self.target]\n + list(dummy_all_columns_RF)\n + list(corr)\n + list(dummy_all_columns_LGBM)\n )\n )\n\n self.selected_columns_test = (\n dataset[self.selected_columns].drop(self.target, axis=1).columns\n )\n return dataset[self.selected_columns]\n\n\n# _\n\n# ______________________________________________________________________________________________________________________________________________________\n# Boruta Feature Selection algorithm\n# Base on: https://github.com/scikit-learn-contrib/boruta_py/blob/master/boruta/boruta_py.py\nclass Boruta_Feature_Selection(BaseEstimator, TransformerMixin):\n \"\"\"\n Boruta selection algorithm based on borutaPy sklearn-contrib and\n Miron B Kursa, https://m2.icm.edu.pl/boruta/\n Selects the most important features.\n Args:\n target (str): target column name\n ml_usecase (str): case: classification or regression\n top_features_to_pick: to make...\n max_iteration {int): overall iterations of shuffle and train forests \n alpha {float): p-value on which \n the option to favour one measur to another. e.g. 
if value is .6 , during feature selection tug of war, correlation target measure will have a higher say.\n A value of .5 means both measure have equal say \n \"\"\"\n\n def __init__(\n self,\n target,\n ml_usecase=\"classification\",\n top_features_to_pick=1.0,\n max_iteration=200,\n n_iter_no_change=25,\n alpha=0.05,\n random_state=42,\n subclass=\"ignore\",\n n_jobs=1,\n ):\n self.target = target\n self.ml_usecase = ml_usecase\n self.top_features_to_pick = top_features_to_pick\n self.random_state = random_state\n self.subclass = subclass\n self.max_iteration = max_iteration\n self.n_iter_no_change = n_iter_no_change\n self.alpha = alpha\n self.selected_columns_test = []\n self.n_jobs = n_jobs\n\n @property\n def selected_columns(self):\n return self.selected_columns_test + [self.target]\n\n def fit(self, dataset, y=None):\n from .patches.boruta_py import BorutaPyPatched\n\n dummy_data = dataset\n X, y = dummy_data.drop(self.target, axis=1), dummy_data[self.target].values\n y = y.astype(\"float32\")\n X_cols = X.columns\n X = X.values\n\n if self.ml_usecase == \"classification\":\n m = rfc(\n 100,\n max_depth=5,\n n_jobs=self.n_jobs,\n random_state=self.random_state,\n class_weight=\"balanced\",\n )\n else:\n m = rfr(\n 100, max_depth=5, n_jobs=self.n_jobs, random_state=self.random_state,\n )\n\n feat_selector = BorutaPyPatched(\n m,\n n_estimators=\"auto\",\n perc=int(self.top_features_to_pick * 100),\n max_iter=self.max_iteration,\n random_state=self.random_state,\n early_stopping=(self.n_iter_no_change > 0),\n n_iter_no_change=self.n_iter_no_change,\n )\n\n try:\n feat_selector.fit(X, y)\n self.selected_columns_test = list(X_cols[feat_selector.support_])\n except:\n # boruta may errors out if all features are selected\n self.selected_columns_test = list(X_cols)\n\n return self\n\n def transform(self, dataset, y=None):\n if self.target in dataset.columns:\n return dataset[self.selected_columns]\n else:\n return dataset[self.selected_columns_test]\n\n def fit_transform(self, dataset, y=None):\n self.fit(dataset, y=y)\n return self.transform(dataset, y=y)\n\n\n# _________________________________________________________________________________________________________________________________________\nclass Fix_multicollinearity(BaseEstimator, TransformerMixin):\n \"\"\"\n Fixes multicollinearity between predictor variables , also considering the correlation between target variable.\n Only applies to regression or two class classification ML use case\n Takes numerical and one hot encoded variables only\n Args:\n threshold (float): The utmost absolute pearson correlation tolerated beyween featres from 0.0 to 1.0\n target_variable (str): The target variable/column name\n correlation_with_target_threshold: minimum absolute correlation required between every feature and the target variable , default 1.0 (0.0 to 1.0)\n correlation_with_target_preference: float (0.0 to 1.0), default .08 ,while choosing between a pair of features w.r.t multicol & correlation target , this gives \n the option to favour one measur to another. e.g. 
if value is .6 , during feature selection tug of war, correlation target measure will have a higher say.\n A value of .5 means both measure have equal say \n \"\"\"\n\n # mamke a constructer\n\n def __init__(\n self,\n threshold,\n target_variable,\n correlation_with_target_threshold=0.0,\n correlation_with_target_preference=1.0,\n ):\n self.threshold = threshold\n self.target_variable = target_variable\n self.correlation_with_target_threshold = correlation_with_target_threshold\n self.target_corr_weight = correlation_with_target_preference\n self.multicol_weight = 1 - correlation_with_target_preference\n\n # Make fit method\n\n def fit(self, data, y=None):\n \"\"\"\n Args:\n data = takes preprocessed data frame\n Returns:\n None\n \"\"\"\n\n if data[self.target_variable].dtype not in [\"int32\", \"int64\", \"float32\", \"float64\"]:\n raise ValueError('dtype for the target variable should be int32, int64, float32, or float64 only')\n\n # global data1\n data1 = data.select_dtypes(include=[\"int32\", \"int64\", \"float32\", \"float64\"])\n # try:\n # self.data1 = self.data1.astype('float16')\n # except:\n # None\n # make an correlation db with abs correlation db\n # self.data_c = self.data1.T.drop_duplicates()\n # self.data1 = self.data_c.T\n corr = pd.DataFrame(np.corrcoef(data1.T))\n corr.columns = data1.columns\n corr.index = data1.columns\n # corr_matrix = abs(data1.corr())\n corr_matrix = abs(corr)\n\n # for every diagonal value, make it Nan\n corr_matrix.values[\n tuple([np.arange(corr_matrix.shape[0])] * 2)\n ] = np.NaN\n\n # Now Calculate the average correlation of every feature with other, and get a pandas data frame\n avg_cor = pd.DataFrame(corr_matrix.mean())\n avg_cor[\"feature\"] = avg_cor.index\n avg_cor.reset_index(drop=True, inplace=True)\n avg_cor.columns = [\"avg_cor\", \"features\"]\n\n # Calculate the correlation with the target\n targ_cor = pd.DataFrame(corr_matrix[self.target_variable].dropna())\n targ_cor[\"feature\"] = targ_cor.index\n targ_cor.reset_index(drop=True, inplace=True)\n targ_cor.columns = [\"target_variable\", \"features\"]\n\n # Now, add a column for variable name and drop index\n corr_matrix[\"column\"] = corr_matrix.index\n corr_matrix.reset_index(drop=True, inplace=True)\n\n # now we need to melt it , so that we can correlation pair wise , with two columns\n cols = corr_matrix.column\n melt = (\n corr_matrix.melt(id_vars=[\"column\"], value_vars=cols)\n .sort_values(by=\"value\", ascending=False)\n .dropna()\n )\n\n # now bring in the avg correlation for first of the pair\n merge = pd.merge(\n melt, avg_cor, left_on=\"column\", right_on=\"features\"\n ).drop(\"features\", axis=1)\n\n # now bring in the avg correlation for second of the pair\n merge = pd.merge(\n merge, avg_cor, left_on=\"variable\", right_on=\"features\"\n ).drop(\"features\", axis=1)\n\n # now bring in the target correlation for first of the pair\n merge = pd.merge(\n merge, targ_cor, left_on=\"column\", right_on=\"features\"\n ).drop(\"features\", axis=1)\n\n # now bring in the avg correlation for second of the pair\n merge = pd.merge(\n merge, targ_cor, left_on=\"variable\", right_on=\"features\"\n ).drop(\"features\", axis=1)\n\n # sort and save\n merge = merge.sort_values(by=\"value\", ascending=False)\n\n # we need to now eleminate all the pairs that are actually duplicate e.g cor(x,y) = cor(y,x) , they are the same , we need to find these and drop them\n merge[\"all_columns\"] = merge[\"column\"] + merge[\"variable\"]\n\n # this puts all the coresponding pairs of features 
togather , so that we can only take one, since they are just the duplicates\n merge[\"all_columns\"] = [sorted(i) for i in merge[\"all_columns\"]]\n\n # now sort by new column\n merge = merge.sort_values(by=\"all_columns\")\n\n # take every second colums\n merge = merge.iloc[::2, :]\n\n # make a ranking column to eliminate features\n merge[\"rank_x\"] = round(\n self.multicol_weight * (merge[\"avg_cor_y\"] - merge[\"avg_cor_x\"])\n + self.target_corr_weight\n * (merge[\"target_variable_x\"] - merge[\"target_variable_y\"]),\n 6,\n ) # round it to 6 digits\n\n ## Now there will be rows where the rank will be exactly zero, these is where the value (corelartion between features) is exactly one ( like price and price^2)\n ## so in that case , we can simply pick one of the variable\n # but since , features can be in either column, we will drop one column (say 'column') , only if the feature is not in the second column (in variable column)\n # both equations below will return the list of columns to drop from here\n # this is how it goes\n\n ## For the portion where correlation is exactly one !\n one = merge[merge[\"rank_x\"] == 0]\n\n # this portion is complicated\n # table one have all the paired variable having corelation of 1\n # in a nutshell, we can take any column (one side of pair) and delete the other columns (other side of the pair)\n # however one varibale can appear more than once on any of the sides , so we will run for loop to find all pairs...\n # here it goes\n # take a list of all (but unique ) variables that have correlation 1 for eachother, we will make two copies\n u_all = list(\n pd.unique(pd.concat((one[\"column\"], one[\"variable\"]), axis=0))\n )\n u_all_1 = list(\n pd.unique(pd.concat((one[\"column\"], one[\"variable\"]), axis=0))\n )\n # take a list of features (unique) for the first side of the pair\n u_column = pd.unique(one[\"column\"])\n\n # now we are going to start picking each variable from one column (one side of the pair) , check it against the other column (other side of the pair)\n # to pull all coresponding / paired variables , and delete thoes newly varibale names from all unique list\n\n for i in u_column:\n # print(i)\n r = one[one[\"column\"] == i][\"variable\"]\n for q in r:\n if q in u_all:\n # print(\"_\"+q)\n u_all.remove(q)\n\n # now the unique column contains the varibales that should remain, so in order to get the variables that should be deleted :\n to_drop = list(set(u_all_1) - set(u_all))\n\n # to_drop_a =(list(set(one['column'])-set(one['variable'])))\n # to_drop_b =(list(set(one['variable'])-set(one['column'])))\n # to_drop = to_drop_a + to_drop_b\n\n ## now we are to treat where rank is not Zero and Value (correlation) is greater than a specific threshold\n non_zero = merge[\n (merge[\"rank_x\"] != 0.0) & (merge[\"value\"] >= self.threshold)\n ]\n\n # pick the column to delete\n non_zero_list = list(\n np.where(\n non_zero[\"rank_x\"] < 0,\n non_zero[\"column\"],\n non_zero[\"variable\"],\n )\n )\n\n # add two list\n self.to_drop = to_drop + non_zero_list\n\n # make sure that target column is not a part of the list\n try:\n self.to_drop.remove(self.target_variable)\n except:\n pass\n\n # now we want to keep only the columns that have more correlation with traget by a threshold\n self.to_drop_taret_correlation = []\n if self.correlation_with_target_threshold != 0.0:\n corr = pd.DataFrame(\n np.corrcoef(data.drop(self.to_drop, axis=1).T),\n columns=data.drop(self.to_drop, axis=1).columns,\n index=data.drop(self.to_drop, axis=1).columns,\n )\n 
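# among the surviving features, flag those whose absolute correlation with the target is below correlation_with_target_threshold so they can be dropped as well\n            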
self.to_drop_taret_correlation = corr[self.target_variable].abs()\n # to_drop_taret_correlation = data.drop(self.to_drop,axis=1).corr()[target_variable].abs()\n self.to_drop_taret_correlation = self.to_drop_taret_correlation[\n self.to_drop_taret_correlation < self.correlation_with_target_threshold\n ]\n self.to_drop_taret_correlation = list(self.to_drop_taret_correlation.index)\n # to_drop = corr + to_drop\n try:\n self.to_drop_taret_correlation.remove(self.target_variable)\n except:\n pass\n return self\n\n # now Transform\n def transform(self, dataset, y=None):\n \"\"\"\n Args:f\n data = takes preprocessed data frame\n Returns:\n data frame\n \"\"\"\n data = dataset\n data = data.drop(self.to_drop, axis=1)\n # now drop less correlated data\n data.drop(self.to_drop_taret_correlation, axis=1, inplace=True, errors=\"ignore\")\n return data\n\n # fit_transform\n def fit_transform(self, data, y=None):\n\n \"\"\"\n Args:\n data = takes preprocessed data frame\n Returns:\n data frame\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n\n# ____________________________________________________________________________________________________________________________________________________________________\n# handle perfect multicollinearity\nclass Remove_100(BaseEstimator, TransformerMixin):\n \"\"\"\n - Takes DF, return data frame while removing features that are perfectly correlated (droping one)\n \"\"\"\n\n def __init__(self, target):\n self.target = target\n self.columns_to_drop = []\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n return dataset.drop(self.columns_to_drop, axis=1)\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n\n targetless_data = data.drop(self.target, axis=1)\n\n # correlation should be calculated between at least two features, if there is only 1, there is nothing to delete\n if len(targetless_data.columns) <= 1:\n return data\n\n corr = pd.DataFrame(np.corrcoef(targetless_data.T))\n corr.columns = targetless_data.columns\n corr.index = targetless_data.columns\n corr_matrix = abs(corr)\n\n # Now, add a column for variable name and drop index\n corr_matrix[\"column\"] = corr_matrix.index\n corr_matrix.reset_index(drop=True, inplace=True)\n\n # now we need to melt it , so that we can correlation pair wise , with two columns\n cols = corr_matrix.column\n melt = corr_matrix.melt(id_vars=[\"column\"], value_vars=cols).sort_values(\n by=\"value\", ascending=False\n ) # .dropna()\n melt[\"value\"] = round(melt[\"value\"], 2) # round it to two digits\n\n # now pick variables where value is one and 'column' != variabe ( both columns are not same)\n c1 = melt[\"value\"] == 1.00\n c2 = melt[\"column\"] != melt[\"variable\"]\n melt = melt[((c1 == True) & (c2 == True))]\n\n # we need to now eleminate all the pairs that are actually duplicate e.g cor(x,y) = cor(y,x) , they are the same , we need to find these and drop them\n melt[\"all_columns\"] = melt[\"column\"] + melt[\"variable\"]\n\n # this puts all the coresponding pairs of features togather , so that we can only take one, since they are just the duplicates\n melt[\"all_columns\"] = [sorted(i) for i in melt[\"all_columns\"]]\n\n # # now sort by new column\n melt = melt.sort_values(by=\"all_columns\")\n\n # # take every second colums\n melt = melt.iloc[::2, :]\n\n # lets keep the columns on the left hand side of the table\n self.columns_to_drop = melt[\"variable\"]\n\n return data.drop(self.columns_to_drop, axis=1)\n\n\n# 
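Illustrative usage sketch for the two collinearity steps above (assumption: df is a numeric / one-hot-encoded pandas DataFrame containing the target column \"y\"):\n# fm = Fix_multicollinearity(threshold=0.9, target_variable=\"y\")\n# df_reduced = fm.fit_transform(df)  # drops one feature from each highly correlated pair\n# r100 = Remove_100(target=\"y\")\n# df_clean = r100.fit_transform(df_reduced)  # drops perfectly correlated duplicate features\n# 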
_______________________________________________________________________________________________________________________________________________________________________________________________\n# custome DFS\nclass DFS_Classic(BaseEstimator, TransformerMixin):\n \"\"\"\n - Automated feature interactions using multiplication, division , addition & substraction\n - Only accepts numeric / One Hot Encoded features\n - Takes DF, return same DF \n - for Multiclass classification problem , set subclass arg as 'multi'\n \"\"\"\n\n def __init__(\n self,\n target,\n ml_usecase=\"classification\",\n interactions=[\"multiply\", \"divide\", \"add\", \"subtract\"],\n top_features_to_pick_percentage=0.05,\n random_state=42,\n subclass=\"ignore\",\n n_jobs=1,\n ):\n self.target = target\n self.interactions = interactions\n self.top_n_correlated = top_features_to_pick_percentage # (this will be 1- top_features , but handled in the Advance_feature_selection )\n self.ml_usecase = ml_usecase\n self.random_state = random_state\n self.subclass = subclass\n self.n_jobs = n_jobs\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n\n data = dataset\n\n data_without_target = data.drop(self.target, axis=1, errors=\"ignore\")\n # for multiplication:\n # we need bot catagorical and numerical columns\n\n if \"multiply\" in self.interactions:\n\n data_multiply = pd.concat(\n [\n data_without_target.mul(col[1], axis=\"index\")\n for col in data_without_target.iteritems()\n ],\n axis=1,\n )\n data_multiply.columns = [\n \"_multiply_\".join([i, j])\n for j in data_without_target.columns\n for i in data_without_target.columns\n ]\n # we dont need to apply rest of conditions\n data_multiply.index = data.index\n else:\n data_multiply = pd.DataFrame()\n\n # for division, we only want it to apply to numerical columns\n if \"divide\" in self.interactions:\n\n data_divide = pd.concat(\n [\n data_without_target[self.numeric_columns].div(col[1], axis=\"index\")\n for col in data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_divide.columns = [\n \"_divide_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n data_divide.replace([np.inf, -np.inf], 0, inplace=True)\n data_divide.fillna(0, inplace=True)\n data_divide.index = data.index\n else:\n data_divide = pd.DataFrame()\n\n # for addition, we only want it to apply to numerical columns\n if \"add\" in self.interactions:\n\n data_add = pd.concat(\n [\n data_without_target[self.numeric_columns].add(col[1], axis=\"index\")\n for col in data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_add.columns = [\n \"_add_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n data_add.index = data.index\n else:\n data_add = pd.DataFrame()\n\n # for substraction, we only want it to apply to numerical columns\n if \"subtract\" in self.interactions:\n\n data_substract = pd.concat(\n [\n data_without_target[self.numeric_columns].sub(col[1], axis=\"index\")\n for col in data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_substract.columns = [\n \"_subtract_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n data_substract.index = data.index\n else:\n data_substract = 
pd.DataFrame()\n\n # get all the dummy data combined\n dummy_all = pd.concat(\n (data, data_multiply, data_divide, data_add, data_substract), axis=1\n )\n del data_multiply\n del data_divide\n del data_add\n del data_substract\n # now only return the columns we want:\n dummy_all = dummy_all[self.columns_to_keep]\n if self.target in dataset.columns:\n dummy_all[self.target] = dataset[self.target]\n return dummy_all\n\n def fit_transform(self, dataset, y=None):\n\n data = dataset\n\n data_without_target = data.drop(self.target, axis=1, errors=\"ignore\")\n\n # we need to seperate numerical and ont hot encoded columns\n # self.ohe_columns = [i if ((len(data[i].unique())==2) & (data[i].unique()[0] in [0,1]) & (data[i].unique()[1] in [0,1]) ) else None for i in data.drop(self.target,axis=1).columns]\n self.ohe_columns = [\n i\n for i in data.columns\n if data[i].nunique() == 2\n and data[i].unique()[0] in [0, 1]\n and data[i].unique()[1] in [0, 1]\n ]\n # self.ohe_columns = [i for i in self.ohe_columns if i is not None]\n self.numeric_columns = [\n i for i in data_without_target.columns if i not in self.ohe_columns\n ]\n target_variable = data[[self.target]]\n\n # for multiplication:\n # we need bot catagorical and numerical columns\n\n if \"multiply\" in self.interactions:\n data_multiply = pd.concat(\n [\n data_without_target.mul(col[1], axis=\"index\")\n for col in data_without_target.iteritems()\n ],\n axis=1,\n )\n data_multiply.columns = [\n \"_multiply_\".join([i, j])\n for j in data_without_target.columns\n for i in data_without_target.columns\n ]\n # we dont need columns that are self interacted\n col = [\n \"_multiply_\".join([i, j])\n for j in data_without_target.columns\n for i in data_without_target.columns\n if i != j\n ]\n data_multiply = data_multiply[col]\n # we dont need columns where the sum of the total column is null (to catagorical variables never happening togather)\n col1 = [\n i for i in data_multiply.columns if np.nansum(data_multiply[i]) != 0\n ]\n data_multiply = data_multiply[col1]\n data_multiply.index = data.index\n else:\n data_multiply = pd.DataFrame()\n\n # for division, we only want it to apply to numerical columns\n if \"divide\" in self.interactions:\n data_divide = pd.concat(\n [\n data_without_target[self.numeric_columns].div(col[1], axis=\"index\")\n for col in data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_divide.columns = [\n \"_divide_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n # we dont need columns that are self interacted\n col = [\n \"_divide_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n if i != j\n ]\n data_divide = data_divide[col]\n # we dont need columns where the sum of the total column is null (to catagorical variables never happening togather)\n col1 = [i for i in data_divide.columns if np.nansum(data_divide[i]) != 0]\n data_divide = data_divide[col1]\n # additionally we need to fill anll the possible NaNs\n data_divide.replace([np.inf, -np.inf], 0, inplace=True)\n data_divide.fillna(0, inplace=True)\n data_divide.index = data.index\n else:\n data_divide = pd.DataFrame()\n\n # for addition, we only want it to apply to numerical columns\n if \"add\" in self.interactions:\n data_add = pd.concat(\n [\n data_without_target[self.numeric_columns].add(col[1], axis=\"index\")\n for col in 
data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_add.columns = [\n \"_add_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n # we dont need columns that are self interacted\n col = [\n \"_add_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n if i != j\n ]\n data_add = data_add[col]\n # we dont need columns where the sum of the total column is null (to catagorical variables never happening togather)\n col1 = [i for i in data_add.columns if np.nansum(data_add[i]) != 0]\n data_add = data_add[col1]\n data_add.index = data.index\n else:\n data_add = pd.DataFrame()\n\n # for substraction, we only want it to apply to numerical columns\n if \"subtract\" in self.interactions:\n data_substract = pd.concat(\n [\n data_without_target[self.numeric_columns].sub(col[1], axis=\"index\")\n for col in data_without_target[self.numeric_columns].iteritems()\n ],\n axis=1,\n )\n data_substract.columns = [\n \"_subtract_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n ]\n # we dont need columns that are self interacted\n col = [\n \"_subtract_\".join([i, j])\n for j in data_without_target[self.numeric_columns].columns\n for i in data_without_target[self.numeric_columns].columns\n if i != j\n ]\n data_substract = data_substract[col]\n # we dont need columns where the sum of the total column is null (to catagorical variables never happening togather)\n col1 = [\n i for i in data_substract.columns if np.nansum(data_substract[i]) != 0\n ]\n data_substract = data_substract[col1]\n data_substract.index = data.index\n else:\n data_substract = pd.DataFrame()\n\n # get all the dummy data combined\n dummy_all = pd.concat(\n (data_multiply, data_divide, data_add, data_substract), axis=1\n )\n del data_multiply\n del data_divide\n del data_add\n del data_substract\n\n dummy_all[self.target] = target_variable\n self.dummy_all = dummy_all\n\n # apply advanced feature selectio\n afs = Advanced_Feature_Selection_Classic(\n target=self.target,\n ml_usecase=self.ml_usecase,\n top_features_to_pick=self.top_n_correlated,\n random_state=self.random_state,\n subclass=self.subclass,\n n_jobs=self.n_jobs,\n )\n dummy_all_t = afs.fit_transform(dummy_all)\n\n data_fe_final = pd.concat(\n (data, dummy_all_t), axis=1\n ) # self.data_fe[self.corr]\n # # making sure no duplicated columns are there\n data_fe_final = data_fe_final.loc[\n :, ~data_fe_final.columns.duplicated()\n ] # new added\n # # remove thetarget column\n # # this is the final data we want that includes original , fe data plus impact of top n correlated\n self.columns_to_keep = data_fe_final.drop(self.target, axis=1).columns\n del dummy_all\n del dummy_all_t\n\n return data_fe_final\n\n\n# ____________________________________________________________________________________________________________________________________________________________________\n# Empty transformer\nclass Empty(BaseEstimator, TransformerMixin):\n \"\"\"\n - Takes DF, return same DF \n \"\"\"\n\n def fit(self, data, y=None):\n return self\n\n def transform(self, data, y=None):\n return data\n\n def fit_transform(self, data, y=None):\n return self.transform(data)\n\n\n# ____________________________________________________________________________________________________________________________________\n# 
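Illustrative usage sketch for the dimensionality reduction step defined below (assumption: df is a numeric pandas DataFrame containing the target column \"y\"):\n# rd = Reduce_Dimensions_For_Supervised_Path(target=\"y\", method=\"pca_liner\", variance_retained_or_number_of_components=0.99)\n# df_pca = rd.fit_transform(df)  # returns Component_1..Component_n plus the target column\n# 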
reduce feature space\nclass Reduce_Dimensions_For_Supervised_Path(BaseEstimator, TransformerMixin):\n \"\"\"\n - Takes DF, return same DF with different types of dimensionality reduction modles (pca_liner , pca_kernal, tsne , pls, incremental)\n - except pca_liner, every other method takes integer as number of components \n - only takes numeric variables (float & One Hot Encoded)\n - it is intended to solve supervised ML usecases , such as classification / regression\n \"\"\"\n\n def __init__(\n self,\n target,\n method=\"pca_liner\",\n variance_retained_or_number_of_components=0.99,\n random_state=42,\n ):\n self.target = target\n self.variance_retained = variance_retained_or_number_of_components\n self.random_state = random_state\n self.method = method\n\n def fit(self, data, y=None):\n self.fit_transform(data, y=y)\n return self\n\n def transform(self, dataset, y=None):\n data = dataset\n if self.method in [\n \"pca_liner\",\n \"pca_kernal\",\n \"tsne\",\n \"incremental\",\n ]: # if self.method in ['pca_liner' , 'pca_kernal', 'tsne' , 'incremental','psa']\n data = data.drop(self.target, axis=1, errors=\"ignore\")\n data_pca = self.pca.transform(data)\n data_pca = pd.DataFrame(data_pca)\n data_pca.columns = [\n \"Component_\" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)\n ]\n data_pca.index = data.index\n if self.target in dataset.columns:\n data_pca[self.target] = dataset[self.target]\n return data_pca\n else:\n return dataset\n\n def fit_transform(self, dataset, y=None):\n data = dataset\n if self.method == \"pca_liner\":\n self.pca = PCA(self.variance_retained, random_state=self.random_state)\n # fit transform\n data_pca = self.pca.fit_transform(data.drop(self.target, axis=1))\n data_pca = pd.DataFrame(data_pca)\n data_pca.columns = [\n \"Component_\" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)\n ]\n data_pca.index = data.index\n data_pca[self.target] = data[self.target]\n return data_pca\n elif self.method == \"pca_kernal\": # take number of components only\n self.pca = KernelPCA(\n self.variance_retained,\n kernel=\"rbf\",\n random_state=self.random_state,\n n_jobs=-1,\n )\n # fit transform\n data_pca = self.pca.fit_transform(data.drop(self.target, axis=1))\n data_pca = pd.DataFrame(data_pca)\n data_pca.columns = [\n \"Component_\" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)\n ]\n data_pca.index = data.index\n data_pca[self.target] = data[self.target]\n return data_pca\n # elif self.method == 'pls': # take number of components only\n # self.pca = PLSRegression(self.variance_retained,scale=False)\n # # fit transform\n # data_pca = self.pca.fit_transform(data.drop(self.target,axis=1),data[self.target])[0]\n # data_pca = pd.DataFrame(data_pca)\n # data_pca.columns = [\"Component_\"+str(i) for i in np.arange(1,len(data_pca.columns)+1)]\n # data_pca.index = data.index\n # data_pca[self.target] = data[self.target]\n # return(data_pca)\n elif self.method == \"tsne\": # take number of components only\n self.pca = TSNE(self.variance_retained, random_state=self.random_state)\n # fit transform\n data_pca = self.pca.fit_transform(data.drop(self.target, axis=1))\n data_pca = pd.DataFrame(data_pca)\n data_pca.columns = [\n \"Component_\" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)\n ]\n data_pca.index = data.index\n data_pca[self.target] = data[self.target]\n return data_pca\n elif self.method == \"incremental\": # take number of components only\n self.pca = IncrementalPCA(self.variance_retained)\n # fit transform\n data_pca = 
self.pca.fit_transform(data.drop(self.target, axis=1))\n data_pca = pd.DataFrame(data_pca)\n data_pca.columns = [\n \"Component_\" + str(i) for i in np.arange(1, len(data_pca.columns) + 1)\n ]\n data_pca.index = data.index\n data_pca[self.target] = data[self.target]\n return data_pca\n else:\n return dataset\n\n\n# ___________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________\n# preprocess_all_in_one\ndef Preprocess_Path_One(\n train_data,\n target_variable,\n ml_usecase=None,\n test_data=None,\n categorical_features=[],\n numerical_features=[],\n time_features=[],\n features_todrop=[],\n display_types=True,\n imputation_type=\"simple\",\n numeric_imputation_strategy=\"mean\",\n categorical_imputation_strategy=\"not_available\",\n imputation_classifier=None,\n imputation_regressor=None,\n imputation_max_iter=10,\n imputation_warm_start=False,\n imputation_order=\"ascending\",\n apply_zero_nearZero_variance=False,\n club_rare_levels=False,\n rara_level_threshold_percentage=0.05,\n apply_untrained_levels_treatment=False,\n untrained_levels_treatment_method=\"least frequent\",\n apply_ordinal_encoding=False,\n ordinal_columns_and_categories={},\n apply_cardinality_reduction=False,\n cardinal_method=\"cluster\",\n cardinal_features=[],\n apply_binning=False,\n features_to_binn=[],\n apply_grouping=False,\n group_name=[],\n features_to_group_ListofList=[[]],\n apply_polynomial_trigonometry_features=False,\n max_polynomial=2,\n trigonometry_calculations=[\"sin\", \"cos\", \"tan\"],\n top_poly_trig_features_to_select_percentage=0.20,\n scale_data=False,\n scaling_method=\"zscore\",\n Power_transform_data=False,\n Power_transform_method=\"quantile\",\n remove_outliers=False,\n outlier_contamination_percentage=0.01,\n outlier_methods=[\"pca\", \"iso\", \"knn\"],\n dummify_categoricals=True,\n apply_feature_selection=False,\n feature_selection_top_features_percentage=0.80,\n feature_selection_method=\"classic\",\n remove_multicollinearity=False,\n maximum_correlation_between_features=0.90,\n remove_perfect_collinearity=False,\n apply_feature_interactions=False,\n feature_interactions_to_apply=[\"multiply\", \"divide\", \"add\", \"subtract\"],\n feature_interactions_top_features_to_select_percentage=0.01,\n cluster_entire_data=False,\n range_of_clusters_to_try=20,\n apply_pca=False,\n pca_method=\"pca_liner\",\n pca_variance_retained_or_number_of_components=0.99,\n random_state=42,\n n_jobs=-1,\n):\n\n \"\"\"\n Follwoing preprocess steps are taken:\n - 1) Auto infer data types \n - 2) Impute (simple or with surrogate columns)\n - 3) Ordinal Encoder\n - 4) Drop categorical variables that have zero variance or near zero variance\n - 5) Club categorical variables levels togather as a new level (other_infrequent) that are rare / at the bottom 5% of the variable distribution\n - 6) Club unseen levels in test dataset with most/least frequent levels in train dataset \n - 7) Reduce high cardinality in categorical features using clustering or counts\n - 8) Generate sub features from time feature such as 'month','weekday',is_month_end','is_month_start' & 'hour'\n - 9) Group features by calculating min, max, mean, median & sd of similar features\n -10) Make nonliner features (polynomial, sin , cos & tan)\n -11) Scales & Power Transform (zscore,minmax,yeo-johnson,quantile,maxabs,robust) , including option to transform target 
variable\n -12) Apply binning to continious variable when numeric features are provided as a list \n -13) Detect & remove outliers using isolation forest, knn and PCA\n -14) Apply clusters to segment entire data\n -15) One Hot / Dummy encoding\n -16) Remove special characters from column names such as commas, square brackets etc to make it competible with jason dependednt models\n -17) Feature Selection throuh Random Forest , LightGBM and Pearson Correlation / Boruta algorithm\n -18) Fix multicollinearity\n -19) Feature Interaction (DFS) , multiply , divided , add and substract features\n -20) Apply diamension reduction techniques such as pca_liner, pca_kernal, incremental, tsne \n - except for pca_liner, all other method only takes number of component (as integer) i.e no variance explaination metohd available \n \"\"\"\n\n # also make sure that all the column names are string\n train_data.columns = [str(i) for i in train_data.columns]\n if test_data is not None:\n test_data.columns = [str(i) for i in test_data.columns]\n\n if target_variable is None:\n ml_usecase = \"regression\"\n else:\n # WE NEED TO AUTO INFER the ml use case\n inferred_ml_usecase, subcase = infer_ml_usecase(train_data[target_variable])\n if ml_usecase is None:\n ml_usecase = inferred_ml_usecase\n\n dtypes = DataTypes_Auto_infer(\n target=target_variable,\n ml_usecase=ml_usecase,\n categorical_features=categorical_features,\n numerical_features=numerical_features,\n time_features=time_features,\n features_todrop=features_todrop,\n display_types=display_types,\n id_columns=find_id_columns(\n train_data, target_variable, numerical_features=numerical_features\n ),\n )\n\n # for imputation\n # imputation_type = \"A\"\n if imputation_type == \"simple\":\n imputer = Simple_Imputer(\n numeric_strategy=numeric_imputation_strategy,\n target_variable=target_variable,\n categorical_strategy=categorical_imputation_strategy,\n )\n # elif imputation_type == \"surrogate imputer\":\n # imputer = Surrogate_Imputer(numeric_strategy=numeric_imputation_strategy,categorical_strategy=categorical_imputation_strategy,target_variable=target_variable)\n else:\n imputer = Iterative_Imputer(\n classifier=imputation_classifier,\n regressor=imputation_regressor,\n target=target_variable,\n initial_strategy_numeric=numeric_imputation_strategy,\n max_iter=imputation_max_iter,\n warm_start=imputation_warm_start,\n imputation_order=imputation_order,\n random_state=random_state,\n ordinal_columns=ordinal_columns_and_categories.keys(),\n )\n\n # for zero_near_zero\n if apply_zero_nearZero_variance == True:\n znz = Zroe_NearZero_Variance(target=target_variable)\n else:\n znz = SKLEARN_EMPTY_STEP\n\n # for rare levels clubbing:\n\n if club_rare_levels == True:\n club_R_L = Catagorical_variables_With_Rare_levels(\n target=target_variable, threshold=rara_level_threshold_percentage\n )\n else:\n club_R_L = SKLEARN_EMPTY_STEP\n\n # untrained levels in test\n if apply_untrained_levels_treatment == True:\n new_levels = New_Catagorical_Levels_in_TestData(\n target=target_variable,\n replacement_strategy=untrained_levels_treatment_method,\n )\n else:\n new_levels = New_Catagorical_Levels_in_TestData(\n target=target_variable, replacement_strategy=\"raise exception\",\n )\n\n # untrained levels in test(ordinal specific)\n if apply_untrained_levels_treatment == True:\n new_levels1 = New_Catagorical_Levels_in_TestData(\n target=target_variable,\n replacement_strategy=untrained_levels_treatment_method,\n )\n else:\n new_levels1 = New_Catagorical_Levels_in_TestData(\n 
target=target_variable, replacement_strategy=\"raise exception\",\n )\n\n # cardinality:\n if apply_cardinality_reduction == True and cardinal_method == \"cluster\":\n cardinality = Reduce_Cardinality_with_Clustering(\n target_variable=target_variable,\n catagorical_feature=cardinal_features,\n check_clusters_upto=50,\n random_state=random_state,\n )\n elif apply_cardinality_reduction == True and cardinal_method == \"count\":\n cardinality = Reduce_Cardinality_with_Counts(\n catagorical_feature=cardinal_features\n )\n else:\n cardinality = SKLEARN_EMPTY_STEP\n\n # ordinal coding\n if apply_ordinal_encoding == True:\n # we need to make sure that if the columns chosen by user have NA & imputer strategy is not_availablle then we add that to the category first\n for i in ordinal_columns_and_categories.keys():\n if sum(train_data[i].isnull()) > 0:\n if categorical_imputation_strategy == \"not_available\":\n lis = [\"not_available\"] + ordinal_columns_and_categories[i]\n ordinal_columns_and_categories.update({i: lis})\n\n ordinal = Ordinal(info_as_dict=ordinal_columns_and_categories)\n else:\n ordinal = SKLEARN_EMPTY_STEP\n\n # grouping\n if apply_grouping == True:\n group = Group_Similar_Features(\n group_name=group_name, list_of_grouped_features=features_to_group_ListofList\n )\n else:\n group = SKLEARN_EMPTY_STEP\n\n # non_liner_features\n if apply_polynomial_trigonometry_features == True:\n nonliner = Make_NonLiner_Features(\n target=target_variable,\n ml_usecase=ml_usecase,\n Polynomial_degree=max_polynomial,\n other_nonliner_features=trigonometry_calculations,\n top_features_to_pick=top_poly_trig_features_to_select_percentage,\n random_state=random_state,\n subclass=subcase,\n n_jobs=n_jobs,\n )\n else:\n nonliner = SKLEARN_EMPTY_STEP\n\n # binning\n if apply_binning == True:\n binn = Binning(features_to_discretize=features_to_binn)\n else:\n binn = SKLEARN_EMPTY_STEP\n\n # scaling & power transform\n if scale_data == True:\n scaling = Scaling_and_Power_transformation(\n target=target_variable,\n function_to_apply=scaling_method,\n random_state_quantile=random_state,\n )\n else:\n scaling = SKLEARN_EMPTY_STEP\n\n if Power_transform_data == True:\n P_transform = Scaling_and_Power_transformation(\n target=target_variable,\n function_to_apply=Power_transform_method,\n random_state_quantile=random_state,\n )\n else:\n P_transform = SKLEARN_EMPTY_STEP\n\n # for Time Variables\n feature_time = Make_Time_Features()\n\n if dummify_categoricals == True:\n dummy = Dummify(target_variable)\n else:\n dummy = SKLEARN_EMPTY_STEP\n\n # remove putliers\n if remove_outliers == True:\n rem_outliers = Outlier(\n target=target_variable,\n contamination=outlier_contamination_percentage,\n random_state=random_state,\n methods=outlier_methods,\n )\n else:\n rem_outliers = SKLEARN_EMPTY_STEP\n\n # cluster all data:\n if cluster_entire_data == True:\n cluster_all = Cluster_Entire_Data(\n target_variable=target_variable,\n check_clusters_upto=range_of_clusters_to_try,\n random_state=random_state,\n )\n else:\n cluster_all = SKLEARN_EMPTY_STEP\n\n # clean column names for special char\n clean_names = Clean_Colum_Names()\n\n # feature selection\n if apply_feature_selection:\n # TODO: add autoselect\n if feature_selection_method == \"boruta\":\n feature_select = Boruta_Feature_Selection(\n target=target_variable,\n ml_usecase=ml_usecase,\n top_features_to_pick=feature_selection_top_features_percentage,\n random_state=random_state,\n subclass=subcase,\n n_jobs=n_jobs,\n )\n else:\n feature_select = 
Advanced_Feature_Selection_Classic(\n target=target_variable,\n ml_usecase=ml_usecase,\n top_features_to_pick=feature_selection_top_features_percentage,\n random_state=random_state,\n subclass=subcase,\n n_jobs=n_jobs,\n )\n else:\n feature_select = SKLEARN_EMPTY_STEP\n\n # removing multicollinearity\n if remove_multicollinearity == True and subcase != \"multi\":\n fix_multi = Fix_multicollinearity(\n target_variable=target_variable,\n threshold=maximum_correlation_between_features,\n )\n elif remove_multicollinearity == True and subcase == \"multi\":\n fix_multi = Fix_multicollinearity(\n target_variable=target_variable,\n threshold=maximum_correlation_between_features,\n correlation_with_target_preference=0.0,\n )\n else:\n fix_multi = SKLEARN_EMPTY_STEP\n\n # remove 100% collinearity\n if remove_perfect_collinearity == True:\n fix_perfect = Remove_100(target=target_variable)\n else:\n fix_perfect = SKLEARN_EMPTY_STEP\n\n # apply dfs\n if apply_feature_interactions == True:\n dfs = DFS_Classic(\n target=target_variable,\n ml_usecase=ml_usecase,\n interactions=feature_interactions_to_apply,\n top_features_to_pick_percentage=feature_interactions_top_features_to_select_percentage,\n random_state=random_state,\n subclass=subcase,\n n_jobs=n_jobs,\n )\n else:\n dfs = SKLEARN_EMPTY_STEP\n\n # apply pca\n if apply_pca == True:\n pca = Reduce_Dimensions_For_Supervised_Path(\n target=target_variable,\n method=pca_method,\n variance_retained_or_number_of_components=pca_variance_retained_or_number_of_components,\n random_state=random_state,\n )\n else:\n pca = SKLEARN_EMPTY_STEP\n\n pipe = Pipeline(\n [\n (\"dtypes\", dtypes),\n (\"imputer\", imputer),\n (\n \"new_levels1\",\n new_levels1,\n ), # specifically used for ordinal, so that if a new level comes in a feature that was marked ordinal can be handled\n (\"ordinal\", ordinal),\n (\"cardinality\", cardinality),\n (\"znz\", znz),\n (\"club_R_L\", club_R_L),\n (\"new_levels\", new_levels),\n (\"feature_time\", feature_time),\n (\"group\", group),\n (\"nonliner\", nonliner),\n (\"scaling\", scaling),\n (\"P_transform\", P_transform),\n (\"binn\", binn),\n (\"rem_outliers\", rem_outliers),\n (\"cluster_all\", cluster_all),\n (\"dummy\", dummy),\n (\"fix_perfect\", fix_perfect),\n (\"clean_names\", clean_names),\n (\"feature_select\", feature_select),\n (\"fix_multi\", fix_multi),\n (\"dfs\", dfs),\n (\"pca\", pca),\n ]\n )\n\n return pipe\n\n\n# ______________________________________________________________________________________________________________________________________________________\n# preprocess_all_in_one_unsupervised\ndef Preprocess_Path_Two(\n train_data,\n ml_usecase=None,\n test_data=None,\n categorical_features=[],\n numerical_features=[],\n time_features=[],\n features_todrop=[],\n display_types=False,\n imputation_type=\"simple\",\n numeric_imputation_strategy=\"mean\",\n categorical_imputation_strategy=\"not_available\",\n imputation_classifier=None,\n imputation_regressor=None,\n imputation_max_iter=10,\n imputation_warm_start=False,\n imputation_order=\"ascending\",\n apply_zero_nearZero_variance=False,\n club_rare_levels=False,\n rara_level_threshold_percentage=0.05,\n apply_untrained_levels_treatment=False,\n untrained_levels_treatment_method=\"least frequent\",\n apply_cardinality_reduction=False,\n cardinal_method=\"cluster\",\n cardinal_features=[],\n apply_ordinal_encoding=False,\n ordinal_columns_and_categories={},\n apply_binning=False,\n features_to_binn=[],\n apply_grouping=False,\n group_name=[],\n 
features_to_group_ListofList=[[]],\n scale_data=False,\n scaling_method=\"zscore\",\n Power_transform_data=False,\n Power_transform_method=\"quantile\",\n remove_outliers=False,\n outlier_contamination_percentage=0.01,\n outlier_methods=[\"pca\", \"iso\", \"knn\"],\n remove_multicollinearity=False,\n maximum_correlation_between_features=0.90,\n remove_perfect_collinearity=False,\n apply_pca=False,\n pca_method=\"pca_liner\",\n pca_variance_retained_or_number_of_components=0.99,\n random_state=42,\n n_jobs=-1,\n):\n\n \"\"\"\n Follwoing preprocess steps are taken:\n - THIS IS BUILt FOR UNSUPERVISED LEARNING\n - 1) Auto infer data types \n - 2) Impute (simple or with surrogate columns)\n - 3) Ordinal Encoder\n - 4) Drop categorical variables that have zero variance or near zero variance\n - 5) Club categorical variables levels togather as a new level (other_infrequent) that are rare / at the bottom 5% of the variable distribution\n - 6) Club unseen levels in test dataset with most/least frequent levels in train dataset \n - 7) Reduce high cardinality in categorical features using clustering or counts\n - 8) Generate sub features from time feature such as 'month','weekday',is_month_end','is_month_start' & 'hour'\n - 9) Group features by calculating min, max, mean, median & sd of similar features\n -10) Scales & Power Transform (zscore,minmax,yeo-johnson,quantile,maxabs,robust) , including option to transform target variable\n -11) Apply binning to continious variable when numeric features are provided as a list \n -12) Detect & remove outliers using isolation forest, knn and PCA\n -13) One Hot / Dummy encoding\n -14) Remove special characters from column names such as commas, square brackets etc to make it competible with jason dependednt models\n -15) Fix multicollinearity\n -16) Apply diamension reduction techniques such as pca_liner, pca_kernal, incremental, tsne \n - except for pca_liner, all other method only takes number of component (as integer) i.e no variance explaination metohd available \n \"\"\"\n return Preprocess_Path_One(\n train_data=train_data,\n ml_usecase=ml_usecase,\n target_variable=None,\n test_data=test_data,\n categorical_features=categorical_features,\n numerical_features=numerical_features,\n time_features=time_features,\n features_todrop=features_todrop,\n display_types=display_types,\n imputation_type=imputation_type,\n numeric_imputation_strategy=numeric_imputation_strategy,\n categorical_imputation_strategy=categorical_imputation_strategy,\n imputation_classifier=imputation_classifier,\n imputation_regressor=imputation_regressor,\n imputation_max_iter=imputation_max_iter,\n imputation_warm_start=imputation_warm_start,\n imputation_order=imputation_order,\n apply_zero_nearZero_variance=apply_zero_nearZero_variance,\n club_rare_levels=club_rare_levels,\n rara_level_threshold_percentage=rara_level_threshold_percentage,\n apply_untrained_levels_treatment=apply_untrained_levels_treatment,\n untrained_levels_treatment_method=untrained_levels_treatment_method,\n apply_ordinal_encoding=apply_ordinal_encoding,\n ordinal_columns_and_categories=ordinal_columns_and_categories,\n apply_cardinality_reduction=apply_cardinality_reduction,\n cardinal_method=cardinal_method,\n cardinal_features=cardinal_features,\n apply_binning=apply_binning,\n features_to_binn=features_to_binn,\n apply_grouping=apply_grouping,\n group_name=group_name,\n features_to_group_ListofList=features_to_group_ListofList,\n scale_data=scale_data,\n scaling_method=scaling_method,\n 
Power_transform_data=Power_transform_data,\n Power_transform_method=Power_transform_method,\n remove_outliers=remove_outliers,\n outlier_contamination_percentage=outlier_contamination_percentage,\n outlier_methods=outlier_methods,\n dummify_categoricals=False,\n remove_multicollinearity=remove_multicollinearity,\n maximum_correlation_between_features=maximum_correlation_between_features,\n remove_perfect_collinearity=remove_perfect_collinearity,\n apply_pca=apply_pca,\n pca_method=pca_method,\n pca_variance_retained_or_number_of_components=pca_variance_retained_or_number_of_components,\n random_state=random_state,\n n_jobs=n_jobs,\n )\n\n\ndef _get_labelencoder_reverse_dict(le: LabelEncoder) -> dict:\n # now get the replacement dict\n rev = le.inverse_transform(range(0, len(le.classes_)))\n rep = np.array(range(0, len(le.classes_)))\n replacement = {}\n for i, k in zip(rev, rep):\n replacement[i] = k\n return replacement\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.merge",
"pandas.to_datetime",
"sklearn.utils.validation.check_is_fitted",
"sklearn.cluster.KMeans",
"sklearn.metrics.silhouette_score",
"sklearn.decomposition.IncrementalPCA",
"sklearn.preprocessing.QuantileTransformer",
"sklearn.preprocessing.MaxAbsScaler",
"pandas.DataFrame",
"sklearn.base.clone",
"sklearn.manifold.TSNE",
"pandas.isna",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.PowerTransformer",
"numpy.where",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.calinski_harabasz_score",
"pandas.reset_option",
"numpy.histogram",
"sklearn.ensemble.RandomForestClassifier",
"numpy.arange",
"sklearn.impute.SimpleImputer",
"sklearn.pipeline.Pipeline",
"numpy.sin",
"numpy.nansum",
"pandas.set_option",
"pandas.concat",
"numpy.power",
"pandas.Categorical",
"numpy.tan",
"pandas.unique",
"sklearn.utils.validation.check_random_state",
"sklearn.cross_decomposition.PLSRegression",
"numpy.corrcoef",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.abs",
"pandas.isnull",
"sklearn.preprocessing.RobustScaler",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.KBinsDiscretizer",
"numpy.cos",
"sklearn.preprocessing.OrdinalEncoder",
"scipy.stats.mode",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.KernelPCA"
]
] |
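The `apis` listed above center on scikit-learn preprocessing classes and `sklearn.pipeline.Pipeline`; the final helper in the code, `_get_labelencoder_reverse_dict`, builds a label-to-code mapping from a fitted `LabelEncoder`. A minimal, self-contained sketch of that pattern (the sample labels are made up for illustration):

```python
# Minimal sketch of the LabelEncoder reverse-mapping pattern; labels are illustrative.
import numpy as np
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["cat", "dog", "cat", "bird"])
codes = np.arange(len(le.classes_))        # 0 .. n_classes - 1
originals = le.inverse_transform(codes)    # original string labels, in code order
replacement = dict(zip(originals, codes))  # {"bird": 0, "cat": 1, "dog": 2}
print(replacement)
```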
eth-sri/dp-sniper
|
[
"ee087b2584ca97d1064154db5dd2bda0bb5a8ceb"
] |
[
"statdpwrapper/verification.py"
] |
[
"from typing import List\n\nimport numpy as np\n\nfrom dpsniper.search.ddconfig import DDConfig\nfrom statdpwrapper.postprocessing import Postprocessing\nfrom dpsniper.probability.estimators import PrEstimator\nfrom dpsniper.attack.attack import Attack\n\nfrom statdpwrapper.postprocessing import the_zero_noise_prng\n\n\nclass StatDPAttack(Attack):\n \"\"\"\n A wrapper for a StatDP event.\n Note: Does not really extend dpsniper.attack.Attack due to missing vectorization in check\n \"\"\"\n\n def __init__(self, event, postprocessing: Postprocessing):\n \"\"\"\n Creates an attack from a StatDP event and postprocessing.\n \"\"\"\n self.event = event\n self.postprocessing = postprocessing\n self.noisefree_reference = None\n\n def set_noisefree_reference(self, noisefree_reference):\n self.noisefree_reference = noisefree_reference\n\n def check(self, b: List):\n \"\"\"\n Computes the probabilities whether given outputs b lie in the attack set\n\n Args:\n b: list of list (outer list are samples, inner list are dimensions)\n noisefree_reference: noisefree reference evaluation (if required by postprocessing)\n\n Returns:\n boolean 1d array of shape (n_samples,)\n \"\"\"\n if self.postprocessing.requires_noisefree_reference and self.noisefree_reference is None:\n raise ValueError(\"check(...) requires noisefree_reference\")\n\n x = np.empty(shape=(len(b), self.postprocessing.n_output_dimensions()), dtype=float)\n for idx, sample in enumerate(b): # loop over samples\n x[idx, :] = np.asarray(self.postprocessing.process(sample, noisefree_reference=self.noisefree_reference),\n dtype=float)\n return self._check_event(x)\n\n def _check_event(self, x):\n \"\"\"\n Args:\n x: ndarray of shape (n_samples, d), dtype float\n\n Returns:\n ndarray of shape (n_samples,) containing probabilitites in [0.0, 1.0]\n \"\"\"\n res = np.full(fill_value=True, shape=x.shape[0])\n for col in range(0, x.shape[1]): # loop cover columns\n if isinstance(self.event[col], int) or isinstance(self.event[col], float):\n # equality check\n res = np.logical_and(res, x[:, col] == self.event[col])\n else:\n # open interval check\n low = self.event[col][0]\n high = self.event[col][1]\n res = np.logical_and(res, np.logical_and(\n x[:, col] > low,\n x[:, col] < high\n ))\n return res.astype(float)\n\n\nclass StatDPPrEstimator(PrEstimator):\n \"\"\"\n A probability estimator based on samples for StatDP algorithm implementations.\n \"\"\"\n\n def __init__(self, mechanism, n_samples: int, config: DDConfig, use_parallel_executor=False, **kwargs):\n super().__init__(mechanism, n_samples, config, use_parallel_executor)\n self.mechanism_kwargs = kwargs\n\n def _get_samples(self, a, n_samples):\n # non-vectorized variant for StatDP\n l = [0] * n_samples\n for i in range(0, n_samples):\n l[i] = self.mechanism(np.random.default_rng(), a, **self.mechanism_kwargs)\n return l\n\n def _get_noisefree_reference(self, a):\n return self.mechanism(the_zero_noise_prng, a, **self.mechanism_kwargs)\n"
] |
[
[
"numpy.logical_and",
"numpy.random.default_rng",
"numpy.full"
]
] |
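The core of `StatDPAttack._check_event` above is a vectorized per-column membership test built from `numpy.logical_and`: a scalar event entry means an equality check, a pair means an open interval. A small standalone sketch with a made-up event and samples:

```python
# Sketch of the per-column event check; event and samples are illustrative.
import numpy as np

event = [1.0, (0.0, 0.5)]                  # column 0 == 1.0, column 1 in (0.0, 0.5)
x = np.array([[1.0, 0.2], [1.0, 0.7], [0.0, 0.3]])

res = np.full(x.shape[0], True)
for col, cond in enumerate(event):
    if isinstance(cond, (int, float)):
        res = np.logical_and(res, x[:, col] == cond)
    else:
        low, high = cond
        res = np.logical_and(res, np.logical_and(x[:, col] > low, x[:, col] < high))
print(res.astype(float))                   # [1. 0. 0.]
```

Returning the boolean mask as floats lets the caller treat membership as a 0/1 probability per sample.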
ckvic3/VisDrone
|
[
"bdba106e4d5081f9d3f07af80e736eb3287ba6e8"
] |
[
"training_dataset/VisDrone/par_crop.py"
] |
[
"from os.path import join, isdir\nfrom os import listdir, mkdir, makedirs\nimport cv2\nimport numpy as np\nimport glob\nimport xml.etree.ElementTree as ET\nfrom concurrent import futures\nimport sys\nimport time\nsub_sets = ['VisDrone2019-SOT-train','VisDrone2019-SOT-val']\nVisDrone_base_path = '/home/tempuser1/VisDrone/Single-Object Tracking/'\nanno_base_path = None\ndata_base_path =None\nglobals()\n\n# Print iterations progress (thanks StackOverflow)\ndef printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n \"\"\"\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\x1b[2K\\r')\n sys.stdout.flush()\n\ndef crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):\n a = (out_sz-1) / (bbox[2]-bbox[0])\n b = (out_sz-1) / (bbox[3]-bbox[1])\n c = -a * bbox[0]\n d = -b * bbox[1]\n mapping = np.array([[a, 0, c],\n [0, b, d]]).astype(np.float)\n crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)\n return crop\n\n\ndef pos_s_2_bbox(pos, s):\n return [pos[0]-s/2, pos[1]-s/2, pos[0]+s/2, pos[1]+s/2]\n\n\ndef crop_like_SiamFC(image, bbox, context_amount=1, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):\n target_pos = [(bbox[2]+bbox[0])/2., (bbox[3]+bbox[1])/2.]\n target_size = [bbox[2]-bbox[0], bbox[3]-bbox[1]]\n wc_z = target_size[1] + context_amount * sum(target_size)\n hc_z = target_size[0] + context_amount * sum(target_size)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = exemplar_size / s_z\n d_search = (instanc_size - exemplar_size) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n\n z = crop_hwc(image, pos_s_2_bbox(target_pos, s_z), exemplar_size, padding)\n x = crop_hwc(image, pos_s_2_bbox(target_pos, s_x), instanc_size, padding)\n return z, x\n\ndef crop_video(video, crop_path, instanc_size=511):\n global anno_base_path\n global data_base_path\n\n f = open(join(anno_base_path, video))\n annos = f.readlines()\n f.close()\n\n video =video.strip('.txt')\n\n video_crop_base_path =join(crop_path, video)\n if not isdir(video_crop_base_path): makedirs(video_crop_base_path)\n\n FramePaths = sorted(listdir(join(data_base_path, video)))\n\n for idx,FramePath in enumerate(FramePaths):\n im = cv2.imread(join(data_base_path,video,FramePath))\n avg_chans = np.mean(im, axis=(0, 1))\n\n anno =annos[idx].strip().split(',')\n bbox =[int(anno[0]), int(anno[1]),\n int(anno[0])+int(anno[2]),int(anno[1])+int(anno[3])]\n\n z,x =crop_like_SiamFC(im,bbox,instanc_size=instanc_size,padding=avg_chans)\n cv2.imwrite(join(video_crop_base_path,'{:06d}.{:02d}.z.jpg'.format(idx,0)),z)\n cv2.imwrite(join(video_crop_base_path,'{:06d}.{:02d}.x.jpg'.format(idx,0)),x)\n\ndef main(instanc_size=511, num_threads=24):\n crop_path = './crop{:d}'.format(instanc_size)\n if not isdir(crop_path): mkdir(crop_path)\n for sub_set in sub_sets:\n 
print('processing',sub_set,'\\n')\n set_crop_path = join(crop_path,sub_set)\n path = join(VisDrone_base_path,sub_set)\n global anno_base_path\n global data_base_path\n anno_base_path = join(path,'annotations')\n data_base_path = join(path,'sequences')\n videos = sorted(listdir(anno_base_path))\n n_videos = len(videos)\n\n with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:\n fs = [executor.submit(crop_video, video, set_crop_path, instanc_size) for video in videos]\n for i, f in enumerate(futures.as_completed(fs)):\n # Write progress to error so that it can be seen\n printProgress(i, n_videos, suffix='Done ', barLength=40)\n\nif __name__ == '__main__':\n since = time.time()\n main(int(sys.argv[1]), int(sys.argv[2]))\n time_elapsed = time.time() - since\n print('Total complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))"
] |
[
[
"numpy.array",
"numpy.mean",
"numpy.sqrt"
]
] |
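`crop_like_SiamFC` above derives the exemplar and search crop sizes from the target box plus a context margin before calling `crop_hwc`. A numpy-only sketch of that size computation (the bounding box is an arbitrary example; the crop itself is omitted):

```python
# Sketch of the SiamFC-style context-size computation; bbox values are made up.
import numpy as np

bbox = [100, 120, 180, 200]                      # x1, y1, x2, y2
target_pos = [(bbox[2] + bbox[0]) / 2., (bbox[3] + bbox[1]) / 2.]
target_size = [bbox[2] - bbox[0], bbox[3] - bbox[1]]

context_amount, exemplar_size, instance_size = 1, 127, 255
wc_z = target_size[1] + context_amount * sum(target_size)
hc_z = target_size[0] + context_amount * sum(target_size)
s_z = np.sqrt(wc_z * hc_z)                       # exemplar crop side in image pixels
scale_z = exemplar_size / s_z
d_search = (instance_size - exemplar_size) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad                              # search crop side in image pixels
print(target_pos, s_z, s_x)
```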
jordanparker6/datascience-starter
|
[
"3eef1640a45d19431e9fb26adf5e089d3708dab1"
] |
[
"ds/dataset/gridsearchcv.py"
] |
[
"import itertools\nimport numpy as np\nimport pandas as pd\nfrom typing import Dict, Any, Tuple\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn.model_selection import ShuffleSplit\nfrom tqdm import tqdm\nfrom abc import ABC\n\nclass GridsearchCVBase(ABC):\n \"\"\"A base class for cross validated gridsearch.\n\n Args:\n estimator: A scikit learn stimator that implements the fit and score methods.\n cv: The number of folds in kfold cross validation.\n\n \"\"\"\n def __init__(self, estimator, cv: int = 5):\n super().__init__()\n self.estimator = estimator #: A scikit learn estimator that implements fit and score methods.\n self.cv = cv #: The number of folds in kflod cross validation.\n self.splitter = None #: A class for splitting the dataframe into k-folds.\n\n def crossval(self, df: pd.DataFrame, parameters: Dict[str, Any], cv: int = 5) -> np.float:\n \"\"\"Performs k-fold cross validation using the estimators score method and the provided splitter.\n\n Args:\n df: A pandas dataframe of target and feature variables.\n parameters: A dictionary of parameters and possible values.\n cv: The number of folds in k-fold cross validation.\n\n Returns:\n The mean score for the cross validation.\n\n \"\"\"\n if self.splitter == None:\n raise NotImplementedError\n else:\n cv = self.splitter(n_splits=cv)\n score = []\n for train_index, test_index in cv.split(df):\n train, test = df.iloc[train_index, :], df.iloc[test_index, :]\n model = self.estimator(**parameters)\n model.fit(train)\n score.append(model.score(test))\n return np.array(score).mean()\n\n def fit(self, df: pd.DataFrame, parameters: Dict[str, Any], min_loss: bool = True) -> Tuple[Dict[str, Any], np.ndarray]:\n \"\"\"Fit method for cross validated grid search.\n\n Args:\n df: A pandas dataframe of target and feature variables.\n parameters: A dictionary of parameters and possible values.\n min_loss: A boolean indicator to optimise for the min or max score in gridsearch.\n\n \"\"\"\n scores = []\n params = []\n values = parameters.values()\n options = [dict(zip(parameters.keys(), v)) for v in itertools.product(*parameters.values())]\n for option in tqdm(options):\n score = self.crossval(df, option, self.cv)\n scores.append(score)\n params.append(option)\n scores = np.array(scores)\n if min_loss:\n best = np.nanargmin(scores)\n else:\n best = np.nanargmax(scores)\n return params[best], scores\n\n\nclass GridsearchCV(GridsearchCVBase):\n \"\"\"\"A gridsearch and crossvalidation approach for iid datasets.\n \"\"\"\n def __init__(self, estimator, cv: int = 5):\n super().__init__(estimator, cv)\n self.splitter = ShuffleSplit\n\n\nclass TimeseriesGridsearchCV(GridsearchCVBase):\n \"\"\"\"A gridsearch and crossvalidation approach for timeseries datasets.\n \"\"\"\n def __init__(self, estimator, cv=5):\n super().__init__(estimator, cv)\n self.splitter = TimeSeriesSplit"
] |
[
[
"numpy.nanargmin",
"numpy.nanargmax",
"numpy.array"
]
] |
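`GridsearchCVBase.fit` above expands the parameter dictionary into all value combinations with `itertools.product` before cross-validating each candidate. A standalone sketch of that expansion (the grid is illustrative):

```python
# Sketch of the parameter-grid expansion; the grid values are illustrative.
import itertools

parameters = {"alpha": [0.1, 1.0], "fit_intercept": [True, False]}
options = [dict(zip(parameters.keys(), v))
           for v in itertools.product(*parameters.values())]
print(options)
# [{'alpha': 0.1, 'fit_intercept': True}, {'alpha': 0.1, 'fit_intercept': False}, ...]
```

Note that the number of candidates grows multiplicatively with the number of values per parameter, which is why the fit loop wraps the options in `tqdm`.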
patilli/vqa_benchmarking
|
[
"53a05d8956e71e99de6d97db5e7a7e400b6cc65f"
] |
[
"backend/vqa_benchmarking_backend/datasets/CLEVRDataset.py"
] |
[
"import json\nimport os\nimport random\nimport re\nfrom typing import Dict, List, Tuple, Union\n\nimport cv2 as cv\nimport numpy as np\nimport PIL\nimport torch\nfrom tqdm.auto import tqdm\nfrom vqa_benchmarking_backend.datasets.dataset import (DataSample,\n DiagnosticDataset)\nfrom vqa_benchmarking_backend.tokenizers.vqatokenizer import (\n process_digit_article, process_punctuation)\nfrom vqa_benchmarking_backend.utils.vocab import Vocabulary\n\n\ndef preprocess_question(question: str) -> List[str]:\n \"\"\"\n Remove punctuation and make everything lower case\n \"\"\"\n return re.sub(\n r\"([.,'!?\\\"()*#:;])\",\n '',\n question.lower()\n ).replace('-', ' ').replace('/', ' ')\n\ndef load_img(path: str, transform = None) -> np.ndarray:\n \"\"\"\n Load an image using module ``cv2``\n \"\"\"\n img = cv.imread(path)\n if transform:\n img = transform(img)\n return img\n\ndef load_img_feats(path: str) -> torch.FloatTensor:\n \"\"\"\n Load a numpy array containing image features\n \"\"\"\n # Format:\n # f['info']: image_id, objects_id (object class id per ROI), objects_conf (propabily in [0,1] per object id), attrs_id (attribute id per ROI)\n # f[\"num_bbox\"]: number of ROIs\n # f['x]: feature matrix (number of ROIs x feature_dim = 204)\n img_feats = np.load(path, allow_pickle=True)[\"x\"]\n return torch.from_numpy(img_feats)\n\nclass CLEVRDataSample(DataSample):\n \"\"\"\n Class describing one data sample of the CLEVR dataset\n Inheriting from ``DataSample``\n \"\"\"\n def __init__(self, question_id: str, question: str, answers: Dict[str, float], image_id: str, image_path: str, image_feat_path: str, image_transform = None) -> None:\n super().__init__(question_id, question, answers, image_id, image_path, image_feat_path, image_transform)\n self._question = preprocess_question(question)\n\n @property\n def image(self) -> np.ndarray:\n \"\"\"\n Returns the image, if not present it loads it from ``self._image_path``\n \"\"\"\n if isinstance(self._img, type(None)):\n self._img = load_img(self._image_path)\n return self._img\n \n @image.setter\n def image(self, image: np.ndarray):\n \"\"\"\n Overrides image, resets image features since image was updated\n \"\"\"\n self._img = image\n # reset image features, since image updated\n self._img_feats = None\n\n @property\n def question_tokenized(self) -> List[str]:\n \"\"\"\n Returns tokenized question\n \"\"\"\n return self._question.split()\n \n @property\n def question(self) -> str:\n \"\"\"\n Returns full question\n \"\"\"\n return self._question\n \n @question.setter\n def question(self, question):\n self._question = preprocess_question(question)\n # reset tokens, token ids and embeddings since question updated\n self._q_token_ids = None\n self._q_feats = None\n \n def __str__(self):\n \"\"\"\n Stringify object\n \"\"\"\n str_dict = {\n 'question_id': self.question_id,\n 'question': self.question,\n 'tokens': self.question_tokenized,\n 'answer': self.answers,\n 'imageId': self.image_id,\n 'image_path': self._image_path \n }\n return str(str_dict)\n\n def question_token_ids(self, vocab: Vocabulary) -> torch.LongTensor:\n return torch.tensor([vocab.stoi(token) for token in self.question_tokenized], dtype=torch.long)\n\nclass CLEVRDataset(DiagnosticDataset):\n \"\"\"\n Class describing the CLEVR dataset\n Inheriting from ``DiagnosticDataset``\n \"\"\"\n def __init__(self, question_file: str, \n img_dir, \n img_feat_dir,\n idx2ans,\n transform=None, \n load_img_features=False,\n dataset_fraction: float = 0.05, # percentage of data to keep\n random_seed: 
int = 12345):\n\n self.img_dir = img_dir\n self.img_feat_dir = img_feat_dir\n self.transform = transform\n self.load_img_features = load_img_features\n self.idx2ans = idx2ans\n\n self.data, self.qid_to_sample, self.q_vocab, self.a_vocab = self._load_data(question_file, dataset_fraction, random_seed)\n\n def _load_data(self, question_file: str, dataset_fraction, random_seed) -> Tuple[List[DataSample], Dict[str, DataSample], Vocabulary, Vocabulary]:\n \"\"\"\n Loads data from CLEVR json files\n Returns:\n * data: list of ``CLEVRDataSample``\n * qid_to_sample: mapping of question id to data sample\n * question_vocab: ``Vocabulary`` of all unique words occuring in the data\n * answer_vocab: ``Vocabulary`` of all unique answers\n \"\"\"\n random.seed(random_seed)\n data = []\n qid_to_sample = {}\n answer_vocab = Vocabulary(itos={}, stoi={})\n question_vocab = Vocabulary(itos={}, stoi={})\n # load questions\n ques = json.load(open(question_file))['questions']\n if dataset_fraction < 1.0:\n # draw fraction of dataset at random\n num_keep = int(len(ques) * dataset_fraction) \n print(f\"Keeping {dataset_fraction*100}%: {num_keep}/{len(ques)} samples\")\n ques = random.sample(ques, k=num_keep)\n for question in tqdm(ques):\n iid = question['image_filename']\n qid = str(question['question_index'])\n sample = CLEVRDataSample(question_id=qid,\n question=question['question'], \n answers={question['answer']: 1.0},\n image_id=iid,\n image_path=os.path.join(self.img_dir, f\"{iid}\"),\n image_feat_path=os.path.join(self.img_feat_dir, f\"{iid}.npz\"))\n answer_vocab.add_token(question['answer'])\n for token in sample.question_tokenized:\n question_vocab.add_token(token)\n qid_to_sample[qid] = sample\n data.append(qid_to_sample[qid])\n \n return data, qid_to_sample, question_vocab, answer_vocab\n \n def __getitem__(self, index) -> DataSample:\n \"\"\"\n Returns a data sample\n \"\"\"\n return self.data[index]\n\n def label_from_class(self, class_index: int) -> str:\n \"\"\"\n Get the answer string of a given class index\n \"\"\"\n return self.a_vocab.itos(class_index)\n \n def word_in_vocab(self, word: str) -> bool:\n \"\"\"\n Checks if a word occured inside the ``Vocabulary`` dervied of all questions\n \"\"\"\n return self.q_vocab.exists(word)\n \n def get_name(self) -> str:\n \"\"\"\n Returns the name of the dataset, required for file caching\n \"\"\"\n return \"CLEVR\"\n\n def __len__(self):\n \"\"\"\n Returns the length of the CLEVRDataset as in self.data\n \"\"\"\n return len(self.data)\n\n def index_to_question_id(self, index) -> str:\n \"\"\"\n Get the index of a specific question id\n \"\"\"\n return self.data[index].question_id\n \n def class_idx_to_answer(self, class_idx: int) -> Union[str, None]:\n \"\"\"\n Get the answer string for a given class index from the ``self.idx2ans`` dictionary\n \"\"\"\n if isinstance(next(iter(self.idx2ans.keys())), int):\n if class_idx in self.idx2ans:\n return self.idx2ans[class_idx]\n else:\n if str(class_idx) in self.idx2ans:\n return self.idx2ans[str(class_idx)]\n return None\n"
] |
[
[
"numpy.load",
"torch.from_numpy"
]
] |
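`load_img_feats` above reads precomputed ROI features from an `.npz` file and hands them to PyTorch via `torch.from_numpy`. A self-contained sketch that writes a small dummy feature file first (the 36 x 2048 shape is illustrative, not the repo's actual feature dimension):

```python
# Sketch of the .npz feature-loading pattern; the file and shape are illustrative.
import numpy as np
import torch

np.savez("demo_feats.npz", x=np.random.rand(36, 2048).astype(np.float32))
feats = np.load("demo_feats.npz", allow_pickle=True)["x"]   # ROI feature matrix
tensor = torch.from_numpy(feats)
print(tensor.shape)                                         # torch.Size([36, 2048])
```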
SFU-MARS/optimized_dp
|
[
"4a3b33f7da29479a3bfdc61905b61c05c76e6795"
] |
[
"solver.py"
] |
[
"import heterocl as hcl\nimport numpy as np\nimport time\n\nfrom Plots.plotting_utilities import *\nfrom argparse import ArgumentParser\n\n# Backward reachable set computation library\nfrom computeGraphs.graph_3D import *\nfrom computeGraphs.graph_4D import *\nfrom computeGraphs.graph_5D import *\nfrom computeGraphs.graph_6D import *\n\nfrom TimeToReach.TimeToReach_3D import *\nfrom TimeToReach.TimeToReach_4D import *\nfrom TimeToReach.TimeToReach_5D import *\n\n# Value Iteration library\nfrom valueIteration.value_iteration_3D import *\nfrom valueIteration.value_iteration_4D import *\nfrom valueIteration.value_iteration_5D import *\nfrom valueIteration.value_iteration_6D import *\n\ndef solveValueIteration(MDP_obj):\n print(\"Welcome to optimized_dp \\n\")\n # Initialize the HCL environment\n hcl.init()\n hcl.config.init_dtype = hcl.Float()\n\n ########################################## INITIALIZE ##########################################\n\n # Convert the python array to hcl type array\n V_opt = hcl.asarray(np.zeros(MDP_obj._ptsEachDim))\n intermeds = hcl.asarray(np.ones(MDP_obj._actions.shape[0]))\n trans = hcl.asarray(MDP_obj._trans)\n gamma = hcl.asarray(MDP_obj._gamma)\n epsilon = hcl.asarray(MDP_obj._epsilon)\n count = hcl.asarray(np.zeros(1))\n maxIters = hcl.asarray(MDP_obj._maxIters)\n actions = hcl.asarray(MDP_obj._actions)\n bounds = hcl.asarray(MDP_obj._bounds)\n goal = hcl.asarray(MDP_obj._goal)\n ptsEachDim = hcl.asarray(MDP_obj._ptsEachDim)\n sVals = hcl.asarray(np.zeros([MDP_obj._bounds.shape[0]]))\n iVals = hcl.asarray(np.zeros([MDP_obj._bounds.shape[0]]))\n interpV = hcl.asarray(np.zeros([1]))\n useNN = hcl.asarray(MDP_obj._useNN)\n\n print(MDP_obj._bounds.shape[0])\n print(np.zeros([MDP_obj._bounds.shape[0]]))\n if MDP_obj._bounds.shape[0] == 3:\n fillVal = hcl.asarray(MDP_obj._fillVal)\n f = value_iteration_3D(MDP_obj)\n if MDP_obj._bounds.shape[0] == 4:\n f = value_iteration_4D(MDP_obj)\n if MDP_obj._bounds.shape[0] == 5:\n f = value_iteration_5D(MDP_obj)\n if MDP_obj._bounds.shape[0] == 6:\n f = value_iteration_6D(MDP_obj)\n\n # Build the graph and use the executable\n # Now use the executable\n t_s = time.time()\n if MDP_obj._bounds.shape[0] == 3:\n f(V_opt, actions, intermeds, trans, interpV, gamma, epsilon, iVals, sVals, bounds, goal, ptsEachDim, count,\n maxIters, useNN, fillVal)\n else:\n f(V_opt, actions, intermeds, trans, interpV, gamma, epsilon, iVals, sVals, bounds, goal, ptsEachDim, count,\n maxIters, useNN)\n t_e = time.time()\n\n V = V_opt.asnumpy()\n c = count.asnumpy()\n print(\"Finished in \", int(c[0]), \" iterations\")\n print(\"Took \", t_e - t_s, \" seconds\")\n\n # # Write results to file\n # if (MDP_obj.dir_path):\n # dir_path = MDP_obj.dir_path\n # else:\n # dir_path = \"./hcl_value_matrix_test/\"\n #\n # if (MDP_obj.file_name):\n # file_name = MDP_obj.file_name\n # else:\n # file_name = \"hcl_value_iteration_\" + str(int(c[0])) + \"_iterations_by\" + (\n # \"_Interpolation\" if MDP_obj._useNN[0] == 0 else \"_NN\")\n # MDP_obj.writeResults(V, dir_path, file_name, just_values=True)\n return V\n\ndef HJSolver(dynamics_obj, grid, multiple_value, tau, compMethod,\n plot_option, accuracy=\"low\"):\n print(\"Welcome to optimized_dp \\n\")\n if type(multiple_value) == list:\n init_value = multiple_value[0]\n constraint = multiple_value[1]\n else:\n init_value = multiple_value\n \n hcl.init()\n hcl.config.init_dtype = hcl.Float(32)\n\n ################# INITIALIZE DATA TO BE INPUT INTO EXECUTABLE ##########################\n\n 
print(\"Initializing\\n\")\n\n V_0 = hcl.asarray(init_value)\n V_1 = hcl.asarray(np.zeros(tuple(grid.pts_each_dim)))\n l0 = hcl.asarray(init_value)\n probe = hcl.asarray(np.zeros(tuple(grid.pts_each_dim)))\n\n list_x1 = np.reshape(grid.vs[0], grid.pts_each_dim[0])\n list_x2 = np.reshape(grid.vs[1], grid.pts_each_dim[1])\n list_x3 = np.reshape(grid.vs[2], grid.pts_each_dim[2])\n if grid.dims >= 4:\n list_x4 = np.reshape(grid.vs[3], grid.pts_each_dim[3])\n if grid.dims >= 5:\n list_x5 = np.reshape(grid.vs[4], grid.pts_each_dim[4])\n if grid.dims >= 6:\n list_x6 = np.reshape(grid.vs[5], grid.pts_each_dim[5])\n\n\n # Convert to hcl array type\n list_x1 = hcl.asarray(list_x1)\n list_x2 = hcl.asarray(list_x2)\n list_x3 = hcl.asarray(list_x3)\n if grid.dims >= 4:\n list_x4 = hcl.asarray(list_x4)\n if grid.dims >= 5:\n list_x5 = hcl.asarray(list_x5)\n if grid.dims >= 6:\n list_x6 = hcl.asarray(list_x6)\n\n # Get executable\n if grid.dims == 3:\n solve_pde = graph_3D(dynamics_obj, grid, compMethod[\"PrevSetsMode\"], accuracy)\n\n if grid.dims == 4:\n solve_pde = graph_4D(dynamics_obj, grid, compMethod[\"PrevSetsMode\"], accuracy)\n\n if grid.dims == 5:\n solve_pde = graph_5D(dynamics_obj, grid, compMethod[\"PrevSetsMode\"], accuracy)\n\n if grid.dims == 6:\n solve_pde = graph_6D(dynamics_obj, grid, compMethod[\"PrevSetsMode\"], accuracy)\n\n # Print out code for different backend\n #print(solve_pde)\n\n ################ USE THE EXECUTABLE ############\n # Variables used for timing\n execution_time = 0\n iter = 0\n tNow = tau[0]\n print(\"Started running\\n\")\n for i in range (1, len(tau)):\n #tNow = tau[i-1]\n t_minh= hcl.asarray(np.array((tNow, tau[i])))\n while tNow <= tau[i] - 1e-4:\n tmp_arr = V_0.asnumpy()\n # Start timing\n iter += 1\n start = time.time()\n\n # Run the execution and pass input into graph\n if grid.dims == 3:\n solve_pde(V_1, V_0, list_x1, list_x2, list_x3, t_minh, l0)\n if grid.dims == 4:\n solve_pde(V_1, V_0, list_x1, list_x2, list_x3, list_x4, t_minh, l0, probe)\n if grid.dims == 5:\n solve_pde(V_1, V_0, list_x1, list_x2, list_x3, list_x4, list_x5 ,t_minh, l0)\n if grid.dims == 6:\n solve_pde(V_1, V_0, list_x1, list_x2, list_x3, list_x4, list_x5, list_x6, t_minh, l0)\n\n tNow = np.asscalar((t_minh.asnumpy())[0])\n\n # Calculate computation time\n execution_time += time.time() - start\n\n # If TargetSetMode is specified by user\n if \"TargetSetMode\" in compMethod:\n if compMethod[\"TargetSetMode\"] == \"max\":\n tmp_val = np.maximum(V_0.asnumpy(), constraint)\n elif compMethod[\"TargetSetMode\"] == \"min\":\n tmp_val = np.minimum(V_0.asnumpy(), constraint)\n # Update final result\n V_1 = hcl.asarray(tmp_val)\n # Update input for next iteration\n V_0 = hcl.asarray(tmp_val)\n\n # Some information printing\n print(t_minh)\n print(\"Computational time to integrate (s): {:.5f}\".format(time.time() - start))\n\n # Time info printing\n print(\"Total kernel time (s): {:.5f}\".format(execution_time))\n print(\"Finished solving\\n\")\n\n ##################### PLOTTING #####################\n if plot_option.do_plot :\n # Only plots last value array for now\n plot_isosurface(grid, V_1.asnumpy(), plot_option)\n\n return V_1.asnumpy()\n\ndef TTRSolver(dynamics_obj, grid, init_value, epsilon, plot_option):\n print(\"Welcome to optimized_dp \\n\")\n ################# INITIALIZE DATA TO BE INPUT INTO EXECUTABLE ##########################\n\n print(\"Initializing\\n\")\n hcl.init()\n hcl.config.init_dtype = hcl.Float(32)\n\n # Convert initial distance value function to initial time-to-reach 
value function\n init_value[init_value < 0] = 0\n init_value[init_value > 0] = 1000\n V_0 = hcl.asarray(init_value)\n prev_val = np.zeros(init_value.shape)\n\n # Re-shape states vector\n list_x1 = np.reshape(grid.vs[0], grid.pts_each_dim[0])\n list_x2 = np.reshape(grid.vs[1], grid.pts_each_dim[1])\n list_x3 = np.reshape(grid.vs[2], grid.pts_each_dim[2])\n if grid.dims >= 4:\n list_x4 = np.reshape(grid.vs[3], grid.pts_each_dim[3])\n if grid.dims >= 5:\n list_x5 = np.reshape(grid.vs[4], grid.pts_each_dim[4])\n if grid.dims >= 6:\n list_x6 = np.reshape(grid.vs[5], grid.pts_each_dim[5])\n\n # Convert states vector to hcl array type\n list_x1 = hcl.asarray(list_x1)\n list_x2 = hcl.asarray(list_x2)\n list_x3 = hcl.asarray(list_x3)\n if grid.dims >= 4:\n list_x4 = hcl.asarray(list_x4)\n if grid.dims >= 5:\n list_x5 = hcl.asarray(list_x5)\n if grid.dims >= 6:\n list_x6 = hcl.asarray(list_x6)\n\n # Get executable\n\n if grid.dims == 3:\n solve_TTR = TTR_3D(dynamics_obj, grid)\n if grid.dims == 4:\n solve_TTR = TTR_4D(dynamics_obj, grid)\n if grid.dims == 5:\n solve_TTR = TTR_5D(dynamics_obj, grid)\n if grid.dims == 6:\n solve_TTR = TTR_6D(dynamics_obj, grid)\n print(\"Got Executable\\n\")\n\n # Print out code for different backend\n # print(solve_pde)\n\n ################ USE THE EXECUTABLE ############\n error = 10000\n count = 0\n start = time.time()\n while error > epsilon:\n print(\"Iteration: {} Error: {}\".format(count, error))\n count += 1\n if grid.dims == 3:\n solve_TTR(V_0, list_x1, list_x2, list_x3)\n if grid.dims == 4:\n solve_TTR(V_0, list_x1, list_x2, list_x3, list_x4)\n if grid.dims == 5:\n solve_TTR(V_0, list_x1, list_x2, list_x3, list_x4, list_x5)\n if grid.dims == 6:\n solve_TTR(V_0, list_x1, list_x2, list_x3, list_x4, list_x5, list_x6 )\n\n error = np.max(np.abs(prev_val - V_0.asnumpy()))\n prev_val = V_0.asnumpy()\n print(\"Total TTR computation time (s): {:.5f}\".format(time.time() - start))\n print(\"Finished solving\\n\")\n\n ##################### PLOTTING #####################\n plot_isosurface(grid, V_0.asnumpy(), plot_option)\n return V_0.asnumpy()\n\n"
] |
[
[
"numpy.reshape",
"numpy.array",
"numpy.zeros",
"numpy.ones"
]
] |
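`HJSolver` above flattens each grid coordinate vector with `np.reshape` and allocates zero-filled value arrays before handing everything to the HeteroCL executable. A numpy-only sketch of that setup step (grid sizes are made up, and the actual solver kernels are not reproduced here):

```python
# Sketch of the grid-axis and value-buffer setup; sizes are illustrative.
import numpy as np

pts_each_dim = [40, 40, 50]
vs = [np.linspace(-5.0, 5.0, n).reshape([n if i == d else 1 for i in range(3)])
      for d, n in enumerate(pts_each_dim)]      # axes stored with singleton dims

list_x1 = np.reshape(vs[0], pts_each_dim[0])    # flattened coordinate vector, shape (40,)
V_0 = np.zeros(tuple(pts_each_dim))             # initial value-function buffer
print(list_x1.shape, V_0.shape)
```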
Curly-Mo/tf-sandbox
|
[
"18d4483dcfb25caa7301f1307362b09ddd7ccb74"
] |
[
"audio_gen/audio_gen_complex.py"
] |
[
"import datetime\nimport librosa\nimport tflearn\nimport numpy as np\nimport argparse\nimport tensorflow as tf\n\n\ndef features_from_audio(y, sr, n_fft=512, seq_len=400):\n S = librosa.core.stft(y, n_fft=n_fft).astype(np.complex64)\n return features_from_spectrogram(S, sr, seq_len=seq_len)\n\n\ndef features_from_spectrogram(S, sr, seq_len=250, win_length=1):\n X, Y = [], []\n for i in range(len(S.T)-seq_len):\n x, y = S[:, i:i+seq_len], S[:, i+seq_len:i+seq_len+win_length]\n X.append(x.T.reshape([-1, x.T.shape[0], x.T.shape[1]]))\n Y.append(y.T)\n return np.vstack(X), np.vstack(Y)\n\n\ndef network(seq_len, n_features):\n init_state = (tf.ones([20, 3], dtype=tf.complex64), tf.ones([20, 3], dtype=tf.complex64))\n input = tf.placeholder(shape=(None, seq_len, n_features), dtype=tf.complex64)\n net = tflearn.input_data(placeholder=input, dtype=tf.complex64)\n net = tflearn.lstm(net, 3, initial_state=init_state)\n net = tflearn.dropout(net, 0.8)\n net = tflearn.fully_connected(net, n_features)\n net = tflearn.regression(net, optimizer='adam', loss='mean_square', dtype=tf.complex64)\n\n model = tflearn.DNN(net, tensorboard_verbose=3)\n return model\n\n\ndef generate(model, X, seq_len):\n Y = []\n for i in range(seq_len):\n y = model.predict([X])\n Y.append(y)\n X = np.vstack([X[1:], y])\n Y = np.vstack(Y)\n return Y.T\n\n\ndef main(audio_path=librosa.util.example_audio_file(), output='test.wav', load=None):\n y, sr = librosa.load(audio_path)\n X, Y = features_from_audio(y, sr)\n seq_len = X.shape[1]\n n_features = X.shape[2]\n model = network(seq_len, n_features)\n if load:\n model.load(load)\n else:\n model.fit(X, Y, n_epoch=5, batch_size=20)\n model.save(f\"model/{datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')}.tflearn\")\n Y_out = generate(model, X[3000], len(X))\n waveform = librosa.istft(Y_out)\n librosa.output.write_wav(output, waveform, sr)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--input', type=str)\n parser.add_argument('--output', type=str)\n parser.add_argument('--load', type=str, default=None)\n args = parser.parse_args()\n main(args.input, args.output, args.load)\n"
] |
[
[
"tensorflow.ones",
"tensorflow.placeholder",
"numpy.vstack"
]
] |
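`features_from_spectrogram` above turns an STFT matrix into (sequence, next-frame) training pairs and stacks them with `np.vstack`. A standalone sketch using a random array in place of the spectrogram:

```python
# Sketch of the sliding-window pair construction; the "spectrogram" is random.
import numpy as np

S = np.random.rand(257, 300).astype(np.float32)   # (n_bins, n_frames), illustrative
seq_len, win_length = 50, 1

X, Y = [], []
for i in range(S.shape[1] - seq_len):
    x = S[:, i:i + seq_len]                       # seq_len consecutive frames
    y = S[:, i + seq_len:i + seq_len + win_length]  # the frame(s) to predict
    X.append(x.T.reshape([-1, x.T.shape[0], x.T.shape[1]]))
    Y.append(y.T)
print(np.vstack(X).shape, np.vstack(Y).shape)     # (250, 50, 257) (250, 257)
```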
mmaaz60/ovr-cnn
|
[
"28d125ae51f0216575e12b5d5ded878874131b22"
] |
[
"maskrcnn_benchmark/engine/bbox_aug.py"
] |
[
"import torch\nimport torchvision.transforms as TT\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import transforms as T\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.modeling.roi_heads.box_head.inference import make_roi_box_post_processor\n\n\ndef im_detect_bbox_aug(model, images, device):\n # Collect detections computed under different transformations\n boxlists_ts = []\n for _ in range(len(images)):\n boxlists_ts.append([])\n\n def add_preds_t(boxlists_t):\n for i, boxlist_t in enumerate(boxlists_t):\n if len(boxlists_ts[i]) == 0:\n # The first one is identity transform, no need to resize the boxlist\n boxlists_ts[i].append(boxlist_t)\n else:\n # Resize the boxlist as the first one\n boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))\n\n # Compute detections for the original image (identity transform)\n boxlists_i = im_detect_bbox(\n model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device\n )\n add_preds_t(boxlists_i)\n\n # Perform detection on the horizontally flipped image\n if cfg.TEST.BBOX_AUG.H_FLIP:\n boxlists_hf = im_detect_bbox_hflip(\n model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device\n )\n add_preds_t(boxlists_hf)\n\n # Compute detections at different scales\n for scale in cfg.TEST.BBOX_AUG.SCALES:\n max_size = cfg.TEST.BBOX_AUG.MAX_SIZE\n boxlists_scl = im_detect_bbox_scale(\n model, images, scale, max_size, device\n )\n add_preds_t(boxlists_scl)\n\n if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:\n boxlists_scl_hf = im_detect_bbox_scale(\n model, images, scale, max_size, device, hflip=True\n )\n add_preds_t(boxlists_scl_hf)\n\n # Merge boxlists detected by different bbox aug params\n boxlists = []\n for i, boxlist_ts in enumerate(boxlists_ts):\n bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])\n scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])\n boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)\n boxlist.add_field('scores', scores)\n boxlists.append(boxlist)\n\n # Apply NMS and limit the final detections\n results = []\n post_processor = make_roi_box_post_processor(cfg)\n \n module = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model\n num_classes = module.roi_heads['box'].predictor.num_classes\n\n for boxlist in boxlists:\n results.append(post_processor.filter_results(boxlist, num_classes))\n\n return results\n\n\ndef im_detect_bbox(model, images, target_scale, target_max_size, device):\n \"\"\"\n Performs bbox detection on the original image.\n \"\"\"\n transform = TT.Compose([\n T.Resize(target_scale, target_max_size),\n TT.ToTensor(),\n T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255\n )\n ])\n images = [transform(image) for image in images]\n images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)\n return model(images.to(device))\n\n\ndef im_detect_bbox_hflip(model, images, target_scale, target_max_size, device):\n \"\"\"\n Performs bbox detection on the horizontally flipped image.\n Function signature is the same as for im_detect_bbox.\n \"\"\"\n transform = TT.Compose([\n T.Resize(target_scale, target_max_size),\n TT.RandomHorizontalFlip(1.0),\n TT.ToTensor(),\n T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255\n )\n ])\n images = [transform(image) for image in images]\n images = 
to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)\n boxlists = model(images.to(device))\n\n # Invert the detections computed on the flipped image\n boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]\n return boxlists_inv\n\n\ndef im_detect_bbox_scale(model, images, target_scale, target_max_size, device, hflip=False):\n \"\"\"\n Computes bbox detections at the given scale.\n Returns predictions in the scaled image space.\n \"\"\"\n if hflip:\n boxlists_scl = im_detect_bbox_hflip(model, images, target_scale, target_max_size, device)\n else:\n boxlists_scl = im_detect_bbox(model, images, target_scale, target_max_size, device)\n return boxlists_scl\n"
] |
[
[
"torch.cat"
]
] |
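The merge step in `im_detect_bbox_aug` above concatenates the boxes and scores produced under each augmentation with `torch.cat` before a single NMS pass. A minimal tensor-only sketch (the detection counts are arbitrary):

```python
# Sketch of merging per-augmentation detections; tensors are random stand-ins.
import torch

boxes_orig = torch.rand(5, 4)    # detections on the original image
boxes_flip = torch.rand(3, 4)    # detections on the flipped image, already un-flipped
scores_orig, scores_flip = torch.rand(5), torch.rand(3)

bbox = torch.cat([boxes_orig, boxes_flip])      # (8, 4)
scores = torch.cat([scores_orig, scores_flip])  # (8,)
print(bbox.shape, scores.shape)
```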
sbruch/rlstudio
|
[
"ba831f794a6b6ababe18bd154317367206511fdb"
] |
[
"rlstudio/stats/point_estimate.py"
] |
[
"from rlstudio.experiment import base as exp_base\nfrom rlstudio.typing import TaskId\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import List\nimport warnings\n\n\nclass PointEstimate:\n \"\"\"Records point estimates such as returns.\"\"\"\n def __init__(self,\n id: str,\n task_ids: List[TaskId],\n horizon: int,\n nrounds: int,\n nruns: int):\n \"\"\"Creates a PointEstimate object.\n \n Args:\n id: A string identifier for this object.\n task_ids: List of task identifiers.\n horizon: Number of point estimates per task.\n nrounds: Number of rounds visiting the task series.\n nruns: Number of simulation runs.\n \"\"\"\n self.id = id\n self.task_ids = task_ids\n self.horizon = horizon\n self.nrounds = nrounds\n self.nruns = nruns\n\n self.stats = np.zeros((nruns, nrounds, len(task_ids), horizon))\n self.stats[:] = np.nan\n\n def record(self, metadata: exp_base.EvaluationMetadata, estimate: float) -> None:\n \"\"\"Records an estimate.\"\"\"\n r = metadata.round_id\n t = self.task_ids.index(metadata.task_id)\n h = metadata.time % self.horizon\n run = metadata.run_id % self.nruns\n self.stats[run, r, t, h] = estimate\n\n def render_sequential(self, xlabel: str, ylabel: str,\n xticks=None, ceiling: float = None, xscale=1):\n \"\"\"Renders the accumulated statistics one task at a time.\n\n Args:\n xlabel: Label for the x axis.\n ylabel: Label for the y axis.\n xticks: Optional xticks. Computed automatically if not given.\n ceiling: Optional maximum value achievable. A dashed horizontal line\n is plotted to highlight this value.\n xscale: Scales ticks on x axis.\n\n Returns:\n A tuple containing `matplotlib.figure.Figure` and `matplotlib.axes.Axes`.\n \"\"\"\n # Construct a color map.\n unique_task_ids = sorted(np.unique(self.task_ids))\n cmap = iter(plt.cm.rainbow(np.linspace(0, 1, len(unique_task_ids))))\n colors = {}\n for task_id in unique_task_ids:\n colors[task_id] = next(cmap)\n\n # Plot.\n fig, ax = plt.subplots()\n\n current_x = 0\n for round_id in range(self.nrounds):\n for task_idx, task_id in enumerate(self.task_ids):\n data = np.squeeze(self.stats[:, round_id, task_idx, :])\n x = np.arange(current_x, current_x + data.shape[-1]) * xscale\n\n if data.ndim == 2:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n y = np.nanmean(data, axis=0)\n err = np.nanstd(data, axis=0)\n plt.fill_between(x, y - err, y + err,\n color=colors[task_id], alpha=.2)\n else:\n y = data\n\n plt.plot(x, y, c=colors[task_id], label=task_id,\n marker='o' if len(y) == 1 else None)\n current_x += data.shape[-1]\n\n # Remove duplicates from entries in the legend.\n handles, labels = plt.gca().get_legend_handles_labels()\n labels, ids = np.unique(labels, return_index=True)\n handles = [handles[i] for i in ids]\n plt.legend(handles, labels, loc='best')\n\n if ceiling is not None:\n ax.axhline(ceiling, color='k', linestyle=':', linewidth=.75)\n\n # Add labels.\n if xticks is not None:\n plt.xticks(xticks)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()\n plt.close()\n\n return fig, ax\n\n def render_compact(self, xlabel: str, ylabel: str,\n xticks=None, ceiling: float = None, xscale=1):\n \"\"\"Renders the accumulated statistics in a compact plot.\n\n Args:\n xlabel: Label for the x axis.\n ylabel: Label for the y axis.\n xticks: Optional xticks. Computed automatically if not given.\n ceiling: Optional maximum value achievable. 
A dashed horizontal line\n is plotted to highlight this value.\n xscale: Scales ticks on x axis.\n\n Returns:\n A tuple containing `matplotlib.figure.Figure` and `matplotlib.axes.Axes`.\n \"\"\"\n # Construct a color map.\n unique_task_ids = sorted(np.unique(self.task_ids))\n cmap = iter(plt.cm.rainbow(np.linspace(0, 1, len(unique_task_ids))))\n colors = {}\n for task_id in unique_task_ids:\n colors[task_id] = next(cmap)\n\n # Plot.\n fig, ax = plt.subplots()\n\n for task_idx, task_id in enumerate(self.task_ids):\n data = self.stats[:, :, task_idx, :].reshape((self.nruns, -1))\n data = np.squeeze(data)\n x = np.arange(data.shape[-1]) * xscale\n\n if data.ndim == 2:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n y = np.nanmean(data, axis=0)\n err = np.nanstd(data, axis=0)\n plt.fill_between(x, y - err, y + err,\n color=colors[task_id], alpha=.2)\n else:\n y = data\n\n plt.plot(x, y, c=colors[task_id], label=task_id,\n marker='o' if len(y) == 1 else None)\n\n plt.legend(loc='best')\n\n if ceiling is not None:\n ax.axhline(ceiling, color='k', linestyle=':', linewidth=.75)\n\n # Add labels.\n if xticks is not None:\n plt.xticks(xticks)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()\n plt.close()\n\n return fig, ax\n\n def is_compatible(self, other) -> bool:\n if not isinstance(other, PointEstimate):\n return False\n if self.id != other.id or self.horizon != other.horizon or self.nrounds != other.nrounds:\n return False\n if not np.array_equal(self.task_ids, other.task_ids):\n return False\n return True\n\n\ndef unify(points: List[PointEstimate]) -> PointEstimate:\n \"\"\"A mechanism to aggregate multiple `PointEstimate` objects into one unified object.\"\"\"\n if len(points) == 0:\n return None\n\n # Validate and compute total number of runs.\n nruns = points[0].nruns\n for point in points[1:]:\n if not points[0].is_compatible(point):\n raise ValueError('Incompatible PointEstimate objects')\n nruns += point.nruns\n\n # Unify.\n unified = PointEstimate(\n id=points[0].id,\n task_ids=points[0].task_ids,\n horizon=points[0].horizon,\n nrounds=points[0].nrounds,\n nruns=nruns)\n\n runs = 0\n for point in points:\n unified.stats[runs:runs+point.nruns] = point.stats\n runs += point.nruns\n\n return unified\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.array_equal",
"numpy.unique",
"numpy.arange",
"numpy.squeeze",
"matplotlib.pyplot.subplots",
"numpy.nanstd",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.close",
"numpy.nanmean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
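`PointEstimate.render_sequential` and `render_compact` above reduce per-run curves to a `nanmean` line with a `nanstd` band drawn by `fill_between`, tolerating missing estimates recorded as NaN. A compact sketch on synthetic data:

```python
# Sketch of the mean-with-band plot; the data are synthetic.
import numpy as np
import matplotlib.pyplot as plt

data = np.random.rand(10, 50)          # (n_runs, horizon), synthetic
data[0, :5] = np.nan                   # missing early estimates are tolerated
x = np.arange(data.shape[-1])
y = np.nanmean(data, axis=0)
err = np.nanstd(data, axis=0)

fig, ax = plt.subplots()
ax.fill_between(x, y - err, y + err, alpha=.2)
ax.plot(x, y)
ax.axhline(1.0, color='k', linestyle=':', linewidth=.75)   # optional ceiling line
plt.close(fig)
```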
snehsagarajput/sudoku-solver-app
|
[
"cf3ac2ae8d413ef88b0c76314ac298c169a0d1c2"
] |
[
"server/utility/preprocess.py"
] |
[
"try:\n from cv2 import cv2\nexcept:\n import cv2 \nimport numpy as np\n\nfrom utility.preprocessGrid import get_corner_points_of_largest_contour, crop_and_reshape\nfrom utility.preprocessNumberCell import PreprocessNumber\n\n\ndef preprocess_sudoku_board(board_img, dimension):\n # Thicker lines to ease the finding of the largest contour\n processed = __preprocess_image(board_img.copy(), line_thickness=3)\n corners = get_corner_points_of_largest_contour(processed_sudoku_board=processed)\n\n # Extract the Sudoku board\n cropped_grid = crop_and_reshape(__preprocess_image(board_img.copy(), line_thickness=1), corners)\n\n # Split the Sudoku board in 81 squares of equal size, then expand each square to minimize the risk that a\n # substantial part of a number is lost between two squares.\n expand_ratio = 1.05\n expanded_images = __get_expanded_images(img=cropped_grid.copy(), expand_ratio=expand_ratio)\n\n # Find and center numbers and add them to centered_numbers, if no number is found, add a black image.\n centered_numbers = []\n for image in expanded_images:\n process_number = PreprocessNumber(image.copy())\n process_number.crop_feature()\n is_number = process_number.is_number()\n if is_number:\n centered_number = process_number.get_centered_number(dimension)\n else:\n # Create a black image\n centered_number = np.zeros(dimension)\n centered_numbers.append((centered_number, is_number))\n\n return centered_numbers\n\n\ndef __preprocess_image(img, line_thickness=3):\n img = cv2.GaussianBlur(img.copy(), (9, 9), 0)\n thresh_hold = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 4)\n\n # Inverts the picture\n inverted = cv2.bitwise_not(thresh_hold, thresh_hold)\n\n # Adjusting the lines thickness\n kernel = np.ones((line_thickness, line_thickness), np.uint8)\n return cv2.dilate(inverted, kernel)\n\n\ndef __get_expanded_images(img, expand_ratio):\n nr_rows = 9\n expanded_images = []\n width = int(np.floor(min(img.shape) / nr_rows))\n height = width\n for row in range(1, 1 + nr_rows):\n for col in range(1, 1 + nr_rows):\n # Only expand in directions where there is something, for example, do not expand upwards if the affected\n # image is already at the first row.\n adj_height_upper = adj_height_lower = adj_width_left = adj_width_right = 0\n if row != 1 and row != 9:\n adj_height_upper = int(height / expand_ratio) - height\n if row != 9:\n adj_height_lower = int(height * expand_ratio) - height\n if col != 1:\n adj_width_left = int(width / expand_ratio) - width\n if col != 9:\n adj_width_right = int(width * expand_ratio) - width\n from_row = (row - 1) * height + adj_height_upper\n to_row = row * height + adj_height_lower\n\n # Making sure that all rows at the bottom will be included\n if row == 9:\n to_row = img.shape[0]\n expanded_image = np.zeros((int((to_row - from_row)), width + (adj_width_right - adj_width_left)))\n\n # Expands each image\n for index, k in enumerate(range(from_row, to_row)):\n expanded_image[index] = img[k][(col - 1) * width + adj_width_left:col * width + adj_width_right]\n expanded_images.append(expanded_image)\n return expanded_images\n"
] |
[
[
"numpy.zeros",
"numpy.ones"
]
] |
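`__preprocess_image` above blurs, adaptively thresholds, inverts, and dilates the board image so the grid lines become thick enough for contour detection. A self-contained sketch on a synthetic image (the OpenCV calls mirror the ones in the code; the input is random noise purely for illustration):

```python
# Sketch of the board preprocessing chain; the input image is synthetic.
import numpy as np
import cv2

img = (np.random.rand(450, 450) * 255).astype(np.uint8)
blurred = cv2.GaussianBlur(img, (9, 9), 0)
thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 11, 4)
inverted = cv2.bitwise_not(thresh)
kernel = np.ones((3, 3), np.uint8)          # line_thickness = 3
thickened = cv2.dilate(inverted, kernel)    # thicker lines ease contour finding
print(thickened.shape, thickened.dtype)
```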
omri-haim/IML.HUJI
|
[
"24584be25b0bdb3f29044c6089c36c38ec613b47"
] |
[
"IMLearn/learners/classifiers/perceptron.py"
] |
[
"from __future__ import annotations\nfrom typing import Callable\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\n\n\ndef default_callback(fit: Perceptron, x: np.ndarray, y: int):\n pass\n\n\nclass Perceptron(BaseEstimator):\n \"\"\"\n Perceptron half-space classifier\n\n Finds a separating hyperplane for given linearly separable data.\n\n Attributes\n ----------\n include_intercept: bool, default = True\n Should fitted model include an intercept or not\n\n max_iter_: int, default = 1000\n Maximum number of passes over training data\n\n coefs_: ndarray of shape (n_features,) or (n_features+1,)\n Coefficients vector fitted by Perceptron algorithm. To be set in\n `Perceptron.fit` function.\n\n callback_: Callable[[Perceptron, np.ndarray, int], None]\n A callable to be called after each update of the model while fitting to given data\n Callable function should receive as input a Perceptron instance, current sample and current response\n \"\"\"\n def __init__(self,\n include_intercept: bool = True,\n max_iter: int = 1000,\n callback: Callable[[Perceptron, np.ndarray, int], None] = default_callback):\n \"\"\"\n Instantiate a Perceptron classifier\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n max_iter: int, default = 1000\n Maximum number of passes over training data\n\n callback: Callable[[Perceptron, np.ndarray, int], None]\n A callable to be called after each update of the model while fitting to given data\n Callable function should receive as input a Perceptron instance, current sample and current response\n \"\"\"\n super().__init__()\n self.include_intercept_ = include_intercept\n self.max_iter_ = max_iter\n self.callback_ = callback\n self.coefs_ = None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit a halfspace to given samples. 
Iterate over given data as long as there\n exists a sample misclassified or that did not reach `self.max_iter_`\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.fit_intercept_`\n \"\"\"\n if self.include_intercept_:\n x0 = np.ones(X.shape[0])\n X = np.column_stack((x0, X))\n\n self.coefs_ = np.zeros(X.shape[1])\n\n t = 0\n all_fitted = False\n while t < self.max_iter_ and all_fitted is False:\n classification_err = y * (X @ self.coefs_)\n\n all_fitted = True\n i = 0\n while i < len(y) and all_fitted is True:\n if classification_err[i] <= 0:\n all_fitted = False\n self.coefs_ += y[i]*X[i, :]\n self.fitted_ = True\n self.callback_(self, X, y)\n # self.callback_(self, X[i,:], y[i])\n i += 1\n t += 1\n print(t)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n if self.include_intercept_ and X.ndim > 1 and X.shape[1] != len(self.coefs_):\n x0 = np.ones(X.shape[0])\n X = np.column_stack((x0,X))\n\n y_pred = 2 * ((X @ self.coefs_) >= 0) - 1\n return y_pred\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n from ...metrics import misclassification_error\n y_pred = self._predict(X)\n loss = misclassification_error(y, y_pred)\n return loss\n"
] |
[
[
"numpy.zeros",
"numpy.column_stack",
"numpy.ones"
]
] |
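`Perceptron._fit` above prepends an intercept column with `np.column_stack` and adds `y_i * x_i` for a misclassified sample (one with `y * <w, x> <= 0`) until all margins are positive or the iteration cap is hit. A compact sketch on a tiny separable toy set (the data are made up):

```python
# Sketch of the perceptron update with an intercept column; toy data only.
import numpy as np

X = np.array([[2.0, 1.0], [-1.0, -2.0], [1.5, -0.5]])
y = np.array([1, -1, 1])

X = np.column_stack((np.ones(X.shape[0]), X))   # prepend intercept column
w = np.zeros(X.shape[1])

for _ in range(1000):                           # max_iter
    margins = y * (X @ w)
    misclassified = np.where(margins <= 0)[0]
    if misclassified.size == 0:
        break
    i = misclassified[0]
    w += y[i] * X[i, :]                         # perceptron update
print(w, np.sign(X @ w))                        # recovered weights and predictions
```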
amirsarikhani/NEST
|
[
"2771f6593bca0827489359c4129db9eea439d036",
"2771f6593bca0827489359c4129db9eea439d036"
] |
[
"Community_Water_Model/CWATM_analyze/plot_indus5min_precipitation_worldclim.py",
"Community_Water_Model/CWATM_analyze/plot_indus5min_temperature_worldclim.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 14 12:12:11 2018\n\n@author: wangy\n\nCompare the annual mean precipitation in the Indus River Basin in the WorldClim\ndataset and the WFDEI dataset\n\"\"\"\n\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib.colors as colors\nimport rasterio.features as rf\nfrom shapely.geometry import shape\nimport fiona\nfrom affine import Affine\nfrom calcAffine import calcAffine\n\ndom = [31,28.25,31,30,31,30,31,31,30,31,30,31]\n\n\n# ---- mask to the Indus River Basin\ndef create_mask(var_lon, var_lat):\n c = fiona.open(os.path.join('..', '..', 'hydrosheds-d875a5caec4b04119dbc', \\\n 'as_bas_15s_beta', 'Indus_River_Basin.shp'))\n pol = c.next()\n geom = shape(pol['geometry'])\n c.close()\n\n x, y = np.meshgrid(np.arange(0, len(var_lon)), np.arange(0, len(var_lat)))\n u, v = np.meshgrid(var_lon, var_lat)\n af = calcAffine(x.reshape(-1), y.reshape(-1), u.reshape(-1), v.reshape(-1))\n \n mask = rf.rasterize([(geom, 1)], out_shape=(len(var_lon), len(var_lat)),\n fill=0, all_touched=True, dtype=np.uint8, \\\n transform=Affine(*af))\n return mask\n\n\n# WFDEI\noutpath = os.path.join('..', 'CWATM_data', 'climate', 'wfdei')\nf = xr.open_dataset(os.path.join(outpath, 'pr_gpcc_watch+wfdei_2011_2013.nc4'), decode_times=True)\nprecip_clim = f.pr.sel({'lon': f.lon[(f.lon.values > 65.) * \\\n (f.lon.values < 85.)], \\\n 'lat': f.lat[(f.lat.values > 20.) * \\\n (f.lat.values < 40.)]}).copy()\nprecip_clim = precip_clim.groupby('time.month').mean(dim='time')\nprecip_clim = precip_clim.rename({'month': 'time'})\nprecip_clim = precip_clim.transpose('time', 'lon', 'lat')\nmask = create_mask(precip_clim.lon.values, precip_clim.lat.values)\nf.close()\n\n\n# World Clim\noutpath2 = os.path.join('..', 'CWATM_data', 'cwatm_input5min', 'meteo')\nf = xr.open_dataset(os.path.join(outpath2, 'wc2.0_5m_prec.nc'))\nprecip_wc = f.wc_prec.sel({'lon': f.lon[(f.lon.values > 65.) * \\\n (f.lon.values < 85.)], \\\n 'lat': f.lat[(f.lat.values > 20.) 
* \\\n (f.lat.values < 40.)]}).copy()\nprecip_wc = precip_wc.transpose('time', 'lon', 'lat')\nmask2 = create_mask(precip_wc.lon.values, precip_wc.lat.values)\nf.close()\n\n\ndef draw_precip(dataset, scale, mask, flag=False):\n bounds = np.array([0., 30., 50., 70., 90., 125., 175., 250., 350., 475., \\\n 625., 800., 1200.])\n norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)\n\n n = int(dataset.shape[0]/3)\n fig, axes = plt.subplots(n, 3, figsize=(12, n*3))\n axes = axes.flat\n for count in range(dataset.shape[0]):\n # (with converstion to mm/month)\n if flag:\n monthdata = dataset[{'time':count}].where(mask) * scale\n else:\n monthdata = dataset[{'time':count}].where(mask) * dom[count] * scale\n\n X, Y = np.meshgrid(list(monthdata.lon.values), list(monthdata.lat.values))\n \n im = axes[count].pcolormesh(X, Y, \\\n monthdata.values, cmap='Spectral_r', norm=norm)\n \n axes[count].set_title('Month = '+str(count+1))\n\n fig.subplots_adjust(hspace=0.25, bottom=0.05)\n cbar_ax = fig.add_axes([0.15, 0., 0.7, 0.01])\n cbar = fig.colorbar(im, ax=axes[0], cax=cbar_ax, \\\n ticks = (bounds[1:] + bounds[:-1])/2, \\\n orientation = 'horizontal', label='Precipitation [mm/month]')\n return fig, axes, cbar\ndraw_precip(precip_clim[{'time': range(12)}], 864000., mask, True)\ndraw_precip(precip_wc[{'time': range(12)}], 1., mask2, True)\n\n\ndef draw_precip_mean(dataset, scale, mask, flag=False):\n bounds = np.array([0., 30., 50., 70., 90., 125., 175., 250., 350., 475., \\\n 625., 800., 1200.])\n norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)\n\n fig, ax = plt.subplots(figsize=(8,6))\n for count in range(precip_clim.shape[0]):\n # (with converstion to mm/month)\n if flag:\n monthdata = dataset[{'time':count}].where(mask) * scale\n else:\n monthdata = dataset[{'time':count}].where(mask) * dom[count] * scale\n if (count==0):\n yeardata = monthdata\n else:\n yeardata += monthdata\n\n X, Y = np.meshgrid(list(yeardata.lon.values), list(yeardata.lat.values))\n\n im = ax.pcolormesh(X, Y, \\\n yeardata.values, cmap='Spectral_r', norm=norm)\n ax.set_title('Climatology')\n\n fig.subplots_adjust(bottom=0.1)\n cbar_ax = fig.add_axes([0.15, 0.05, 0.7, 0.01])\n cbar = fig.colorbar(im, ax=ax, cax=cbar_ax, \\\n ticks = (bounds[1:] + bounds[:-1])/2, \\\n orientation = 'horizontal', label='Precipitation [mm/month]')\n return fig, ax, cbar\n\ndraw_precip_mean(precip_clim, 864000., mask, True)\ndraw_precip_mean(precip_wc, 1., mask2, True)\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 14 12:12:11 2018\n\n@author: wangy\n\nCompare the annual mean temperature in the Indus River Basin in the WorldClim\ndataset and the WFDEI dataset\n\"\"\"\n\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib.colors as colors\nimport rasterio.features as rf\nfrom shapely.geometry import shape\nimport fiona\nfrom affine import Affine\nfrom calcAffine import calcAffine\n\ndom = [31,28.25,31,30,31,30,31,31,30,31,30,31]\n\n\n# ---- mask to the Indus River Basin\ndef create_mask(var_lon, var_lat):\n c = fiona.open(os.path.join('..', '..', 'hydrosheds-d875a5caec4b04119dbc', \\\n 'as_bas_15s_beta', 'Indus_River_Basin.shp'))\n pol = c.next()\n geom = shape(pol['geometry'])\n c.close()\n\n x, y = np.meshgrid(np.arange(0, len(var_lon)), np.arange(0, len(var_lat)))\n u, v = np.meshgrid(var_lon, var_lat)\n af = calcAffine(x.reshape(-1), y.reshape(-1), u.reshape(-1), v.reshape(-1))\n \n mask = rf.rasterize([(geom, 1)], out_shape=(len(var_lon), len(var_lat)),\n fill=0, all_touched=True, dtype=np.uint8, \\\n transform=Affine(*af))\n return mask\n\n\n# WFDEI\noutpath = os.path.join('..', 'CWATM_data', 'climate', 'wfdei')\nf = xr.open_dataset(os.path.join(outpath, 'tavg.nc'), decode_times=True)\ntavg_clim = f.tavg.sel({'lon': f.lon[(f.lon.values > 65.) * \\\n (f.lon.values < 85.)], \\\n 'lat': f.lat[(f.lat.values > 20.) * \\\n (f.lat.values < 40.)]}).copy()\ntavg_clim = tavg_clim.groupby('time.month').mean(dim='time')\ntavg_clim = tavg_clim.rename({'month': 'time'})\ntavg_clim = tavg_clim.transpose('time', 'lon', 'lat')\nmask = create_mask(tavg_clim.lon.values, tavg_clim.lat.values)\nf.close()\n\n\n# World Clim\noutpath2 = os.path.join('..', 'CWATM_data', 'cwatm_input5min', 'meteo')\nf = xr.open_dataset(os.path.join(outpath2, 'worldclim_tavg.nc'))\ntavg_wc = f.wc_tavg.sel({'lon': f.lon[(f.lon.values > 65.) * \\\n (f.lon.values < 85.)], \\\n 'lat': f.lat[(f.lat.values > 20.) 
* \\\n (f.lat.values < 40.)]}).copy()\ntavg_wc = tavg_wc.transpose('time', 'lon', 'lat')\nmask2 = create_mask(tavg_wc.lon.values, tavg_wc.lat.values)\nf.close()\n\n\ndef draw_tavg(dataset, scale, mask):\n bounds = np.arange(-10., 55., 5)\n norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)\n\n n = int(dataset.shape[0]/3)\n fig, axes = plt.subplots(n, 3, figsize=(12, n*3))\n axes = axes.flat\n for count in range(dataset.shape[0]):\n # (with converstion to degC)\n monthdata = dataset[{'time':count}].where(mask) - scale\n\n X, Y = np.meshgrid(list(monthdata.lon.values), list(monthdata.lat.values))\n \n im = axes[count].pcolormesh(X, Y, \\\n monthdata.values, cmap='Spectral_r', norm=norm)\n\n axes[count].set_title('Month = '+str(count+1))\n\n fig.subplots_adjust(hspace=0.25, bottom=0.05)\n cbar_ax = fig.add_axes([0.15, 0., 0.7, 0.01])\n cbar = fig.colorbar(im, ax=axes[0], cax=cbar_ax, \\\n ticks = (bounds[1:] + bounds[:-1])/2, \\\n orientation = 'horizontal', label='Temperature [$^o$C]')\n return fig, axes, cbar\ndraw_tavg(tavg_clim[{'time': range(12)}], 273.15, mask)\ndraw_tavg(tavg_wc[{'time': range(12)}], 273.15, mask2)\n\n\ndef draw_tavg_mean(dataset, scale, mask):\n bounds = np.arange(-10., 55., 5)\n norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)\n\n fig, ax = plt.subplots(figsize=(8,6))\n for count in range(tavg_clim.shape[0]):\n # (with converstion to degC)\n monthdata = (dataset[{'time':count}].where(mask) - scale) * dom[count]\n if (count==0):\n yeardata = monthdata\n else:\n yeardata += monthdata\n yeardata /= sum(dom)\n\n X, Y = np.meshgrid(list(yeardata.lon.values), list(yeardata.lat.values))\n\n im = ax.pcolormesh(X, Y, \\\n yeardata.values, cmap='Spectral_r', norm=norm)\n ax.set_title('Climatology')\n\n fig.subplots_adjust(bottom=0.1)\n cbar_ax = fig.add_axes([0.15, 0.05, 0.7, 0.01])\n cbar = fig.colorbar(im, ax=ax, cax=cbar_ax, \\\n ticks = (bounds[1:] + bounds[:-1])/2, \\\n orientation = 'horizontal', label='Temperature [$^o$C]')\n return fig, ax, cbar\n\ndraw_tavg_mean(tavg_clim, 273.15, mask)\ndraw_tavg_mean(tavg_wc, 273.15, mask2)"
] |
[
[
"numpy.array",
"numpy.meshgrid",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.subplots"
],
[
"numpy.arange",
"numpy.meshgrid",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.subplots"
]
] |
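Both scripts in the record above share one plotting pattern: a `matplotlib.colors.BoundaryNorm` built from fixed class bounds, applied to a `pcolormesh` over a `numpy.meshgrid` of lon/lat coordinates. A hedged, self-contained sketch of that pattern follows; the field is synthetic and the value ranges are only illustrative, no WFDEI or WorldClim files are read.

```python
# Discretized colormap over a lon/lat grid, mirroring the precipitation plots above.
import numpy as np
import matplotlib.colors as colors
import matplotlib.pyplot as plt

lon = np.arange(65.0, 85.0, 0.5)
lat = np.arange(20.0, 40.0, 0.5)
X, Y = np.meshgrid(lon, lat)
field = 600.0 * np.exp(-((X - 75.0) ** 2 + (Y - 30.0) ** 2) / 50.0)  # synthetic "precipitation"

bounds = np.array([0., 30., 50., 70., 90., 125., 175., 250., 350., 475., 625., 800., 1200.])
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)

fig, ax = plt.subplots(figsize=(6, 5))
im = ax.pcolormesh(X, Y, field, cmap='Spectral_r', norm=norm)
fig.colorbar(im, ax=ax, ticks=(bounds[1:] + bounds[:-1]) / 2,
             orientation='horizontal', label='Precipitation [mm/month]')
plt.show()
```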
sWizad/TensoRF
|
[
"f7782140d6493b841cc07cad995ee10d56250fa2"
] |
[
"models/hashgrid.py"
] |
[
"# hashgrid based model\nimport torch\nimport torch.nn.functional as F\n\nimport tinycudann as tcnn\n\nfrom .tensoRF import TensorVMSplit\n\nclass HashGridDecomposition(TensorVMSplit):\n \"\"\"\n a drop-in replacement for for VM decompositon where we factor into hashgrid instead\n \"\"\"\n def __init__(self, *args, **kwargs):\n print(\"Model: HashGridDecomposition\")\n super(TensorVMSplit, self).__init__(*args, **kwargs)\n \n def init_svd_volume(self, res, device):\n self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)\n self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)\n self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)\n print(self.density_line)\n print(\"Stop at initial\")\n exit()\n\n\n def init_one_svd(self, n_component, gridSize, scale, device):\n plane_coef, line_coef = [], []\n for i in range(len(self.vecMode)):\n vec_id = self.vecMode[i]\n mat_id_0, mat_id_1 = self.matMode[i]\n plane_coef.append(torch.nn.Parameter(\n scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0])))) #\n line_coef.append(\n torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))\n\n return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)\n\n def get_optparam_groups(self, lr_init = 0.02, lr_basis = 0.001):\n raise NotImplementedError(\"Need to support hashgrid_soon\") \n # lr_init_spatialxyz = lr_init\n # lr_init_network = lr_basis\n grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz}, {'params': self.density_plane, 'lr': lr_init_spatialxyz},\n {'params': self.app_line, 'lr': lr_init_spatialxyz}, {'params': self.app_plane, 'lr': lr_init_spatialxyz},\n {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]\n if isinstance(self.renderModule, torch.nn.Module):\n grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]\n return grad_vars\n\n def compute_features(self, xyz_sampled):\n raise NotImplementedError(\"Need to implement how to input \")\n return sigma_feature, app_features\n\n def compute_densityfeature(self, xyz_sampled):\n \"\"\"\n @params xyz_sampled scale in [-1,1] #[num_ray, 3]\n @return sigma_feature #[num_ray]\n \"\"\"\n raise NotImplementedError(\"Need to implement how to input \")\n return sigma_feature\n\n def compute_appfeature(self, xyz_sampled):\n return app_features\n\n @torch.no_grad()\n def upsample_volume_grid(self, res_target):\n # Hashgrid is not support UPsample size\n self.update_stepSize(res_target)\n print(f'upsamping to {res_target}')\n\n\n @torch.no_grad()\n def shrink(self, new_aabb):\n print(\"====> shrinking ...\")\n xyz_min, xyz_max = new_aabb\n t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units\n # print(new_aabb, self.aabb)\n # print(t_l, b_r,self.alphaMask.alpha_volume.shape)\n t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1\n b_r = torch.stack([b_r, self.gridSize]).amin(0)\n\n \"\"\"\n raise NotImplementedError()\n for i in range(len(self.vecMode)):\n mode0 = self.vecMode[i]\n self.density_line[i] = torch.nn.Parameter(\n self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]\n )\n self.app_line[i] = torch.nn.Parameter(\n self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]\n )\n mode0, mode1 = self.matMode[i]\n self.density_plane[i] = torch.nn.Parameter(\n 
self.density_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]\n )\n self.app_plane[i] = torch.nn.Parameter(\n self.app_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]\n )\n \"\"\"\n\n\n if not torch.all(self.alphaMask.gridSize == self.gridSize):\n t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)\n correct_aabb = torch.zeros_like(new_aabb)\n correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]\n correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]\n print(\"aabb\", new_aabb, \"\\ncorrect aabb\", correct_aabb)\n new_aabb = correct_aabb\n\n newSize = b_r - t_l\n self.aabb = new_aabb\n self.update_stepSize((newSize[0], newSize[1], newSize[2]))\n\n def feature2density(self, density_features):\n raise NotImplementedError()\n if self.fea2denseAct == \"softplus\":\n return F.softplus(density_features+self.density_shift)\n elif self.fea2denseAct == \"relu\":\n return F.relu(density_features)\n\n def density_L1(self):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n\n def TV_loss_density(self, reg):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n\n def TV_loss_app(self, reg):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n\n def vectorDiffs(self, vector_comps):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n\n def vector_comp_diffs(self):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n \n @torch.no_grad()\n def up_sampling_VM(self, plane_coef, line_coef, res_target):\n raise NotImplementedError(\"HashGrid shouldn't call this\")\n\n \n# experiment 1: Given same number of parameters. Hashgrid or VM perform better\n\n\n# experiment 2: Given only same number of parameters. Hashgrid or VM perform better?\n#class HashGridDecompositionMatchResolution(HashGridDecomposition):\n# def __init__(self, *args, **kwargs):\n# print(\"Model: HashGridDecompositionMatchResolution\")\n# super(TensorVMSplit, self).__init__(*args, **kwargs)\n\n"
] |
[
[
"torch.all",
"torch.randn",
"torch.round",
"torch.zeros_like",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.nn.ParameterList",
"torch.stack",
"torch.nn.functional.softplus"
]
] |
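The `init_one_svd` method in the record above registers per-axis plane and line factors as trainable parameters. A small sketch of that initialisation pattern, assuming plain PyTorch only (no `tinycudann` and no `TensorVMSplit` base class; the module name `ToyVMFactors` is hypothetical): tensors created with `torch.randn` are wrapped in `torch.nn.ParameterList` so the optimizer sees them.

```python
# Register per-axis plane/line factor tensors as module parameters.
import torch
import torch.nn as nn

class ToyVMFactors(nn.Module):
    def __init__(self, n_comp=(16, 16, 16), grid=(64, 64, 64), scale=0.1):
        super().__init__()
        mat_mode = [(0, 1), (0, 2), (1, 2)]    # plane axes per component
        vec_mode = [2, 1, 0]                   # complementary line axis
        planes, lines = [], []
        for i in range(3):
            m0, m1 = mat_mode[i]
            planes.append(nn.Parameter(scale * torch.randn(1, n_comp[i], grid[m1], grid[m0])))
            lines.append(nn.Parameter(scale * torch.randn(1, n_comp[i], grid[vec_mode[i]], 1)))
        self.planes = nn.ParameterList(planes)
        self.lines = nn.ParameterList(lines)

m = ToyVMFactors()
print(sum(p.numel() for p in m.parameters()))  # all plane/line factors are tracked
```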
tbattz/aws-price-collator
|
[
"76be52e8af9bedb4171bd805dbf73e1a38c00e14"
] |
[
"priceParsing/spotPrices.py"
] |
[
"from priceParsing.baseParser import BaseParser\n\nimport os\nimport time\nimport boto3\nimport pandas as pd\n\n\n\nclass SpotPrices(BaseParser):\n\t\"\"\"\n\tParsers current spot prices, or reads existing spot prices from csv file.\n\t\"\"\"\n\tdef __init__(self, apiKeyFilePath=None, csvDir='csvFiles', regionId='ap-southeast-2', subRegion='a', loadCsv=False):\n\t\t\"\"\"\n\t\t:param apiKeyFilePath: A path to the api key file.\n\t\t:param csvDir: The directory to read/write csv to/from.\n\t\t:param regionId: The region Id name, e.g. 'ap-southeast-2'.\n\t\t:param subRegion: The sub region string, e.g. 'a', 'b', 'c'.\n\t\t:param loadCsv: True if to load existing data from csv file.\n\t\t\"\"\"\n\t\tcsvFile = 'aws-spot-prices-' + regionId + '-' + subRegion + '.csv'\n\t\tsuper().__init__(csvDir=csvDir, csvFile=csvFile)\n\t\tself.apiKeyFilePath = apiKeyFilePath\n\t\tself.csvDir = csvDir\n\t\tself.csvFile = csvFile\n\t\tself.regionId = regionId\n\t\tself.subRegion = subRegion\n\t\tself.loadCsv = loadCsv\n\t\tself.apiKeyFilePath = apiKeyFilePath\n\n\t\tself.df = None\n\n\t\tif not self.loadCsv:\n\t\t\tself.parseSpotPricesUsingAPI()\n\t\telse:\n\t\t\tself.loadFromCsv()\n\n\n\tdef parseSpotPricesUsingAPI(self):\n\t\t\"\"\"\n\t\tParse the current spot prices using the api.\n\n\t\t:return: A dataframe of the current spot prices.\n\t\t\"\"\"\n\t\tstartTime = time.time()\n\t\t# Read keys file\n\t\tkeys = pd.read_csv(self.apiKeyFilePath)\n\t\tprint('Read keys file.')\n\n\t\t# Authenticate Client\n\t\tclient = boto3.client('ec2', region_name=self.regionId,\n\t\t\t\t\t\t\t aws_access_key_id=keys['Access key ID'].values[0],\n\t\t\t\t\t\t\t aws_secret_access_key=keys['Secret access key'].values[0])\n\n\t\t# Get the spot price history\n\t\tprices = client.describe_spot_price_history(MaxResults=600,\n\t\t\t\t\t\t\t\t\t\t\t\t\tProductDescriptions=['Linux/UNIX'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tAvailabilityZone=self.regionId + self.subRegion)\n\n\t\t# Filter older updates\n\t\tinstanceType = []\n\t\tkeepData = []\n\t\tfor price in prices['SpotPriceHistory']:\n\t\t\tnewType = price['InstanceType']\n\t\t\tif newType not in instanceType:\n\t\t\t\tinstanceType.append(newType)\n\t\t\t\tkeepData.append(price)\n\n\t\t# Create dataframe\n\t\tself.df = pd.DataFrame(keepData)\n\t\tself.df = self.df.set_index(['InstanceType'])\n\t\tself.df['SpotPrice'] = self.df['SpotPrice'].astype(float)\n\t\tprint('Read %i spot prices using api.' % self.df.shape[0])\n\n\t\t# Write data to disc\n\t\tfilename = os.path.join(self.csvDir, self.csvFile)\n\t\tself.df.to_csv(filename)\n\t\tprint('Wrote', filename)\n\n\t\tendTime = time.time()\n\t\tprint('Elapsed %.2fs' % (endTime - startTime))\n\n\t\treturn self.df\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
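The spot-price parser above keeps only the newest update per instance type and then builds an indexed `pandas.DataFrame` with float prices. A minimal sketch of that post-processing step, using synthetic records shaped like `describe_spot_price_history` entries so no AWS call or credentials are needed:

```python
# Deduplicate a SpotPriceHistory-style payload and index it by InstanceType.
import pandas as pd

history = [  # entries arrive newest first, like response['SpotPriceHistory']
    {"InstanceType": "t3.micro", "SpotPrice": "0.0035", "AvailabilityZone": "ap-southeast-2a"},
    {"InstanceType": "m5.large", "SpotPrice": "0.0450", "AvailabilityZone": "ap-southeast-2a"},
    {"InstanceType": "t3.micro", "SpotPrice": "0.0036", "AvailabilityZone": "ap-southeast-2a"},
]

seen, keep = set(), []
for rec in history:                      # keep the first (newest) hit per instance type
    if rec["InstanceType"] not in seen:
        seen.add(rec["InstanceType"])
        keep.append(rec)

df = pd.DataFrame(keep).set_index("InstanceType")
df["SpotPrice"] = df["SpotPrice"].astype(float)
print(df["SpotPrice"].sort_values())
```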
krishna939/Emotion-Recognition
|
[
"78050f494408989fff0931b9565d12898077f202"
] |
[
"python/sahuja_data_augment.py"
] |
[
"import cv2\r\nimport numpy as np\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\n\r\nsource_directory = 'Test/'\r\ntarget_directory = 'Test_output/'\r\nfor file_name in os.listdir(source_directory):\r\n source_image =(os.path.join(source_directory, file_name))\r\n \r\n index_of_dot = file_name.index('.')\r\n file_name_without_extension = file_name[:index_of_dot]\r\n\r\n img = cv2.imread(source_image,0)\r\n rows,cols = img.shape\r\n i = 1\r\n # Transform 1 - Scaling - Resizing image\r\n dst = cv2.resize(img,None,fx=2, fy=2, interpolation = cv2.INTER_CUBIC)\r\n #target_image =(os.path.join(target_directory, file_name))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 2 - Translation - Shift of (100,50)\r\n M = np.float32([[1,0,100],[0,1,50]])\r\n dst = cv2.warpAffine(img,M,(cols,rows))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 3 - Rotation - Rotate 90\r\n M = cv2.getRotationMatrix2D((cols/2,rows/2),90,1)\r\n dst = cv2.warpAffine(img,M,(cols,rows))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 4 - Rotate 180 / Invert\r\n M = cv2.getRotationMatrix2D((cols/2,rows/2),180,1)\r\n dst = cv2.warpAffine(img,M,(cols,rows))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 5 - Erosion\r\n kernel = np.ones((5,5),np.uint8)\r\n dst = cv2.erode(img,kernel,iterations = 1)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 6 - Dilation\r\n kernel = np.ones((5,5),np.uint8)\r\n dst = cv2.dilate(img,kernel,iterations = 1)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 7 - Opening - Erosion followed by dilation\r\n kernel = np.ones((5,5),np.uint8)\r\n dst = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 8 - Closing - Dilation followed by Erosion\r\n kernel = np.ones((5,5),np.uint8)\r\n dst = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 9 - Affine Transformation\r\n pts1 = np.float32([[50,50],[200,50],[50,200]])\r\n pts2 = np.float32([[10,100],[200,50],[100,250]])\r\n M = cv2.getAffineTransform(pts1,pts2)\r\n dst = cv2.warpAffine(img,M,(cols,rows))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transform 10 - Perspective Transformation\r\n pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])\r\n pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])\r\n M = cv2.getPerspectiveTransform(pts1,pts2)\r\n dst = cv2.warpPerspective(img,M,(300,300))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transformation 11 - Smoothing - Blurring Image using Low-pass filter\r\n\r\n dst = cv2.blur(img,(5,5))\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n \r\n # Transformation 12 - Change Color\r\n\r\n dst = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n\r\n # Transformation 13 - Image Filtering - 2D Convolution\r\n\r\n kernel = 
np.ones((5,5),np.float32)/25\r\n dst = cv2.filter2D(img,-1,kernel)\r\n cv2.imwrite(target_directory+file_name_without_extension+'_'+str(i)+'.jpg', dst)\r\n i=i+1\r\n"
] |
[
[
"numpy.float32",
"numpy.ones"
]
] |
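The augmentation script above chains several OpenCV transforms per input image. A hedged sketch of two of them, run on a synthetic image so no source/target directories are required: an affine translation built from a `numpy.float32` matrix, and erosion with a 5x5 `numpy.ones` kernel.

```python
# Translation and erosion on a synthetic image, mirroring transforms 2 and 5 above.
import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (60, 60), (140, 140), 255, -1)   # a filled white square to transform

rows, cols = img.shape
M = np.float32([[1, 0, 30], [0, 1, 15]])            # shift right 30 px, down 15 px
shifted = cv2.warpAffine(img, M, (cols, rows))

kernel = np.ones((5, 5), np.uint8)
eroded = cv2.erode(img, kernel, iterations=1)

print(shifted.shape, eroded.sum() < img.sum())      # erosion strips the square's border
```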
Zealoe/HRNet-Semantic-Segmentation
|
[
"e5082879d6a46f1eb1127429e9948c80c0e15418"
] |
[
"lib/models/sync_bn/inplace_abn/bn.py"
] |
[
"import os, sys\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as functional\r\n\r\ntry:\r\n from queue import Queue\r\nexcept ImportError:\r\n from Queue import Queue\r\n\r\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(BASE_DIR)\r\nsys.path.append(os.path.join(BASE_DIR, '../src'))\r\nfrom functions import *\r\n\r\n\r\nclass ABN(nn.Module):\r\n \"\"\"Activated Batch Normalization\r\n\r\n This gathers a `BatchNorm2d` and an activation function in a single module\r\n \"\"\"\r\n\r\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation=\"leaky_relu\", slope=0.01):\r\n \"\"\"Creates an Activated Batch Normalization module\r\n\r\n Parameters\r\n ----------\r\n num_features : int\r\n Number of feature channels in the input and output.\r\n eps : float\r\n Small constant to prevent numerical issues.\r\n momentum : float\r\n Momentum factor applied to compute running statistics as.\r\n affine : bool\r\n If `True` apply learned scale and shift transformation after normalization.\r\n activation : str\r\n Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.\r\n slope : float\r\n Negative slope for the `leaky_relu` activation.\r\n \"\"\"\r\n super(ABN, self).__init__()\r\n self.num_features = num_features\r\n self.affine = affine\r\n self.eps = eps\r\n self.momentum = momentum\r\n self.activation = activation\r\n self.slope = slope\r\n if self.affine:\r\n self.weight = nn.Parameter(torch.ones(num_features))\r\n self.bias = nn.Parameter(torch.zeros(num_features))\r\n else:\r\n self.register_parameter('weight', None)\r\n self.register_parameter('bias', None)\r\n self.register_buffer('running_mean', torch.zeros(num_features))\r\n self.register_buffer('running_var', torch.ones(num_features))\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n nn.init.constant_(self.running_mean, 0)\r\n nn.init.constant_(self.running_var, 1)\r\n if self.affine:\r\n nn.init.constant_(self.weight, 1)\r\n nn.init.constant_(self.bias, 0)\r\n\r\n def forward(self, x):\r\n x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,\r\n self.training, self.momentum, self.eps)\r\n\r\n if self.activation == ACT_RELU:\r\n return functional.relu(x, inplace=True)\r\n elif self.activation == ACT_LEAKY_RELU:\r\n return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)\r\n elif self.activation == ACT_ELU:\r\n return functional.elu(x, inplace=True)\r\n else:\r\n return x\r\n\r\n def __repr__(self):\r\n rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \\\r\n ' affine={affine}, activation={activation}'\r\n if self.activation == \"leaky_relu\":\r\n rep += ', slope={slope})'\r\n else:\r\n rep += ')'\r\n return rep.format(name=self.__class__.__name__, **self.__dict__)\r\n\r\n\r\nclass InPlaceABN(ABN):\r\n \"\"\"InPlace Activated Batch Normalization\"\"\"\r\n\r\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation=\"leaky_relu\", slope=0.01):\r\n \"\"\"Creates an InPlace Activated Batch Normalization module\r\n\r\n Parameters\r\n ----------\r\n num_features : int\r\n Number of feature channels in the input and output.\r\n eps : float\r\n Small constant to prevent numerical issues.\r\n momentum : float\r\n Momentum factor applied to compute running statistics as.\r\n affine : bool\r\n If `True` apply learned scale and shift transformation after normalization.\r\n activation : str\r\n Name of the activation functions, one of: `leaky_relu`, `elu` 
or `none`.\r\n slope : float\r\n Negative slope for the `leaky_relu` activation.\r\n \"\"\"\r\n super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope)\r\n\r\n def forward(self, x):\r\n return inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var,\r\n self.training, self.momentum, self.eps, self.activation, self.slope)\r\n\r\n\r\nclass InPlaceABNSync(ABN):\r\n \"\"\"InPlace Activated Batch Normalization with cross-GPU synchronization\r\n\r\n This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DataParallel`.\r\n \"\"\"\r\n\r\n def __init__(self, num_features, devices=None, eps=1e-5, momentum=0.1, affine=True, activation=\"leaky_relu\",\r\n slope=0.01):\r\n \"\"\"Creates a synchronized, InPlace Activated Batch Normalization module\r\n\r\n Parameters\r\n ----------\r\n num_features : int\r\n Number of feature channels in the input and output.\r\n devices : list of int or None\r\n IDs of the GPUs that will run the replicas of this module.\r\n eps : float\r\n Small constant to prevent numerical issues.\r\n momentum : float\r\n Momentum factor applied to compute running statistics as.\r\n affine : bool\r\n If `True` apply learned scale and shift transformation after normalization.\r\n activation : str\r\n Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.\r\n slope : float\r\n Negative slope for the `leaky_relu` activation.\r\n \"\"\"\r\n super(InPlaceABNSync, self).__init__(num_features, eps, momentum, affine, activation, slope)\r\n self.devices = devices if devices else list(range(torch.cuda.device_count()))\r\n\r\n # Initialize queues\r\n self.worker_ids = self.devices[1:]\r\n self.master_queue = Queue(len(self.worker_ids))\r\n self.worker_queues = [Queue(1) for _ in self.worker_ids]\r\n\r\n def forward(self, x):\r\n if x.get_device() == self.devices[0]:\r\n # Master mode\r\n extra = {\r\n \"is_master\": True,\r\n \"master_queue\": self.master_queue,\r\n \"worker_queues\": self.worker_queues,\r\n \"worker_ids\": self.worker_ids\r\n }\r\n else:\r\n # Worker mode\r\n extra = {\r\n \"is_master\": False,\r\n \"master_queue\": self.master_queue,\r\n \"worker_queue\": self.worker_queues[self.worker_ids.index(x.get_device())]\r\n }\r\n\r\n return inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var,\r\n extra, self.training, self.momentum, self.eps, self.activation, self.slope)\r\n\r\n def __repr__(self):\r\n rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \\\r\n ' affine={affine}, devices={devices}, activation={activation}'\r\n if self.activation == \"leaky_relu\":\r\n rep += ', slope={slope})'\r\n else:\r\n rep += ')'\r\n return rep.format(name=self.__class__.__name__, **self.__dict__)\r\n"
] |
[
[
"torch.nn.functional.batch_norm",
"torch.ones",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.functional.relu",
"torch.nn.functional.leaky_relu",
"torch.nn.functional.elu",
"torch.cuda.device_count"
]
] |
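The `ABN` forward pass above is batch normalization with registered running statistics followed by a configurable activation. A compact sketch of that pass, assuming plain PyTorch only (no in-place or cross-GPU synchronised extension; `TinyABN` is a name of my own):

```python
# Batch norm with running-stat buffers, then leaky ReLU, as in ABN.forward above.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyABN(nn.Module):
    def __init__(self, num_features, eps=1e-5, momentum=0.1, slope=0.01):
        super().__init__()
        self.eps, self.momentum, self.slope = eps, momentum, slope
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def forward(self, x):
        x = F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                         self.training, self.momentum, self.eps)
        return F.leaky_relu(x, negative_slope=self.slope, inplace=True)

out = TinyABN(8)(torch.randn(4, 8, 16, 16))
print(out.shape)  # torch.Size([4, 8, 16, 16])
```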
helderdaniel/hdlib
|
[
"ae766db53dc33e3cc6c1bb233073559c0b610a52"
] |
[
"tensorflow/annmodel.py"
] |
[
"'''\nDefine ANN models and algorithms base class\nDefines also some common network topologies\n\nv0.1 jan 2019\nv0.2 aug 2019\nhdaniel@ualg.pt\n'''\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nimport pandas as pd\nfrom hdlib.time.stopwatch import Stopwatch\nimport hdlib.tensorflow.plot as tfplt\n\n\nclass ANNModel():\n ''' \n Define ANN models and algorithms base class\n Default constructor calls self.define() method on descedant class,\n with default arguments\n\n self.define() method must be defined in each subclass\n '''\n\n#define the network model\n def __init__(self, *args, **kwargs):\n '''\n Base class constructor calls define() in each sub class with any arguments.\n Sets also callbacks to None.\n Callbacks can be used to autosave model during trainning. See autosave()\n\n If called with no arguments from the subclass, calls define() on the subclass\n with default arguments.\n '''\n self._callbacks = []\n self.define(*args, **kwargs)\n\n #Define the net\n def define(self, save=None):\n '''\n Must be redefined in each subclass to define the network topology.\n See MLP subclass example\n '''\n # module ABC introduces decorator @abstractmethod\n # but has problems with multiple inheritance if a descendan inherites from metaclass\n # ABCmeta (because base abstact class must) and form a descendadnt of other metacalss\n #\n # raising an exception have also issues with multiple inheritance, because\n # the use of super() is not possible (raises exception=), making multiple inheritance \n # impossible to choose upper class\n #raise NotImplementedError('Should be implemented by concrete sub class')\n #\n #better just pass\n pass\n\n # compile the model\n # Define training optimizer function and learning rate,\n # eg.: sgd: Stochastic Gradient Descent\n #['accuracy', 'mean_absolute_error', 'mean_squared_error']\n #or: acc , mae, mse\n def compile(self, optimizer='sgd', loss='mse', metrics='acc', *args, **kwargs):\n self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics, *args, **kwargs)\n\n def save(self, filename):\n self.model.save(filename)\n \n def load(self, filename):\n self.model = tf.keras.models.load_model(filename)\n\n #https://machinelearningmastery.com/how-to-stop-training-deep-neural-networks-at-the-right-time-using-early-stopping/\n def earlyStop(self, filename, monitor='val_loss', minDelta=0.0001, patience=10, epochPeriod=1, verbose=1):\n estop = tf.keras.callbacks.EarlyStopping(\n monitor=monitor, min_delta=minDelta, patience=patience, \n verbose=verbose)\n self._callbacks.append(estop)\n self.autosave(self, filename, epochPeriod=epochPeriod, verbose=verbose)\n\n #https://keras.io/callbacks/#reducelronplateau\n #if minLR == 0 it keeps reducing \n #cooldown: wait before resuming normal operation (i.e. beginning to monitor if there is any improvement in the monitored metric over a patience epochs).\n #if cooldown=5 after the learning rate is reduced, the algorithm waits 5 epochs before starting to monitor the metrics again. 
\n #So if there is no improvement in the metric and patience=10, the learning rate will be reduced again after 15 epochs.\n def adaptLR(self, monitor='val_loss', factor=0.1, minDelta=0.0001, minLR=0, cooldown=0, patience=5):\n reducef = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=factor, min_delta=minDelta, \n min_lr=minLR, cooldown=cooldown, patience=patience, verbose=verbose)\n self._callbacks.append(reducef)\n \n def autosave(self, filename, epochPeriod=1, verbose=1):\n '''\n verbose = 0 do not save anything\n verbose = 1 save the last best model only\n verbose = 2 save each best model as is obtained\n verbose = 3 save at each epoch period\n\n Note: by default save_weights_only==False, so save allways whole model\n also by default monitors loss function to infer models (monitor='val_loss') \n and mode='auto' to infer direction (mode can be 'min' or 'max')\n '''\n if verbose == 1:\n save = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=verbose, period=epochPeriod, save_best_only=True)\n if verbose == 2:\n filename += '-{epoch:04d}-{val_loss:.6f}.h5'\n save = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=verbose, period=epochPeriod, save_best_only=True)\n if verbose == 3:\n filename += '-{epoch:04d}-{val_loss:.6f}.h5'\n save = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=verbose, period=epochPeriod)\n if verbose != 0:\n self._callbacks.append(save)\n\n #Train the model\n def train(self, trainXs, trainYs, epochs=10, validationSplit=0.2, batchSize=128, \n verbose=0, debug=False):\n chrono = Stopwatch()\n chrono.reset() \n\n #Note: If batchSize, after split in train and validate, train data is smaller than batch\n #gives error at fit():\n ##AttributeError: 'ProgbarLogger' object has no attribute 'log_values\n #https://github.com/keras-team/keras/issues/3657#issuecomment-360522232\n response = self.model.fit(trainXs, trainYs, batch_size=batchSize, \n validation_split=validationSplit,\n epochs=epochs, callbacks=self._callbacks, verbose=verbose)\n chrono.lap()\n loss = response.history['loss'][0]\n print('Train Loss:', loss, 'compute time: ' + str(chrono.read(1)))\n print(response.history.keys())\n if debug:\n tfplt.plotTrainHistory(response) \n\n #Predict\n def predict(self, testXs, testYs):\n #predict\n predict = self.model.predict(testXs)\n #set time index\n predict = pd.DataFrame(predict, columns=['predict'], index=testYs.index)\n return predict\n\n def __str__(self):\n rows = []\n self.model.summary(print_fn=lambda x: rows.append(x))\n output = \"\\n\".join(rows)\n return output\n\n\nclass MLP1h(ANNModel):\n '''\n Define a Multi Layer Perceptron with one hidden kayer\n '''\n def define(self, numInputs=1, numOutputs=1, numNeurons=1, activation=tf.nn.relu, save=None):\n super().define(save)\n self.model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(numNeurons, activation=activation, input_shape=(numInputs,)), # input shape required\n tf.keras.layers.Dense(numNeurons, activation=activation),\n tf.keras.layers.Dense(numOutputs)\n ])\n\nclass MLP(ANNModel):\n '''\n Define a Multi Layer Perceptron with n hidden layers\n '''\n def define(self, numInputs=1, numOutputs=1, numNeurons=[1], activation=tf.nn.relu, save=None):\n super().define(save)\n self.model = tf.keras.models.Sequential()\n self.model.add(tf.keras.layers.Dense(numNeurons[0], activation=activation, input_shape=(numInputs,))) # input shape required\n for n in numNeurons:\n self.model.add(tf.keras.layers.Dense(n, activation=activation))\n 
self.model.add(tf.keras.layers.Dense(numOutputs))"
] |
[
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"pandas.DataFrame",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.models.Sequential"
]
] |
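The `ANNModel`/`MLP` classes above wrap a Keras `Sequential` stack of `Dense` layers plus checkpoint and early-stopping callbacks. A self-contained sketch of that pattern under stated assumptions (the helper `build_mlp`, the synthetic data, and the checkpoint filename are mine, not the hdlib API):

```python
# A small Sequential MLP trained with EarlyStopping and ModelCheckpoint callbacks.
import numpy as np
import tensorflow as tf

def build_mlp(num_inputs, num_outputs, hidden=(32, 32), activation='relu'):
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(hidden[0], activation=activation, input_shape=(num_inputs,)))
    for n in hidden[1:]:
        model.add(tf.keras.layers.Dense(n, activation=activation))
    model.add(tf.keras.layers.Dense(num_outputs))
    return model

x = np.random.rand(256, 4).astype(np.float32)
y = x.sum(axis=1, keepdims=True)

model = build_mlp(4, 1)
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=5),
    tf.keras.callbacks.ModelCheckpoint(filepath='best_mlp.h5', save_best_only=True),
]
model.fit(x, y, validation_split=0.2, epochs=10, batch_size=32,
          callbacks=callbacks, verbose=0)
```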
1e100/keras-retinanet
|
[
"38f37d1a3c3c7126b8b2cd1f2c124aa2bf551959"
] |
[
"keras_retinanet/preprocessing/coco.py"
] |
[
"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom ..preprocessing.generator import Generator\nfrom ..utils.image import read_image_bgr\n\nimport os\nimport numpy as np\n\nfrom pycocotools.coco import COCO\n\n\nclass CocoGenerator(Generator):\n \"\"\" Generate data from the COCO dataset.\n\n See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.\n \"\"\"\n\n def __init__(self, data_dir, set_name, **kwargs):\n \"\"\" Initialize a COCO data generator.\n\n Args\n data_dir: Path to where the COCO dataset is stored.\n set_name: Name of the set to parse.\n \"\"\"\n self.data_dir = data_dir\n self.set_name = set_name\n self.coco = COCO(\n os.path.join(data_dir, \"annotations\", \"instances_\" + set_name + \".json\")\n )\n self.image_ids = self.coco.getImgIds()\n\n self.load_classes()\n\n super(CocoGenerator, self).__init__(**kwargs)\n\n def load_classes(self):\n \"\"\" Loads the class to label mapping (and inverse) for COCO.\n \"\"\"\n # load class names (name -> label)\n categories = self.coco.loadCats(self.coco.getCatIds())\n categories.sort(key=lambda x: x[\"id\"])\n\n self.classes = {}\n self.coco_labels = {}\n self.coco_labels_inverse = {}\n for c in categories:\n self.coco_labels[len(self.classes)] = c[\"id\"]\n self.coco_labels_inverse[c[\"id\"]] = len(self.classes)\n self.classes[c[\"name\"]] = len(self.classes)\n\n # also load the reverse (label -> name)\n self.labels = {}\n for key, value in self.classes.items():\n self.labels[value] = key\n\n def size(self):\n \"\"\" Size of the COCO dataset.\n \"\"\"\n return len(self.image_ids)\n\n def num_classes(self):\n \"\"\" Number of classes in the dataset. For COCO this is 80.\n \"\"\"\n return len(self.classes)\n\n def has_label(self, label):\n \"\"\" Return True if label is a known label.\n \"\"\"\n return label in self.labels\n\n def has_name(self, name):\n \"\"\" Returns True if name is a known class.\n \"\"\"\n return name in self.classes\n\n def name_to_label(self, name):\n \"\"\" Map name to label.\n \"\"\"\n return self.classes[name]\n\n def label_to_name(self, label):\n \"\"\" Map label to name.\n \"\"\"\n return self.labels[label]\n\n def coco_label_to_label(self, coco_label):\n \"\"\" Map COCO label to the label as used in the network.\n COCO has some gaps in the order of labels. 
The highest label is 90, but there are 80 classes.\n \"\"\"\n return self.coco_labels_inverse[coco_label]\n\n def coco_label_to_name(self, coco_label):\n \"\"\" Map COCO label to name.\n \"\"\"\n return self.label_to_name(self.coco_label_to_label(coco_label))\n\n def label_to_coco_label(self, label):\n \"\"\" Map label as used by the network to labels as used by COCO.\n \"\"\"\n return self.coco_labels[label]\n\n def image_aspect_ratio(self, image_index):\n \"\"\" Compute the aspect ratio for an image with image_index.\n \"\"\"\n image = self.coco.loadImgs(self.image_ids[image_index])[0]\n return float(image[\"width\"]) / float(image[\"height\"])\n\n def load_image(self, image_index):\n \"\"\" Load an image at the image_index.\n \"\"\"\n image_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n path = os.path.join(\n self.data_dir, \"images\", self.set_name, image_info[\"file_name\"]\n )\n return read_image_bgr(path)\n\n def load_annotations(self, image_index):\n \"\"\" Load annotations for an image_index.\n \"\"\"\n # get ground truth annotations\n annotations_ids = self.coco.getAnnIds(\n imgIds=self.image_ids[image_index], iscrowd=False\n )\n annotations = {\"labels\": np.empty((0,)), \"bboxes\": np.empty((0, 4))}\n\n # some images appear to miss annotations (like image with id 257034)\n if len(annotations_ids) == 0:\n return annotations\n\n # parse annotations\n coco_annotations = self.coco.loadAnns(annotations_ids)\n for idx, a in enumerate(coco_annotations):\n # some annotations have basically no width / height, skip them\n if a[\"bbox\"][2] < 1 or a[\"bbox\"][3] < 1:\n continue\n\n annotations[\"labels\"] = np.concatenate(\n [annotations[\"labels\"], [self.coco_label_to_label(a[\"category_id\"])]],\n axis=0,\n )\n annotations[\"bboxes\"] = np.concatenate(\n [\n annotations[\"bboxes\"],\n [\n [\n a[\"bbox\"][0],\n a[\"bbox\"][1],\n a[\"bbox\"][0] + a[\"bbox\"][2],\n a[\"bbox\"][1] + a[\"bbox\"][3],\n ]\n ],\n ],\n axis=0,\n )\n\n return annotations\n"
] |
[
[
"numpy.concatenate",
"numpy.empty"
]
] |
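`load_annotations` in the record above grows its label and bbox arrays with `numpy.empty` and `numpy.concatenate`, converting COCO `[x, y, w, h]` boxes to `[x1, y1, x2, y2]` and skipping degenerate boxes. A minimal sketch of that accumulation step with toy annotations (no `pycocotools` needed):

```python
# Accumulate labels and corner-format boxes from COCO-style annotations.
import numpy as np

coco_annotations = [
    {"category_id": 1, "bbox": [10.0, 20.0, 30.0, 40.0]},
    {"category_id": 3, "bbox": [0.0, 0.0, 0.5, 5.0]},   # width < 1 -> skipped
]

annotations = {"labels": np.empty((0,)), "bboxes": np.empty((0, 4))}
for a in coco_annotations:
    x, y, w, h = a["bbox"]
    if w < 1 or h < 1:
        continue
    annotations["labels"] = np.concatenate([annotations["labels"], [a["category_id"]]], axis=0)
    annotations["bboxes"] = np.concatenate([annotations["bboxes"], [[x, y, x + w, y + h]]], axis=0)

print(annotations["labels"], annotations["bboxes"])  # [1.] [[10. 20. 40. 60.]]
```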
dqmis/neuralTPPs-1
|
[
"594f86e52faf3895a37301bc8757359afd829227"
] |
[
"tpp/models/decoders/hawkes.py"
] |
[
"import torch as th\nimport torch.nn as nn\n\nfrom typing import Dict, Optional, Tuple\n\nfrom tpp.models.decoders.base.decoder import Decoder\nfrom tpp.utils.events import Events\nfrom tpp.utils.nnplus import non_neg_param\nfrom tpp.processes.hawkes_fast import decoder_fast as hawkes_decoder\n# from tpp.processes.hawkes_slow import decoder_slow as hawkes_decoder\n\n\nclass HawkesDecoder(Decoder):\n \"\"\"A parametric Hawkes Process decoder.\n\n Args:\n marks: The distinct number of marks (classes) for the process. Defaults\n to 1.\n \"\"\"\n def __init__(self, marks: Optional[int] = 1, **kwargs):\n super(HawkesDecoder, self).__init__(name=\"hawkes\", marks=marks)\n self.alpha = nn.Parameter(th.Tensor(self.marks, self.marks))\n self.beta = nn.Parameter(th.Tensor(self.marks, self.marks))\n self.mu = nn.Parameter(th.Tensor(self.marks))\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.uniform_(self.alpha)\n nn.init.uniform_(self.beta)\n nn.init.uniform_(self.mu)\n\n def forward(\n self,\n events: Events,\n query: th.Tensor,\n prev_times: th.Tensor,\n prev_times_idxs: th.LongTensor,\n pos_delta_mask: th.Tensor,\n is_event: th.Tensor,\n representations: th.Tensor,\n representations_mask: Optional[th.Tensor] = None,\n artifacts: Optional[dict] = None\n ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:\n self.alpha.data = non_neg_param(self.alpha.data)\n self.mu.data = non_neg_param(self.mu.data)\n return hawkes_decoder(\n events=events,\n query=query,\n prev_times=prev_times,\n is_event=is_event,\n alpha=self.alpha,\n beta=self.beta,\n mu=self.mu,\n marks=self.marks)\n"
] |
[
[
"torch.nn.init.uniform_",
"torch.Tensor"
]
] |
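The Hawkes decoder above allocates `alpha`, `beta` and `mu` as uninitialised `torch.Tensor` parameters, fills them with `nn.init.uniform_`, and forces `alpha` and `mu` non-negative before decoding. A sketch under stated assumptions: plain PyTorch only, with the repo's `non_neg_param` helper approximated here by a simple clamp, and `ToyHawkesParams` as a hypothetical name.

```python
# Uniformly initialised Hawkes parameters with a non-negativity projection.
import torch as th
import torch.nn as nn

class ToyHawkesParams(nn.Module):
    def __init__(self, marks: int = 2):
        super().__init__()
        self.alpha = nn.Parameter(th.Tensor(marks, marks))  # excitation matrix
        self.beta = nn.Parameter(th.Tensor(marks, marks))   # decay matrix
        self.mu = nn.Parameter(th.Tensor(marks))            # base intensities
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.alpha)
        nn.init.uniform_(self.beta)
        nn.init.uniform_(self.mu)

    def constrained(self):
        # stand-in for non_neg_param: project alpha and mu onto the non-negative orthant
        return self.alpha.clamp(min=0.0), self.beta, self.mu.clamp(min=0.0)

alpha, beta, mu = ToyHawkesParams().constrained()
print(alpha.shape, beta.shape, mu.shape)
```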
a6802739/tensorflow
|
[
"a26413ef0a179dd93b5228b031de83dce97a8684",
"ca1f0ce1b9e82e9577a2439c1c40cf38d4694c40"
] |
[
"tensorflow/python/autograph/pyct/transformer_test.py",
"tensorflow/python/keras/engine/network.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for templates module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gast\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.platform import test\n\n\nclass TransformerTest(test.TestCase):\n\n def _simple_context(self):\n entity_info = transformer.EntityInfo(\n source_code=None,\n source_file=None,\n future_features=(),\n namespace=None,\n arg_values=None,\n arg_types=None)\n return transformer.Context(entity_info)\n\n def test_entity_scope_tracking(self):\n\n class TestTransformer(transformer.Base):\n\n # The choice of note to assign to is arbitrary. Using Assign because it's\n # easy to find in the tree.\n def visit_Assign(self, node):\n anno.setanno(node, 'enclosing_entities', self.enclosing_entities)\n return self.generic_visit(node)\n\n # This will show up in the lambda function.\n def visit_BinOp(self, node):\n anno.setanno(node, 'enclosing_entities', self.enclosing_entities)\n return self.generic_visit(node)\n\n tr = TestTransformer(self._simple_context())\n\n def test_function():\n a = 0\n\n class TestClass(object):\n\n def test_method(self):\n b = 0\n def inner_function(x):\n c = 0\n d = lambda y: (x + y)\n return c, d\n return b, inner_function\n return a, TestClass\n\n node, _ = parser.parse_entity(test_function, future_features=())\n node = tr.visit(node)\n\n test_function_node = node\n test_class = test_function_node.body[1]\n test_method = test_class.body[0]\n inner_function = test_method.body[1]\n lambda_node = inner_function.body[1].value\n\n a = test_function_node.body[0]\n b = test_method.body[0]\n c = inner_function.body[0]\n lambda_expr = lambda_node.body\n\n self.assertEqual(\n (test_function_node,), anno.getanno(a, 'enclosing_entities'))\n self.assertEqual((test_function_node, test_class, test_method),\n anno.getanno(b, 'enclosing_entities'))\n self.assertEqual(\n (test_function_node, test_class, test_method, inner_function),\n anno.getanno(c, 'enclosing_entities'))\n self.assertEqual((test_function_node, test_class, test_method,\n inner_function, lambda_node),\n anno.getanno(lambda_expr, 'enclosing_entities'))\n\n def assertSameAnno(self, first, second, key):\n self.assertIs(anno.getanno(first, key), anno.getanno(second, key))\n\n def assertDifferentAnno(self, first, second, key):\n self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))\n\n def test_state_tracking(self):\n\n class LoopState(object):\n pass\n\n class CondState(object):\n pass\n\n class TestTransformer(transformer.Base):\n\n def visit(self, node):\n anno.setanno(node, 'loop_state', self.state[LoopState].value)\n anno.setanno(node, 'cond_state', 
self.state[CondState].value)\n return super(TestTransformer, self).visit(node)\n\n def visit_While(self, node):\n self.state[LoopState].enter()\n node = self.generic_visit(node)\n self.state[LoopState].exit()\n return node\n\n def visit_If(self, node):\n self.state[CondState].enter()\n node = self.generic_visit(node)\n self.state[CondState].exit()\n return node\n\n tr = TestTransformer(self._simple_context())\n\n def test_function(a):\n a = 1\n while a:\n _ = 'a'\n if a > 2:\n _ = 'b'\n while True:\n raise '1'\n if a > 3:\n _ = 'c'\n while True:\n raise '1'\n\n node, _ = parser.parse_entity(test_function, future_features=())\n node = tr.visit(node)\n\n fn_body = node.body\n outer_while_body = fn_body[1].body\n self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')\n self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')\n\n first_if_body = outer_while_body[1].body\n self.assertDifferentAnno(outer_while_body[0], first_if_body[0],\n 'cond_state')\n self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')\n\n first_inner_while_body = first_if_body[1].body\n self.assertSameAnno(first_if_body[0], first_inner_while_body[0],\n 'cond_state')\n self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],\n 'loop_state')\n\n second_if_body = outer_while_body[2].body\n self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')\n self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')\n\n second_inner_while_body = second_if_body[1].body\n self.assertDifferentAnno(first_inner_while_body[0],\n second_inner_while_body[0], 'cond_state')\n self.assertDifferentAnno(first_inner_while_body[0],\n second_inner_while_body[0], 'loop_state')\n\n def test_local_scope_info_stack(self):\n\n class TestTransformer(transformer.Base):\n\n # Extract all string constants from the block.\n def visit_Str(self, node):\n self.set_local('string', self.get_local('string', default='') + node.s)\n return self.generic_visit(node)\n\n def _annotate_result(self, node):\n self.enter_local_scope()\n node = self.generic_visit(node)\n anno.setanno(node, 'test', self.get_local('string'))\n self.exit_local_scope()\n return node\n\n def visit_While(self, node):\n return self._annotate_result(node)\n\n def visit_For(self, node):\n return self._annotate_result(node)\n\n tr = TestTransformer(self._simple_context())\n\n def test_function(a):\n \"\"\"Docstring.\"\"\"\n assert a == 'This should not be counted'\n for i in range(3):\n _ = 'a'\n if i > 2:\n return 'b'\n else:\n _ = 'c'\n while True:\n raise '1'\n return 'nor this'\n\n node, _ = parser.parse_entity(test_function, future_features=())\n node = tr.visit(node)\n\n for_node = node.body[2]\n while_node = for_node.body[1].orelse[1]\n\n self.assertFalse(anno.hasanno(for_node, 'string'))\n self.assertEqual('abc', anno.getanno(for_node, 'test'))\n self.assertFalse(anno.hasanno(while_node, 'string'))\n self.assertEqual('1', anno.getanno(while_node, 'test'))\n\n def test_local_scope_info_stack_checks_integrity(self):\n\n class TestTransformer(transformer.Base):\n\n def visit_If(self, node):\n self.enter_local_scope()\n return self.generic_visit(node)\n\n def visit_For(self, node):\n node = self.generic_visit(node)\n self.exit_local_scope()\n return node\n\n tr = TestTransformer(self._simple_context())\n\n def no_exit(a):\n if a > 0:\n print(a)\n return None\n\n node, _ = parser.parse_entity(no_exit, future_features=())\n with self.assertRaises(AssertionError):\n tr.visit(node)\n\n def no_entry(a):\n for _ in 
a:\n print(a)\n\n node, _ = parser.parse_entity(no_entry, future_features=())\n with self.assertRaises(AssertionError):\n tr.visit(node)\n\n def test_visit_block_postprocessing(self):\n\n class TestTransformer(transformer.Base):\n\n def _process_body_item(self, node):\n if isinstance(node, gast.Assign) and (node.value.id == 'y'):\n if_node = gast.If(gast.Name('x', gast.Load(), None), [node], [])\n return if_node, if_node.body\n return node, None\n\n def visit_FunctionDef(self, node):\n node.body = self.visit_block(\n node.body, after_visit=self._process_body_item)\n return node\n\n def test_function(x, y):\n z = x\n z = y\n return z\n\n tr = TestTransformer(self._simple_context())\n\n node, _ = parser.parse_entity(test_function, future_features=())\n node = tr.visit(node)\n\n self.assertEqual(len(node.body), 2)\n self.assertTrue(isinstance(node.body[0], gast.Assign))\n self.assertTrue(isinstance(node.body[1], gast.If))\n self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))\n self.assertTrue(isinstance(node.body[1].body[1], gast.Return))\n\n def test_robust_error_on_list_visit(self):\n\n class BrokenTransformer(transformer.Base):\n\n def visit_If(self, node):\n # This is broken because visit expects a single node, not a list, and\n # the body of an if is a list.\n # Importantly, the default error handling in visit also expects a single\n # node. Therefore, mistakes like this need to trigger a type error\n # before the visit called here installs its error handler.\n # That type error can then be caught by the enclosing call to visit,\n # and correctly blame the If node.\n self.visit(node.body)\n return node\n\n def test_function(x):\n if x > 0:\n return x\n\n tr = BrokenTransformer(self._simple_context())\n\n node, _ = parser.parse_entity(test_function, future_features=())\n with self.assertRaises(ValueError) as cm:\n node = tr.visit(node)\n obtained_message = str(cm.exception)\n expected_message = r'expected \"ast.AST\", got \"\\<(type|class) \\'list\\'\\>\"'\n self.assertRegexpMatches(obtained_message, expected_message)\n\n def test_robust_error_on_ast_corruption(self):\n # A child class should not be able to be so broken that it causes the error\n # handling in `transformer.Base` to raise an exception. Why not? Because\n # then the original error location is dropped, and an error handler higher\n # up in the call stack gives misleading information.\n\n # Here we test that the error handling in `visit` completes, and blames the\n # correct original exception, even if the AST gets corrupted.\n\n class NotANode(object):\n pass\n\n class BrokenTransformer(transformer.Base):\n\n def visit_If(self, node):\n node.body = NotANode()\n raise ValueError('I blew up')\n\n def test_function(x):\n if x > 0:\n return x\n\n tr = BrokenTransformer(self._simple_context())\n\n node, _ = parser.parse_entity(test_function, future_features=())\n with self.assertRaises(ValueError) as cm:\n node = tr.visit(node)\n obtained_message = str(cm.exception)\n # The message should reference the exception actually raised, not anything\n # from the exception handler.\n expected_substring = 'I blew up'\n self.assertTrue(expected_substring in obtained_message, obtained_message)\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"A `Network` is way to compose layers: the topological form of a `Model`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport json\nimport os\n\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.saving import hdf5_format\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.training.tracking import util as trackable_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import serialization\nfrom tensorflow.python.util import tf_inspect\n\n\n# pylint: disable=g-import-not-at-top\ntry:\n import h5py\nexcept ImportError:\n h5py = None\n\ntry:\n import yaml\nexcept ImportError:\n yaml = None\n# pylint: enable=g-import-not-at-top\n\n\nclass Network(base_layer.Layer):\n \"\"\"A `Network` is a composition of layers.\n\n `Network` is the topological form of a \"model\". A `Model`\n is simply a `Network` with added training routines.\n\n Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph\n networks are used in the Keras Functional and Sequential APIs. Subclassed\n networks are used when a user subclasses the `Model` class. 
In general,\n more Keras features are supported with Graph Networks than with Subclassed\n Networks, specifically:\n\n - Model cloning (`keras.models.clone`)\n - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`\n - Whole-model saving (`model.save()`)\n\n A Graph Network can be instantiated by passing two arguments to `__init__`.\n The first argument is the `keras.Input` Tensors that represent the inputs\n to the Network. The second argument specifies the output Tensors that\n represent the outputs of this Network. Both arguments can be a nested\n structure of Tensors.\n\n Example:\n\n ```\n inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}\n t = keras.layers.Dense(1, activation='relu')(inputs['x1'])\n outputs = keras.layers.Add()([t, inputs['x2'])\n network = Network(inputs, outputs)\n ```\n\n A Graph Network constructed using the Functional API can also include raw\n TensorFlow functions, with the exception of functions that create Variables\n or assign ops.\n\n Example:\n\n ```\n inputs = keras.Input(shape=(10,))\n x = keras.layers.Dense(1)(inputs)\n outputs = tf.nn.relu(x)\n network = Network(inputs, outputs)\n ```\n\n Subclassed Networks can be instantiated via `name` and (optional) `dynamic`\n keyword arguments. Subclassed Networks keep track of their Layers, and their\n `call` method can be overridden. Subclassed Networks are typically created\n indirectly, by subclassing the `Model` class.\n\n Example:\n\n ```\n class MyModel(keras.Model):\n def __init__(self):\n super(MyModel, self).__init__(name='my_model', dynamic=False)\n\n self.layer1 = keras.layers.Dense(10, activation='relu')\n\n def call(self, inputs):\n return self.layer1(inputs)\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called\n # Signature detection\n if (len(args) == 2 or\n len(args) == 1 and 'outputs' in kwargs or\n 'inputs' in kwargs and 'outputs' in kwargs):\n # Graph network\n self._init_graph_network(*args, **kwargs)\n else:\n # Subclassed network\n self._init_subclassed_network(**kwargs)\n\n # Several Network methods have \"no_automatic_dependency_tracking\"\n # annotations. Since Network does automatic dependency tracking on attribute\n # assignment, including for common data structures such as lists, by default\n # we'd have quite a few empty dependencies which users don't care about (or\n # would need some way to ignore dependencies automatically, which is confusing\n # when applied to user code). 
Some attributes, such as _layers, would cause\n # structural issues (_layers being the place where Layers assigned to tracked\n # attributes are stored).\n #\n # Aside from these aesthetic and structural issues, useless dependencies on\n # empty lists shouldn't cause issues; adding or removing them will not break\n # checkpoints, but may cause \"all Python objects matched\" assertions to fail\n # (in which case less strict assertions may be substituted if necessary).\n @trackable.no_automatic_dependency_tracking\n def _base_init(self, name=None):\n # The following are implemented as property functions:\n # self.trainable_weights\n # self.non_trainable_weights\n # self.input_spec\n # self.losses\n # self.updates\n\n self._init_set_name(name, zero_based=True)\n self._activity_regularizer = None\n # This acts just like the `trainable` attribute of any layer instance.\n # It does not affect users of the underlying layers, only users of the\n # Network instance.\n self.trainable = True\n self._is_compiled = False\n self._expects_training_arg = False\n\n # This is True for Sequential networks and Functional networks.\n self._compute_output_and_mask_jointly = False\n\n self.supports_masking = False\n if not hasattr(self, 'optimizer'):\n # Don't reset optimizer if already set.\n self.optimizer = None\n\n # Private attributes to implement compatibility with Layer.\n self._trainable_weights = []\n self._non_trainable_weights = []\n self._updates = [] # Used in symbolic mode only.\n self._losses = []\n self._eager_losses = []\n # A list of metric instances corresponding to the symbolic metric tensors\n # added using the `add_metric` API.\n self._metrics = []\n # A dictionary that maps metric names to metric result tensors.\n self._metrics_tensors = {}\n self._scope = None # Never used.\n self._reuse = None # Never used.\n if context.executing_eagerly():\n self._graph = None\n else:\n self._graph = ops.get_default_graph() # Used in symbolic mode only.\n # A Network does not create weights of its own, thus has no dtype.\n self._dtype = None\n\n # All layers in order of horizontal graph traversal.\n # Entries are unique. Includes input and output layers.\n self._layers = []\n\n # Used in symbolic mode only, only in conjunction with graph-networks\n self._outbound_nodes = []\n self._inbound_nodes = []\n\n self._trackable_saver = (\n trackable_utils.saver_with_op_caching(self))\n\n # Networks do not need to do any casting of inputs or variables, because\n # each of its layers will handle casting through the layer's own\n # implementation. 
Therefore networks use the 'infer' policy, which does no\n # casting.\n self._mixed_precision_policy = policy.Policy('infer')\n\n @trackable.no_automatic_dependency_tracking\n def _init_graph_network(self, inputs, outputs, name=None):\n self._call_convention = (base_layer_utils\n .CallConvention.EXPLICIT_INPUTS_ARGUMENT)\n # Normalize and set self.inputs, self.outputs.\n if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:\n inputs = inputs[0]\n if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:\n outputs = outputs[0]\n self._nested_outputs = outputs\n self._nested_inputs = inputs\n self.inputs = nest.flatten(inputs)\n self.outputs = nest.flatten(outputs)\n\n if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):\n base_layer_utils.create_keras_history(self._nested_outputs)\n\n self._base_init(name=name)\n self._validate_graph_inputs_and_outputs()\n\n self._compute_previous_mask = (\n 'mask' in tf_inspect.getfullargspec(self.call).args or\n hasattr(self, 'compute_mask'))\n # A Network does not create weights of its own, thus it is already\n # built.\n self.built = True\n self._compute_output_and_mask_jointly = True\n self._is_graph_network = True\n self._dynamic = False\n # `_expects_training_arg` is True since the `training` argument is always\n # present in the signature of the `call` method of a graph network.\n self._expects_training_arg = True\n\n self._input_layers = []\n self._output_layers = []\n self._input_coordinates = []\n self._output_coordinates = []\n\n # This is for performance optimization when calling the Network on new\n # inputs. Every time the Network is called on a set on input tensors,\n # we compute the output tensors, output masks and output shapes in one pass,\n # then cache them here. 
When any of these outputs is queried later, we\n # retrieve it from there instead of recomputing it.\n self._output_mask_cache = {}\n self._output_tensor_cache = {}\n self._output_shape_cache = {}\n\n # Build self._output_layers:\n for x in self.outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n self._output_layers.append(layer)\n self._output_coordinates.append((layer, node_index, tensor_index))\n\n # Build self._input_layers:\n for x in self.inputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n # It's supposed to be an input layer, so only one node\n # and one tensor output.\n assert node_index == 0\n assert tensor_index == 0\n self._input_layers.append(layer)\n self._input_coordinates.append((layer, node_index, tensor_index))\n\n # Keep track of the network's nodes and layers.\n nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(\n self.inputs, self.outputs)\n self._network_nodes = nodes\n self._nodes_by_depth = nodes_by_depth\n self._layers = layers\n self._layers_by_depth = layers_by_depth\n self._layer_call_argspecs = {}\n for layer in self._layers:\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n\n self._track_layers(layers)\n\n # Create the node linking internal inputs to internal outputs.\n base_layer.Node(\n outbound_layer=self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=self._nested_inputs,\n output_tensors=self._nested_outputs)\n\n # Build self.input_names and self.output_names.\n self.input_names = []\n self.output_names = []\n self._feed_input_names = []\n self._feed_inputs = []\n self._feed_input_shapes = []\n for i, layer in enumerate(self._input_layers):\n self.input_names.append(layer.name)\n if layer.is_placeholder:\n self._feed_input_names.append(layer.name)\n self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))\n self._feed_inputs.append(layer.input)\n for layer in self._output_layers:\n self.output_names.append(layer.name)\n\n @trackable.no_automatic_dependency_tracking\n def _init_subclassed_network(self, name=None, dynamic=False):\n self._base_init(name=name)\n self._is_graph_network = False\n self._dynamic = dynamic\n call_argspec = tf_inspect.getfullargspec(self.call)\n if 'training' in call_argspec.args:\n self._expects_training_arg = True\n else:\n self._expects_training_arg = False\n self._call_convention = self._determine_call_convention(call_argspec)\n self.outputs = []\n self.inputs = []\n self.built = False\n\n @property\n def dynamic(self):\n if self._is_graph_network:\n return any(layer.dynamic for layer in self.layers)\n return self._dynamic or any(layer.dynamic for layer in self.layers)\n\n def _determine_call_convention(self, call_argspec):\n \"\"\"Decides how `self.call()` is invoked. 
See `CallConvention`.\"\"\"\n if call_argspec.varargs:\n may_take_single_argument = False\n else:\n try:\n # Note: tf_inspect doesn't raise a TypeError when regular inspect would,\n # so we need to keep in mind that \"getcallargs\" may have returned\n # something even though we under-specified positional arguments.\n all_args = tf_inspect.getcallargs(self.call, None)\n self_args = set()\n for arg_name, obj in all_args.items():\n if obj is self:\n self_args.add(arg_name)\n may_take_single_argument = True\n except TypeError:\n may_take_single_argument = False\n if may_take_single_argument:\n # A single positional argument (plus \"self\") is considered equivalent to\n # an \"inputs\" argument.\n all_positional_args = len(call_argspec.args)\n if call_argspec.defaults is not None:\n all_positional_args -= len(call_argspec.defaults)\n non_self_positional_args = all_positional_args\n for positional_arg_name in call_argspec.args[:all_positional_args]:\n if positional_arg_name in self_args:\n non_self_positional_args -= 1\n if non_self_positional_args == 1:\n if 'inputs' in call_argspec.args[all_positional_args:]:\n raise TypeError(\n \"Model.call() takes a single positional argument (to which \"\n \"inputs are passed by convention) and a separate 'inputs' \"\n \"argument. Unable to determine which arguments are inputs.\")\n return base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT\n if 'inputs' in call_argspec.args:\n return base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT\n else:\n return base_layer_utils.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS\n\n def _track_layers(self, layers):\n \"\"\"Add Trackable dependencies on a list of Layers.\"\"\"\n weight_layer_index = 0\n for layer_index, layer in enumerate(layers):\n if layer.weights:\n # Keep a separate index for layers which have weights. 
This allows users\n # to insert Layers without weights anywhere in the network without\n # breaking checkpoints.\n self._track_trackable(\n layer, name='layer_with_weights-%d' % weight_layer_index,\n overwrite=True)\n weight_layer_index += 1\n # Even if it doesn't have weights, we should still track everything in\n # case it has/will have Trackable dependencies.\n self._track_trackable(\n layer, name='layer-%d' % layer_index, overwrite=True)\n\n def __setattr__(self, name, value):\n if not getattr(self, '_setattr_tracking', True):\n super(Network, self).__setattr__(name, value)\n return\n\n if all(\n isinstance(v, (base_layer.Layer,\n data_structures.TrackableDataStructure)) or\n trackable_layer_utils.has_weights(v) for v in nest.flatten(value)):\n try:\n self._is_graph_network\n except AttributeError:\n raise RuntimeError('It looks like you are subclassing `Model` and you '\n 'forgot to call `super(YourClass, self).__init__()`.'\n ' Always start with this line.')\n\n super(Network, self).__setattr__(name, value)\n\n # Keep track of metric instance created in subclassed model/layer.\n # We do this so that we can maintain the correct order of metrics by adding\n # the instance to the `metrics` list as soon as it is created.\n from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top\n if isinstance(value, metrics_module.Metric):\n self._metrics.append(value)\n\n @property\n def stateful(self):\n return any((hasattr(layer, 'stateful') and layer.stateful)\n for layer in self.layers)\n\n def reset_states(self):\n for layer in self.layers:\n if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):\n layer.reset_states()\n\n @property\n def state_updates(self):\n \"\"\"Returns the `updates` from all layers that are stateful.\n\n This is useful for separating training updates and\n state updates, e.g. 
when we need to update a layer's internal state\n during prediction.\n\n Returns:\n A list of update ops.\n \"\"\"\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n Returns:\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return backend.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n Arguments:\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for layer in self.layers:\n num_param = len(layer.weights)\n layer_weights = weights[:num_param]\n for sw, w in zip(layer.weights, layer_weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n backend.batch_set_value(tuples)\n\n def compute_mask(self, inputs, mask):\n if not self._is_graph_network:\n return None\n\n # TODO(omalleyt): b/123540974 This function is not really safe to call\n # by itself because it will duplicate any updates and losses in graph\n # mode by `call`ing the Layers again.\n output_tensors = self._run_internal_graph(inputs, mask=mask)\n return nest.map_structure(lambda t: t._keras_mask, output_tensors)\n\n @property\n def layers(self):\n return trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n\n def get_layer(self, name=None, index=None):\n \"\"\"Retrieves a layer based on either its name (unique) or index.\n\n If `name` and `index` are both provided, `index` will take precedence.\n Indices are based on order of horizontal graph traversal (bottom-up).\n\n Arguments:\n name: String, name of layer.\n index: Integer, index of layer.\n\n Returns:\n A layer instance.\n\n Raises:\n ValueError: In case of invalid layer name or index.\n \"\"\"\n # TODO(fchollet): We could build a dictionary based on layer names\n # since they are constant, but we have not done that yet.\n if index is not None:\n if len(self.layers) <= index:\n raise ValueError('Was asked to retrieve layer at index ' + str(index) +\n ' but model only has ' + str(len(self.layers)) +\n ' layers.')\n else:\n return self.layers[index]\n else:\n if not name:\n raise ValueError('Provide either a layer name or layer index.')\n for layer in self.layers:\n if layer.name == name:\n return layer\n raise ValueError('No such layer: ' + name)\n\n def _get_unfiltered_updates(self, check_trainable=True):\n if check_trainable and not self.trainable and not self.stateful:\n return []\n updates = []\n for layer in self.layers:\n updates += layer._get_unfiltered_updates(check_trainable=check_trainable)\n updates += list(self._updates)\n return updates\n\n @property\n def _unfiltered_losses(self):\n losses = []\n\n # If any eager losses are present, we assume the model to be part of an\n # eager training loop (either a custom one or the one used when\n # `run_eagerly=True`), and so we always return just the eager losses in that\n # case.\n if self._eager_losses:\n losses.extend(self._eager_losses)\n else:\n losses.extend(self._losses)\n for layer in self.layers:\n if isinstance(layer, Network):\n losses += layer._unfiltered_losses\n else:\n losses += layer.losses\n return losses\n\n @trackable.no_automatic_dependency_tracking\n def _clear_losses(self):\n \"\"\"Used every step in eager to reset losses.\"\"\"\n self._eager_losses = []\n for layer in self.layers:\n 
layer._clear_losses()\n\n @property\n def updates(self):\n \"\"\"Retrieves the network's updates.\n\n Will only include updates that are either\n unconditional, or conditional on inputs to this model\n (e.g. will not include updates that were created by layers of this model\n outside of the model).\n\n When the network has no registered inputs, all updates are returned.\n\n Effectively, `network.updates` behaves like `layer.updates`.\n\n Concrete example:\n\n ```python\n bn = keras.layers.BatchNormalization()\n x1 = keras.layers.Input(shape=(10,))\n _ = bn(x1) # This creates 2 updates.\n\n x2 = keras.layers.Input(shape=(10,))\n y2 = bn(x2) # This creates 2 more updates.\n\n # The BN layer has now 4 updates.\n self.assertEqual(len(bn.updates), 4)\n\n # Let's create a model from x2 to y2.\n model = keras.models.Model(x2, y2)\n\n # The model does not list all updates from its underlying layers,\n # but only the updates that are relevant to it. Updates created by layers\n # outside of the model are discarded.\n self.assertEqual(len(model.updates), 2)\n\n # If you keep calling the model, you append to its updates, just like\n # what happens for a layer.\n x3 = keras.layers.Input(shape=(10,))\n y3 = model(x3)\n self.assertEqual(len(model.updates), 4)\n\n # But if you call the inner BN layer independently, you don't affect\n # the model's updates.\n x4 = keras.layers.Input(shape=(10,))\n _ = bn(x4)\n self.assertEqual(len(model.updates), 4)\n ```\n\n Returns:\n A list of update ops.\n \"\"\"\n\n updates = self._get_unfiltered_updates(check_trainable=True)\n\n # `updates` might contain irrelevant updates, so it needs to be filtered\n # with respect to inputs the model has been called on.\n relevant_inputs = []\n for i in range(0, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n if not relevant_inputs:\n return list(set(updates))\n\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)\n relevant_conditional_updates = [x for x in updates if x in reachable]\n unconditional_updates = [\n x for x in updates if x._unconditional_update] # pylint: disable=protected-access\n # A layer could be used multiple times in a nested structure,\n # so the updates list must be de-duped.\n return list(set(relevant_conditional_updates + unconditional_updates))\n\n @property\n def losses(self):\n \"\"\"Retrieves the network's losses.\n\n Will only include losses that are either\n unconditional, or conditional on inputs to this model\n (e.g. 
will not include losses that depend on tensors\n that aren't inputs to this model).\n\n When the network has no registered inputs, all losses are returned.\n\n Returns:\n A list of loss tensors.\n \"\"\"\n losses = self._unfiltered_losses\n\n if context.executing_eagerly():\n return losses\n\n # TODO(kaftan/fchollet): Clean this up / make it obsolete.\n # This is a super ugly, confusing check necessary to\n # handle the case where we are executing in a function graph in eager mode\n # but the model was constructed symbolically in a separate graph scope.\n # We need to capture the losses created in the current graph function,\n # and filter out the incorrect loss tensors created when symbolically\n # building the graph.\n # We have to use this check because the code after it that checks\n # for reachable inputs only captures the part of the model that was\n # built symbolically, and captures the wrong tensors from a different\n # func graph (causing a crash later on when trying to execute the\n # graph function)\n if ops.executing_eagerly_outside_functions():\n return [\n loss for loss in losses\n if getattr(loss, 'graph', None) == ops.get_default_graph()\n ]\n\n relevant_inputs = []\n for i in range(0, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n if not relevant_inputs:\n return losses\n\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))\n\n @property\n def trainable_weights(self):\n return trackable_layer_utils.gather_trainable_weights(\n trainable=self.trainable,\n sub_layers=self._layers,\n extra_variables=self._trainable_weights)\n\n @property\n def non_trainable_weights(self):\n return trackable_layer_utils.gather_non_trainable_weights(\n trainable=self.trainable,\n sub_layers=self._layers,\n extra_variables=self._non_trainable_weights + self._trainable_weights)\n\n @property\n def _all_metrics_tensors(self):\n \"\"\"Returns the network's symbolic metric tensors.\"\"\"\n # TODO(psv): Remove this property.\n metrics_tensors = {}\n for layer in self.layers:\n if isinstance(layer, Network):\n metrics_tensors.update(layer._all_metrics_tensors)\n else:\n metrics_tensors.update(layer._metrics_tensors)\n metrics_tensors.update(self._metrics_tensors)\n return metrics_tensors\n\n @property\n def input_spec(self):\n \"\"\"Gets the network's input specs.\n\n Returns:\n A list of `InputSpec` instances (one per input to the model)\n or a single instance if the model has only one input.\n \"\"\"\n # If subclassed model, can't assume anything.\n if not self._is_graph_network:\n return None\n\n specs = []\n for layer in self._input_layers:\n if layer.input_spec is None:\n specs.append(None)\n else:\n if not isinstance(layer.input_spec, list):\n raise TypeError('Layer ' + layer.name +\n ' has an input_spec attribute that '\n 'is not a list. We expect a list. 
'\n 'Found input_spec = ' + str(layer.input_spec))\n specs += layer.input_spec\n if len(specs) == 1:\n return specs[0]\n return specs\n\n @base_layer.default\n def build(self, input_shape):\n \"\"\"Builds the model based on input shapes received.\n\n This is to be used for subclassed models, which do not know at instantiation\n time what their inputs look like.\n\n This method only exists for users who want to call `model.build()` in a\n standalone way (as a substitute for calling the model on real data to\n build it). It will never be called by the framework (and thus it will\n never throw unexpected errors in an unrelated workflow).\n\n Args:\n input_shape: Single tuple, TensorShape, or list of shapes, where shapes\n are tuples, integers, or TensorShapes.\n\n Raises:\n ValueError:\n 1. In case of invalid user-provided data (not of type tuple,\n list, or TensorShape).\n 2. If the model requires call arguments that are agnostic\n to the input shapes (positional or kwarg in call signature).\n 3. If not all layers were properly built.\n 4. If float type inputs are not supported within the layers.\n\n In each of these cases, the user should build their model by calling it\n on real tensor data.\n \"\"\"\n if self._is_graph_network:\n self.built = True\n return\n\n # If subclass network\n if input_shape is None:\n raise ValueError('Input shape must be defined when calling build on a '\n 'model subclass network.')\n valid_types = (tuple, list, tensor_shape.TensorShape)\n if not isinstance(input_shape, valid_types):\n raise ValueError('Specified input shape is not one of the valid types. '\n 'Please specify a batch input shape of type tuple or '\n 'list of input shapes. User provided '\n 'input type: {}'.format(type(input_shape)))\n\n if input_shape and not self.inputs:\n # We create placeholders for the `None`s in the shape and build the model\n # in a Graph. Since tf.Variable is compatible with both eager execution\n # and graph building, the variables created after building the model in\n # a Graph are still valid when executing eagerly.\n if context.executing_eagerly():\n graph = func_graph.FuncGraph('build_graph')\n else:\n graph = backend.get_graph()\n with graph.as_default():\n if isinstance(input_shape, list):\n x = [base_layer_utils.generate_placeholders_from_shape(shape)\n for shape in input_shape]\n else:\n x = base_layer_utils.generate_placeholders_from_shape(input_shape)\n\n kwargs = {}\n call_signature = tf_inspect.getfullargspec(self.call)\n call_args = call_signature.args\n # Exclude `self`, `inputs`, and any argument with a default value.\n if len(call_args) > 2:\n if call_signature.defaults:\n call_args = call_args[2:-len(call_signature.defaults)]\n else:\n call_args = call_args[2:]\n for arg in call_args:\n if arg == 'training':\n # Case where `training` is a positional arg with no default.\n kwargs['training'] = False\n else:\n # Has invalid call signature with unknown positional arguments.\n raise ValueError(\n 'Currently, you cannot build your model if it has '\n 'positional or keyword arguments that are not '\n 'inputs to the model, but are required for its '\n '`call` method. 
Instead, in order to instantiate '\n 'and build your model, `call` your model on real '\n 'tensor data with all expected call arguments.')\n elif len(call_args) < 2:\n # Signature without `inputs`.\n raise ValueError('You can only call `build` on a model if its `call` '\n 'method accepts an `inputs` argument.')\n try:\n self.call(x, **kwargs)\n except (errors.InvalidArgumentError, TypeError):\n raise ValueError('You cannot build your model by calling `build` '\n 'if your layers do not support float type inputs. '\n 'Instead, in order to instantiate and build your '\n 'model, `call` your model on real tensor data (of '\n 'the correct dtype).')\n if self._layers:\n self._track_layers(self._layers)\n self.built = True\n\n def call(self, inputs, training=None, mask=None):\n \"\"\"Calls the model on new inputs.\n\n In this case `call` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n Arguments:\n inputs: A tensor or list of tensors.\n training: Boolean or boolean scalar tensor, indicating whether to run\n the `Network` in training mode or inference mode.\n mask: A mask or list of masks. A mask can be\n either a tensor or None (no mask).\n\n Returns:\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n \"\"\"\n if not self._is_graph_network:\n raise NotImplementedError('When subclassing the `Model` class, you should'\n ' implement a `call` method.')\n\n return self._run_internal_graph(inputs, training=training, mask=mask)\n\n def compute_output_shape(self, input_shape):\n if not self._is_graph_network:\n return super(Network, self).compute_output_shape(input_shape)\n\n # Convert any shapes in tuple format to TensorShapes.\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n\n if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):\n raise ValueError('Invalid input_shape argument ' + str(input_shape) +\n ': model has ' + str(len(self._input_layers)) +\n ' tensor inputs.')\n\n cache_key = generic_utils.object_list_uid(input_shape)\n if cache_key in self._output_shape_cache:\n # Cache hit. 
Return shapes as TensorShapes.\n return self._output_shape_cache[cache_key]\n\n layers_to_output_shapes = {}\n for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):\n # It's an input layer: then `compute_output_shape` is identity,\n # and there is only one node and one tensor..\n shape_key = layer.name + '_0_0'\n layers_to_output_shapes[shape_key] = shape\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Iterate over nodes, by depth level.\n if len(depth_keys) > 1:\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n if layer in self._input_layers:\n # We've already covered the input layers\n # a few lines above.\n continue\n # Potentially redundant list,\n # same size as node.input_tensors.\n layer_input_shapes = []\n for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():\n input_layer_key = inbound_layer.name + '_%s_%s' % (node_id,\n tensor_id)\n layer_input_shapes.append(layers_to_output_shapes[input_layer_key])\n layer_input_shapes = nest.pack_sequence_as(node.inbound_layers,\n layer_input_shapes)\n # Layers expect shapes to be tuples for `compute_output_shape`.\n layer_input_shapes = tf_utils.convert_shapes(\n layer_input_shapes, to_tuples=True)\n layer_output_shapes = layer.compute_output_shape(layer_input_shapes)\n # Convert back to TensorShapes.\n layer_output_shapes = tf_utils.convert_shapes(\n layer_output_shapes, to_tuples=False)\n\n node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access\n for j, shape in enumerate(nest.flatten(layer_output_shapes)):\n shape_key = layer.name + '_%s_%s' % (node_index, j)\n layers_to_output_shapes[shape_key] = shape\n\n # Read final output shapes from layers_to_output_shapes.\n output_shapes = []\n for i in range(len(self._output_layers)):\n layer, node_index, tensor_index = self._output_coordinates[i]\n shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)\n output_shapes.append(layers_to_output_shapes[shape_key])\n output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)\n # Store in cache.\n self._output_shape_cache[cache_key] = output_shapes\n\n # Return shapes as TensorShapes.\n return output_shapes\n\n def _run_internal_graph(self, inputs, training=None, mask=None):\n \"\"\"Computes output tensors for new inputs.\n\n # Note:\n - Expects `inputs` to be a list (potentially with 1 element).\n - Can be run on non-Keras tensors.\n\n Arguments:\n inputs: Tensor or nested structure of Tensors.\n training: Boolean learning phase.\n mask: (Optional) Tensor or nested structure of Tensors.\n\n Returns:\n Two lists: output_tensors, output_masks\n \"\"\"\n # Note: masking support is relevant mainly for Keras.\n # It cannot be factored out without having the fully reimplement the network\n # calling logic on the Keras side. We choose to incorporate it in\n # Network because 1) it may be useful to fully support in tf.layers in\n # the future and 2) Keras is a major user of Network. 
If you don't\n # use masking, it does not interfere with regular behavior at all and you\n # can ignore it.\n inputs = nest.flatten(inputs)\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = nest.flatten(mask)\n\n for input_t, mask in zip(inputs, masks):\n input_t._keras_mask = mask\n\n # Dictionary mapping reference tensors to computed tensors.\n tensor_dict = {}\n\n for x, y, mask in zip(self.inputs, inputs, masks):\n tensor_dict[str(id(x))] = y\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Ignore the InputLayers when computing the graph.\n depth_keys = depth_keys[1:]\n\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n\n if all(\n str(id(tensor)) in tensor_dict\n for tensor in nest.flatten(node.input_tensors)):\n\n # Call layer (reapplying ops to new inputs).\n computed_tensors = nest.map_structure(\n lambda t: tensor_dict[str(id(t))], node.input_tensors)\n\n # Ensure `training` and `mask` arg propagation if applicable.\n kwargs = node.arguments or {}\n argspec = self._layer_call_argspecs[layer].args\n if 'training' in argspec:\n kwargs.setdefault('training', training)\n if 'mask' in argspec:\n computed_masks = nest.map_structure(lambda t: t._keras_mask,\n computed_tensors)\n kwargs.setdefault('mask', computed_masks)\n\n # Compute outputs.\n output_tensors = layer(computed_tensors, **kwargs)\n\n # Update tensor_dict.\n for x, y in zip(\n nest.flatten(node.output_tensors), nest.flatten(output_tensors)):\n tensor_dict[str(id(x))] = y\n\n output_tensors = []\n output_shapes = []\n for x in self.outputs:\n assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x)\n tensor = tensor_dict[str(id(x))]\n output_shapes.append(x.shape)\n output_tensors.append(tensor)\n\n if output_shapes is not None:\n input_shapes = [x.shape for x in inputs]\n cache_key = generic_utils.object_list_uid(input_shapes)\n self._output_shape_cache[cache_key] = nest.pack_sequence_as(\n self._nested_outputs, output_shapes)\n\n output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors)\n return output_tensors\n\n def get_config(self):\n if not self._is_graph_network:\n raise NotImplementedError\n\n config = {\n 'name': self.name,\n }\n node_conversion_map = {}\n for layer in self.layers:\n if issubclass(layer.__class__, Network):\n # Networks start with a pre-existing node\n # linking their input to output.\n kept_nodes = 1\n else:\n kept_nodes = 0\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in self._network_nodes:\n node_conversion_map[node_key] = kept_nodes\n kept_nodes += 1\n layer_configs = []\n for layer in self.layers: # From the earliest layers on.\n layer_class_name = layer.__class__.__name__\n layer_config = layer.get_config()\n filtered_inbound_nodes = []\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in self._network_nodes:\n # The node is relevant to the model:\n # add to filtered_inbound_nodes.\n if node.arguments:\n try:\n json.dumps(node.arguments)\n kwargs = node.arguments\n except TypeError:\n logging.warning(\n 'Layer ' + layer.name +\n ' was passed non-serializable keyword arguments: ' +\n str(node.arguments) + '. 
They will not be included '\n 'in the serialized model (and thus will be missing '\n 'at deserialization time).')\n kwargs = {}\n else:\n kwargs = {}\n if node.inbound_layers:\n node_data = []\n for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():\n node_key = _make_node_key(inbound_layer.name, node_id)\n new_node_index = node_conversion_map.get(node_key, 0)\n node_data.append(\n tf_utils.ListWrapper(\n [inbound_layer.name, new_node_index, tensor_id, kwargs]))\n node_data = nest.pack_sequence_as(node.input_tensors, node_data)\n if not nest.is_sequence(node_data):\n node_data = [node_data]\n # Convert ListWrapper to list for backwards compatible configs.\n node_data = tf_utils.convert_inner_node_data(node_data)\n filtered_inbound_nodes.append(node_data)\n layer_configs.append({\n 'name': layer.name,\n 'class_name': layer_class_name,\n 'config': layer_config,\n 'inbound_nodes': filtered_inbound_nodes,\n })\n config['layers'] = layer_configs\n\n # Gather info about inputs and outputs.\n model_inputs = []\n for i in range(len(self._input_layers)):\n layer, node_index, tensor_index = self._input_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in self._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_inputs.append(\n tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))\n model_inputs = nest.pack_sequence_as(self._nested_inputs, model_inputs)\n # Preserve external Keras compat for Models with single input.\n if not nest.is_sequence(model_inputs):\n model_inputs = [model_inputs]\n model_inputs = tf_utils.convert_inner_node_data(model_inputs)\n config['input_layers'] = model_inputs\n\n model_outputs = []\n for i in range(len(self._output_layers)):\n layer, node_index, tensor_index = self._output_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in self._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_outputs.append(\n tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))\n model_outputs = nest.pack_sequence_as(self._nested_outputs, model_outputs)\n # Preserve external Keras compat for Models with single output.\n if not nest.is_sequence(model_outputs):\n model_outputs = [model_outputs]\n model_outputs = tf_utils.convert_inner_node_data(model_outputs)\n config['output_layers'] = model_outputs\n return copy.deepcopy(config)\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Instantiates a Model from its config (output of `get_config()`).\n\n Arguments:\n config: Model config dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A model instance.\n\n Raises:\n ValueError: In case of improperly formatted config dict.\n \"\"\"\n # Layer instances created during\n # the graph reconstruction process\n created_layers = {}\n\n # Dictionary mapping layer instances to\n # node data that specifies a layer call.\n # It acts as a queue that maintains any unprocessed\n # layer call until it becomes possible to process it\n # (i.e. 
until the input tensors to the call all exist).\n unprocessed_nodes = {}\n\n def add_unprocessed_node(layer, node_data):\n if layer not in unprocessed_nodes:\n unprocessed_nodes[layer] = [node_data]\n else:\n unprocessed_nodes[layer].append(node_data)\n\n def process_node(layer, node_data):\n \"\"\"Deserialize a node.\n\n Arguments:\n layer: layer instance.\n node_data: Nested structure of `ListWrapper`.\n\n Raises:\n ValueError: In case of improperly formatted `node_data`.\n \"\"\"\n input_tensors = []\n for input_data in nest.flatten(node_data):\n input_data = input_data.as_list()\n inbound_layer_name = input_data[0]\n inbound_node_index = input_data[1]\n inbound_tensor_index = input_data[2]\n if len(input_data) == 3:\n kwargs = {}\n elif len(input_data) == 4:\n kwargs = input_data[3]\n else:\n raise ValueError('Improperly formatted model config.')\n\n inbound_layer = created_layers[inbound_layer_name]\n if len(inbound_layer._inbound_nodes) <= inbound_node_index:\n add_unprocessed_node(layer, node_data)\n return\n inbound_node = inbound_layer._inbound_nodes[inbound_node_index]\n input_tensors.append(\n nest.flatten(inbound_node.output_tensors)[inbound_tensor_index])\n input_tensors = nest.pack_sequence_as(node_data, input_tensors)\n # Call layer on its inputs, thus creating the node\n # and building the layer if needed.\n if input_tensors is not None:\n # Preserve compatibility with older configs.\n flat_input_tensors = nest.flatten(input_tensors)\n if len(flat_input_tensors) == 1:\n layer(flat_input_tensors[0], **kwargs)\n else:\n layer(input_tensors, **kwargs)\n\n def process_layer(layer_data):\n \"\"\"Deserializes a layer, then call it on appropriate inputs.\n\n Arguments:\n layer_data: layer config dict.\n\n Raises:\n ValueError: In case of improperly formatted `layer_data` dict.\n \"\"\"\n layer_name = layer_data['name']\n\n # Instantiate layer.\n from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n\n layer = deserialize_layer(layer_data, custom_objects=custom_objects)\n created_layers[layer_name] = layer\n\n # Gather layer inputs and convert to `ListWrapper` objects.\n inbound_nodes_data = layer_data['inbound_nodes']\n inbound_nodes_data = tf_utils.convert_inner_node_data(\n inbound_nodes_data, wrap=True)\n for node_data in inbound_nodes_data:\n # We don't process nodes (i.e. make layer calls)\n # on the fly because the inbound node may not yet exist,\n # in case of layer shared at different topological depths\n # (e.g. 
a model such as A(B(A(B(x)))))\n add_unprocessed_node(layer, node_data)\n\n # First, we create all layers and enqueue nodes to be processed\n for layer_data in config['layers']:\n process_layer(layer_data)\n # Then we process nodes in order of layer depth.\n # Nodes that cannot yet be processed (if the inbound node\n # does not yet exist) are re-enqueued, and the process\n # is repeated until all nodes are processed.\n while unprocessed_nodes:\n for layer_data in config['layers']:\n layer = created_layers[layer_data['name']]\n if layer in unprocessed_nodes:\n for node_data in unprocessed_nodes.pop(layer):\n process_node(layer, node_data)\n\n name = config.get('name')\n input_tensors = []\n output_tensors = []\n\n input_layers = tf_utils.convert_inner_node_data(\n config['input_layers'], wrap=True)\n for layer_data in nest.flatten(input_layers):\n layer_name, node_index, tensor_index = layer_data.as_list()\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])\n\n output_layers = tf_utils.convert_inner_node_data(\n config['output_layers'], wrap=True)\n for layer_data in nest.flatten(output_layers):\n layer_name, node_index, tensor_index = layer_data.as_list()\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])\n\n input_tensors = nest.pack_sequence_as(input_layers, input_tensors)\n output_tensors = nest.pack_sequence_as(output_layers, output_tensors)\n return cls(inputs=input_tensors, outputs=output_tensors, name=name)\n\n def save(self, filepath, overwrite=True, include_optimizer=True):\n \"\"\"Saves the model to a single HDF5 file.\n\n The savefile includes:\n - The model architecture, allowing to re-instantiate the model.\n - The model weights.\n - The state of the optimizer, allowing to resume training\n exactly where you left off.\n\n This allows you to save the entirety of the state of a model\n in a single file.\n\n Saved models can be reinstantiated via `keras.models.load_model`.\n The model returned by `load_model`\n is a compiled model ready to be used (unless the saved model\n was never compiled in the first place).\n\n Arguments:\n filepath: String, path to the file to save the weights to.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n Example:\n\n ```python\n from keras.models import load_model\n\n model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n del model # deletes the existing model\n\n # returns a compiled model\n # identical to the previous one\n model = load_model('my_model.h5')\n ```\n \"\"\"\n if not self._is_graph_network:\n raise NotImplementedError(\n 'The `save` method requires the model to be a Functional model or a '\n 'Sequential model. It does not work for subclassed models, '\n 'because such models are defined via the body of a Python method, '\n 'which isn\\'t safely serializable. 
Consider '\n 'using `save_weights`, in order to save the weights of the model.')\n\n from tensorflow.python.keras.models import save_model # pylint: disable=g-import-not-at-top\n save_model(self, filepath, overwrite, include_optimizer)\n\n def save_weights(self, filepath, overwrite=True, save_format=None):\n \"\"\"Saves all layer weights.\n\n Either saves in HDF5 or in TensorFlow format based on the `save_format`\n argument.\n\n When saving in HDF5 format, the weight file has:\n - `layer_names` (attribute), a list of strings\n (ordered names of model layers).\n - For every layer, a `group` named `layer.name`\n - For every such layer group, a group attribute `weight_names`,\n a list of strings\n (ordered names of weights tensor of the layer).\n - For every weight in the layer, a dataset\n storing the weight value, named after the weight tensor.\n\n When saving in TensorFlow format, all objects referenced by the network are\n saved in the same format as `tf.train.Checkpoint`, including any `Layer`\n instances or `Optimizer` instances assigned to object attributes. For\n networks constructed from inputs and outputs using `tf.keras.Model(inputs,\n outputs)`, `Layer` instances used by the network are tracked/saved\n automatically. For user-defined classes which inherit from `tf.keras.Model`,\n `Layer` instances must be assigned to object attributes, typically in the\n constructor. See the documentation of `tf.train.Checkpoint` and\n `tf.keras.Model` for details.\n\n Arguments:\n filepath: String, path to the file to save the weights to. When saving\n in TensorFlow format, this is the prefix used for checkpoint files\n (multiple files are generated). Note that the '.h5' suffix causes\n weights to be saved in HDF5 format.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or\n '.keras' will default to HDF5 if `save_format` is `None`. Otherwise\n `None` defaults to 'tf'.\n\n Raises:\n ImportError: If h5py is not available when attempting to save in HDF5\n format.\n ValueError: For invalid/unknown format arguments.\n \"\"\"\n filepath_is_h5 = _is_hdf5_filepath(filepath)\n if save_format is None:\n if filepath_is_h5:\n save_format = 'h5'\n else:\n save_format = 'tf'\n else:\n user_format = save_format.lower().strip()\n if user_format in ('tensorflow', 'tf'):\n save_format = 'tf'\n elif user_format in ('hdf5', 'h5', 'keras'):\n save_format = 'h5'\n else:\n raise ValueError(\n 'Unknown format \"%s\". Was expecting one of {\"tf\", \"h5\"}.' % (\n save_format,))\n if save_format == 'tf' and filepath_is_h5:\n raise ValueError(\n ('save_weights got save_format=\"tf\"/\"tensorflow\", but the '\n 'filepath (\"%s\") looks like an HDF5 file. 
Omit the \".h5\"/\".keras\" '\n 'when saving in TensorFlow format.')\n % filepath)\n\n if save_format == 'h5' and h5py is None:\n raise ImportError(\n '`save_weights` requires h5py when saving in hdf5.')\n if save_format == 'tf':\n check_filepath = filepath + '.index'\n else:\n check_filepath = filepath\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(check_filepath):\n proceed = ask_to_proceed_with_overwrite(check_filepath)\n if not proceed:\n return\n if save_format == 'h5':\n with h5py.File(filepath, 'w') as f:\n hdf5_format.save_weights_to_hdf5_group(f, self.layers)\n else:\n if context.executing_eagerly():\n session = None\n else:\n session = backend.get_session()\n optimizer = getattr(self, 'optimizer', None)\n if (optimizer\n and not isinstance(optimizer, trackable.Trackable)):\n logging.warning(\n ('This model was compiled with a Keras optimizer (%s) but is being '\n 'saved in TensorFlow format with `save_weights`. The model\\'s '\n 'weights will be saved, but unlike with TensorFlow optimizers in '\n 'the TensorFlow format the optimizer\\'s state will not be '\n 'saved.\\n\\nConsider using a TensorFlow optimizer from `tf.train`.')\n % (optimizer,))\n self._trackable_saver.save(filepath, session=session)\n # Record this checkpoint so it's visible from tf.train.latest_checkpoint.\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(filepath),\n model_checkpoint_path=filepath,\n save_relative_paths=True,\n all_model_checkpoint_paths=[filepath])\n\n def load_weights(self, filepath, by_name=False):\n \"\"\"Loads all layer weights, either from a TensorFlow or an HDF5 weight file.\n\n If `by_name` is False weights are loaded based on the network's\n topology. This means the architecture should be the same as when the weights\n were saved. Note that layers that don't have weights are not taken into\n account in the topological ordering, so adding or removing layers is fine as\n long as they don't have weights.\n\n If `by_name` is True, weights are loaded into layers only if they share the\n same name. This is useful for fine-tuning or transfer-learning models where\n some of the layers have changed.\n\n Only topological loading (`by_name=False`) is supported when loading weights\n from the TensorFlow format. Note that topological loading differs slightly\n between TensorFlow and HDF5 formats for user-defined classes inheriting from\n `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the\n TensorFlow format loads based on the object-local names of attributes to\n which layers are assigned in the `Model`'s constructor.\n\n Arguments:\n filepath: String, path to the weights file to load. For weight files in\n TensorFlow format, this is the file prefix (the same as was passed\n to `save_weights`).\n by_name: Boolean, whether to load weights by name or by topological\n order. Only topological loading is supported for weight files in\n TensorFlow format.\n\n Returns:\n When loading a weight file in TensorFlow format, returns the same status\n object as `tf.train.Checkpoint.restore`. 
When graph building, restore\n ops are run automatically as soon as the network is built (on first call\n for user-defined classes inheriting from `Model`, immediately if it is\n already built).\n\n When loading weights in HDF5 format, returns `None`.\n\n Raises:\n ImportError: If h5py is not available and the weight file is in HDF5\n format.\n \"\"\"\n if _is_hdf5_filepath(filepath):\n save_format = 'h5'\n else:\n try:\n pywrap_tensorflow.NewCheckpointReader(filepath)\n save_format = 'tf'\n except errors_impl.DataLossError:\n # The checkpoint is not readable in TensorFlow format. Try HDF5.\n save_format = 'h5'\n if save_format == 'tf':\n status = self._trackable_saver.restore(filepath)\n if by_name:\n raise NotImplementedError(\n 'Weights may only be loaded based on topology into Models when '\n 'loading TensorFlow-formatted weights (got by_name=True to '\n 'load_weights).')\n if not context.executing_eagerly():\n session = backend.get_session()\n # Restore existing variables (if any) immediately, and set up a\n # streaming restore for any variables created in the future.\n trackable_utils.streaming_restore(status=status, session=session)\n status.assert_nontrivial_match()\n return status\n if h5py is None:\n raise ImportError(\n '`load_weights` requires h5py when loading weights from HDF5.')\n if self._is_graph_network and not self.built:\n raise NotImplementedError(\n 'Unable to load weights saved in HDF5 format into a subclassed '\n 'Model which has not created its variables yet. Call the Model '\n 'first, then load the weights.')\n with h5py.File(filepath, 'r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n if by_name:\n hdf5_format.load_weights_from_hdf5_group_by_name(f, self.layers)\n else:\n hdf5_format.load_weights_from_hdf5_group(f, self.layers)\n\n def _updated_config(self):\n \"\"\"Util shared between different serialization methods.\n\n Returns:\n Model config with Keras version information added.\n \"\"\"\n from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top\n\n config = self.get_config()\n model_config = {\n 'class_name': self.__class__.__name__,\n 'config': config,\n 'keras_version': keras_version,\n 'backend': backend.backend()\n }\n return model_config\n\n def to_json(self, **kwargs):\n \"\"\"Returns a JSON string containing the network configuration.\n\n To load a network from a JSON save file, use\n `keras.models.model_from_json(json_string, custom_objects={})`.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `json.dumps()`.\n\n Returns:\n A JSON string.\n \"\"\"\n model_config = self._updated_config()\n return json.dumps(\n model_config, default=serialization.get_json_type, **kwargs)\n\n def to_yaml(self, **kwargs):\n \"\"\"Returns a yaml string containing the network configuration.\n\n To load a network from a yaml save file, use\n `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n `custom_objects` should be a dictionary mapping\n the names of custom losses / layers / etc to the corresponding\n functions / classes.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `yaml.dump()`.\n\n Returns:\n A YAML string.\n\n Raises:\n ImportError: if yaml module is not found.\n \"\"\"\n if yaml is None:\n raise ImportError(\n 'Requires yaml module installed (`pip install pyyaml`).')\n return yaml.dump(self._updated_config(), **kwargs)\n\n def summary(self, line_length=None, positions=None, print_fn=None):\n \"\"\"Prints 
a string summary of the network.\n\n Arguments:\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements\n in each line. If not provided,\n defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use. Defaults to `print`.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n\n Raises:\n ValueError: if `summary()` is called before the model is built.\n \"\"\"\n if not self.built:\n raise ValueError('This model has not yet been built. '\n 'Build the model first by calling `build()` or calling '\n '`fit()` with some data, or specify '\n 'an `input_shape` argument in the first layer(s) for '\n 'automatic build.')\n layer_utils.print_summary(self,\n line_length=line_length,\n positions=positions,\n print_fn=print_fn)\n\n def _validate_graph_inputs_and_outputs(self):\n \"\"\"Validates the inputs and outputs of a Graph Network.\"\"\"\n # Check for redundancy in inputs.\n if len(set(self.inputs)) != len(self.inputs):\n raise ValueError('The list of inputs passed to the model '\n 'is redundant. '\n 'All inputs should only appear once.'\n ' Found: ' + str(self.inputs))\n\n for x in self.inputs:\n # Check that x has appropriate `_keras_history` metadata.\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Input tensors to a ' + cls_name + ' ' +\n 'must come from `tf.keras.Input`. '\n 'Received: ' + str(x) +\n ' (missing previous layer metadata).')\n # Check that x is an input tensor.\n # pylint: disable=protected-access\n layer, _, _ = x._keras_history\n if len(layer._inbound_nodes) > 1 or (\n layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):\n cls_name = self.__class__.__name__\n logging.warning(cls_name + ' inputs must come from '\n '`tf.keras.Input` (thus holding past layer metadata), '\n 'they cannot be the output of '\n 'a previous non-Input layer. '\n 'Here, a tensor specified as '\n 'input to \"' + self.name + '\" was not an Input tensor, '\n 'it was generated by layer ' + layer.name + '.\\n'\n 'Note that input tensors are '\n 'instantiated via `tensor = tf.keras.Input(shape)`.\\n'\n 'The tensor that caused the issue was: ' + str(x.name))\n\n # Check compatibility of batch sizes of Input Layers.\n input_batch_sizes = [\n training_utils.get_static_batch_size(x._keras_history[0])\n for x in self.inputs\n ]\n consistent_batch_size = None\n for batch_size in input_batch_sizes:\n if batch_size is not None:\n if (consistent_batch_size is not None and\n batch_size != consistent_batch_size):\n raise ValueError('The specified batch sizes of the Input Layers'\n ' are incompatible. Found batch sizes: {}'.format(\n input_batch_sizes))\n consistent_batch_size = batch_size\n\n for x in self.outputs:\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Output tensors to a ' + cls_name + ' must be '\n 'the output of a TensorFlow `Layer` '\n '(thus holding past layer metadata). 
Found: ' + str(x))\n\n\ndef _is_hdf5_filepath(filepath):\n return (filepath.endswith('.h5') or filepath.endswith('.keras') or\n filepath.endswith('.hdf5'))\n\n\ndef _make_node_key(layer_name, node_index):\n return layer_name + '_ib-' + str(node_index)\n\n\ndef _map_graph_network(inputs, outputs):\n \"\"\"Validates a network's topology and gather its layers and nodes.\n\n Arguments:\n inputs: List of input tensors.\n outputs: List of outputs tensors.\n\n Returns:\n A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n - nodes: list of Node instances.\n - nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n - layers: list of Layer instances.\n - layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\n Raises:\n ValueError: In case the network is not valid (e.g. disconnected graph).\n \"\"\"\n # Network_nodes: set of nodes included in the graph of layers\n # (not all nodes included in the layers are relevant to the current graph).\n network_nodes = set() # ids of all nodes relevant to the Network\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n layer_indices = {} # dict {layer: index in traversal}\n nodes_in_decreasing_depth = []\n\n def build_map(tensor,\n finished_nodes,\n nodes_in_progress,\n layer,\n node_index,\n tensor_index):\n \"\"\"Builds a map of the graph of layers.\n\n This recursively updates the map `layer_indices`,\n the list `nodes_in_decreasing_depth` and the set `network_nodes`.\n\n Arguments:\n tensor: Some tensor in a graph.\n finished_nodes: Set of nodes whose subgraphs have been traversed\n completely. Useful to prevent duplicated work.\n nodes_in_progress: Set of nodes that are currently active on the\n recursion stack. Useful to detect cycles.\n layer: Layer from which `tensor` comes from. 
If not provided,\n will be obtained from `tensor._keras_history`.\n node_index: Node index from which `tensor` comes from.\n tensor_index: Tensor_index from which `tensor` comes from.\n\n Raises:\n ValueError: if a cycle is detected.\n \"\"\"\n node = layer._inbound_nodes[node_index] # pylint: disable=protected-access\n\n # Prevent cycles.\n if node in nodes_in_progress:\n raise ValueError('The tensor ' + str(tensor) + ' at layer \"' +\n layer.name + '\" is part of a cycle.')\n\n # Don't repeat work for shared subgraphs\n if node in finished_nodes:\n return\n\n node_key = _make_node_key(layer.name, node_index)\n # Update network_nodes.\n network_nodes.add(node_key)\n\n # Store the traversal order for layer sorting.\n if layer not in layer_indices:\n layer_indices[layer] = len(layer_indices)\n\n nodes_in_progress.add(node)\n\n # Propagate to all previous tensors connected to this node.\n for layer, node_index, tensor_index, tensor in node.iterate_inbound():\n build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index,\n tensor_index)\n\n finished_nodes.add(node)\n nodes_in_progress.remove(node)\n nodes_in_decreasing_depth.append(node)\n\n finished_nodes = set()\n nodes_in_progress = set()\n for x in outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n build_map(x, finished_nodes, nodes_in_progress,\n layer=layer,\n node_index=node_index,\n tensor_index=tensor_index)\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.outbound_layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.outbound_layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all layers it is connected to.\n for inbound_layer, node_index, _, _ in node.iterate_inbound():\n inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access\n previous_depth = nodes_depths.get(inbound_node, 0)\n nodes_depths[inbound_node] = max(depth + 1, previous_depth)\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = {}\n for node, depth in nodes_depths.items():\n if depth not in nodes_by_depth:\n nodes_by_depth[depth] = []\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = {}\n for layer, depth in layers_depths.items():\n if depth not in layers_by_depth:\n layers_by_depth[depth] = []\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers and self._layers_by_depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the 
graph\n # that can be computed from the inputs provided.\n computable_tensors = []\n for x in inputs:\n computable_tensors.append(x)\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.outbound_layer\n if layer:\n for x in nest.flatten(node.input_tensors):\n if x not in computable_tensors:\n raise ValueError('Graph disconnected: '\n 'cannot obtain value for tensor ' + str(x) +\n ' at layer \"' + layer.name + '\". '\n 'The following previous layers '\n 'were accessed without issue: ' +\n str(layers_with_complete_input))\n for x in nest.flatten(node.output_tensors):\n computable_tensors.append(x)\n layers_with_complete_input.append(layer.name)\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise ValueError('The name \"' + name + '\" is used ' +\n str(all_names.count(name)) + ' times in the model. '\n 'All layer names should be unique.')\n return network_nodes, nodes_by_depth, layers, layers_by_depth\n"
] |
[
[
"tensorflow.python.autograph.pyct.anno.setanno",
"tensorflow.python.autograph.pyct.anno.hasanno",
"tensorflow.python.autograph.pyct.parser.parse_entity",
"tensorflow.python.autograph.pyct.transformer.EntityInfo",
"tensorflow.python.autograph.pyct.transformer.Context",
"tensorflow.python.platform.test.main",
"tensorflow.python.autograph.pyct.anno.getanno"
],
[
"tensorflow.python.keras.models.save_model",
"tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.training.tracking.util.saver_with_op_caching",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group_by_name",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.training.tracking.layer_utils.has_weights",
"tensorflow.python.keras.engine.base_layer.Node",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.engine.base_layer_utils.generate_placeholders_from_shape",
"tensorflow.python.keras.utils.layer_utils.print_summary",
"tensorflow.python.keras.layers.deserialize",
"tensorflow.python.training.tracking.layer_utils.gather_trainable_weights",
"tensorflow.python.keras.utils.tf_utils.convert_shapes",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.python.training.tracking.layer_utils.gather_non_trainable_weights",
"tensorflow.python.keras.utils.generic_utils.object_list_uid",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.keras.mixed_precision.experimental.policy.Policy",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.keras.utils.tf_utils.get_reachable_from_inputs",
"tensorflow.python.keras.saving.hdf5_format.save_weights_to_hdf5_group",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group",
"tensorflow.python.keras.utils.tf_utils.convert_inner_node_data",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.tf_inspect.getcallargs",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.utils.io_utils.ask_to_proceed_with_overwrite",
"tensorflow.python.keras.engine.training_utils.get_static_batch_size",
"tensorflow.python.keras.engine.base_layer_utils.create_keras_history",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.training.tracking.util.streaming_restore",
"tensorflow.python.keras.backend.backend",
"tensorflow.python.keras.utils.tf_utils.ListWrapper",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten"
]
] |
nikitazviahin/UniversityProjects
|
[
"87b23d3528cce814bed78ddf3c9833884f2ec625"
] |
[
"Mathematical_Algorithms/lab3/runge2.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport pylab\n\n\ndef func(z, x):\n return -z / x\n \ntry:\n a = float(input('Ввежите начальную границу промежутка: '))\n b = float(input('Введите конечную границу промежутка: '))\n if a>=b:\n print('Начальная граница не может быьб больше конечной')\n raise SystemExit\n elif a<-999999 or a>999999 or b>999999 or b<-999999:\n print('Интервал в не границ диапазона. Интервал должен находится в пределах -999999 до 999999')\n raise SystemExit\nexcept:\n print('Введена строка, а не число')\n raise SystemExit\n \ntry:\n h = float(input('Введите шаг интервала: '))\n if h<=0 or h > 999999:\n print('Интервал должен быть между -999999 и 999999')\n raise SystemExit \nexcept:\n print('Введена строка, а не число')\n raise SystemExit\n\nn = int((b-a)/(h/2))\n\nmatrix = [[1, -1, 1, None, None]]\n\nfor i in range(1, n + 1):\n matrix.append([None, None, None, None, None])\n k = []\n q = []\n matrix[i][0] = (matrix[i-1][0] + h/2)\n q.append(func(matrix[i-1][2], matrix[i-1][0]))\n q.append(func(matrix[i-1][2] + q[0]*h/2, matrix[i-1][0] + h/2))\n matrix[i][4] = (q[0] + q[1])/2\n k.append(matrix[i-1][2])\n k.append(matrix[i-1][2] + q[0]*h/2)\n matrix[i][3] = (k[0] + k[1])/2\n matrix[i][1] = (matrix[i-1][1] + matrix[i][3]*h)\n matrix[i][2] = (matrix[i-1][2] + matrix[i][4]*h)\nmatrix = pd.DataFrame(np.array(matrix),\n columns=['x', 'y', 'z', 'k', 'q'])\n\nprint(matrix)\nxlist = [i for i in matrix['x']]\nylist = [i for i in matrix['y']]\npylab.plot (xlist, ylist)\n\npylab.show()\n"
] |
[
[
"numpy.array"
]
] |
Masao-Someki/espnet_onnx
|
[
"a0f44a087329bdcc292a4479ddf29d299edd92e5"
] |
[
"espnet_onnx/asr/frontend/logmel.py"
] |
[
"from typing import Tuple\nfrom typeguard import check_argument_types\n\nimport numpy as np\nimport librosa\n\nfrom espnet_onnx.utils.function import (\n make_pad_mask,\n mask_fill\n)\nfrom espnet_onnx.utils.config import Config\n\n\nclass LogMel:\n \"\"\"Convert STFT to fbank feats\n The arguments is same as librosa.filters.mel\n Args:\n config.fs: number > 0 [scalar] sampling rate of the incoming signal\n config.n_fft: int > 0 [scalar] number of FFT components\n config.n_mels: int > 0 [scalar] number of Mel bands to generate\n config.fmin: float >= 0 [scalar] lowest frequency (in Hz)\n config.fmax: float >= 0 [scalar] highest frequency (in Hz).\n If `None`, use `fmax = fs / 2.0`\n config.htk: use HTK formula instead of Slaney\n \"\"\"\n\n def __init__(\n self,\n config: Config\n ):\n fmin = 0 if config.fmin is None else config.fmin\n fmax = fs / 2 if config.fmax is None else config.fmax\n _mel_options = dict(\n sr=config.sr,\n n_fft=config.n_fft,\n n_mels=config.n_mels,\n fmin=fmin,\n fmax=fmax,\n htk=config.htk,\n )\n self.mel_options = _mel_options\n self.log_base = config.log_base\n melmat = librosa.filters.mel(**_mel_options)\n self.melmat = melmat.T\n\n def extra_repr(self):\n return \", \".join(f\"{k}={v}\" for k, v in self.mel_options.items())\n\n def __call__(\n self,\n feat: np.ndarray,\n ilens: np.ndarray = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n # feat: (B, T, D1) x melmat: (D1, D2) -> mel_feat: (B, T, D2)\n mel_feat = np.matmul(feat, self.melmat)\n mel_feat = np.clip(mel_feat, 1e-10, float('inf'))\n\n if self.log_base is None:\n logmel_feat = np.log(mel_feat)\n elif self.log_base == 2.0:\n logmel_feat = np.log2(mel_feat)\n elif self.log_base == 10.0:\n logmel_feat = np.log10(mel_feat)\n else:\n logmel_feat = np.log(mel_feat) / np.log(self.log_base)\n\n # Zero padding\n logmel_feat = mask_fill(\n logmel_feat, make_pad_mask(ilens, logmel_feat, 1), 0.0)\n return logmel_feat, ilens\n"
] |
[
[
"numpy.log",
"numpy.log2",
"numpy.matmul",
"numpy.log10"
]
] |
tiago939/feature_aligment
|
[
"423c606b0bed5a677b5fddde9cd1f17af91bdab6",
"423c606b0bed5a677b5fddde9cd1f17af91bdab6"
] |
[
"mnist/training.py",
"mnist/networks.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torchvision\nimport torchvision.transforms as transforms\nimport os, random, sys, math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networks\n\n#random seed\nmanualSeed = 1\nnp.random.seed(manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(manualSeed)\n torch.cuda.manual_seed_all(manualSeed)\n torch.backends.cudnn.deterministic = True\n\ndevice = 'cuda'\nbatch_size = 128\nZ = 128 #latent vector size\nepochs = 1 #number of epochs\nT = 1 #number of feature iterations\nP = 0.0 #weight of the perceptual loss\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.ToTensor(),\n])\n\ntrainset = torchvision.datasets.MNIST(\n root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=True, num_workers=1)\n\n# Model\nprint('==> Building model..')\nencoder = networks.Encoder(Z).to(device)\ndiscriminator = networks.Discriminator().to(device)\ngenerator = networks.Generator().to(device)\n\nfor m in encoder.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.orthogonal_(m.weight)\n\nfor m in generator.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.orthogonal_(m.weight)\n\nfor m in discriminator.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.orthogonal_(m.weight)\n\noptimizer = optim.Adam(encoder.parameters(), lr=0.0001, betas=(0.5,0.999))\noptimizer_gen = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5,0.999))\noptimizer_discr = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5,0.999))\n\n\n#loads checkpoint\ncheckpoint = torch.load('./checkpoint_fa/ckpt.pth',map_location='cpu')\nencoder.load_state_dict(checkpoint['encoder_state_dict'])\ndiscriminator.load_state_dict(checkpoint['discriminator_state_dict'])\ngenerator.load_state_dict(checkpoint['generator_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\noptimizer_discr.load_state_dict(checkpoint['optimizer_discr_state_dict'])\noptimizer_gen.load_state_dict(checkpoint['optimizer_gen_state_dict'])\n\n\n# Training\nencoder.train()\ndiscriminator.train()\ngenerator.train()\n\nfor epoch in range(epochs):\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n print('epoch: ', epoch, ' batch: ', batch_idx)\n for m in encoder.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n m.weight.data = torch.clamp(m.weight.data, min=-(2**0.5), max=2**0.5)\n\n inputs = inputs.to(device)\n inputs = torch.clamp(inputs, min=0.01, max=0.99)\n outputs = encoder(inputs, 1)\n C = encoder(outputs, 2)\n\n labels = torch.zeros(C.shape, device=device)\n for i in range(len(labels)):\n l = targets[i].item()\n labels[i][l] = 1.0\n\n output = encoder(inputs,1)\n mean = encoder(output,3)\n log_variance = encoder(output,4)\n std = torch.exp(0.5*log_variance)\n eps = torch.empty(std.shape, device=device).normal_(mean=0.0,std=1.0)\n z = mean + std*eps\n\n #extract feature\n r = torch.zeros(inputs.shape, device=device)\n r.requires_grad_(True)\n for _ in range(T):\n out = encoder(r,1)\n meanf = encoder(out,3)\n log_variancef = encoder(out,4)\n stdf = torch.exp(0.5*log_variancef)\n Cf = encoder(out,2)\n zf = meanf + stdf\n cost = 0.5*torch.sum( (z - zf)**2.0) + 0.5*torch.sum( (labels-Cf)**2.0)\n r= r - torch.autograd.grad(cost, r, retain_graph=True, 
create_graph=True)[0]\n r = torch.sigmoid(r)\n\n RL = 0.5*torch.sum( (inputs-r)**2.0) #align feaute and inputs\n KLD = -0.5*torch.sum( 1.0 + log_variance - mean**2.0 - torch.exp(log_variance), axis=1)\n beta = torch.rand(KLD.shape, device=device)\n\n loss = torch.sum(beta*KLD) + RL\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n g = generator(r.detach(), labels)\n output_g = encoder(g, 1)\n mean_g = encoder(output_g, 3)\n log_variance_g = encoder(output_g, 4)\n std_g = torch.exp(0.5*log_variance_g)\n\n output = encoder(inputs,1)\n mean = encoder(output,3)\n log_variance = encoder(output,4)\n std = torch.exp(0.5*log_variance)\n\n D = discriminator(g)\n loss = 0.5*torch.sum( (1.0-D)**2.0) + P*(0.5*torch.sum( (mean-mean_g)**2.0) + 0.5*torch.sum( (std - std_g)**2.0))\n optimizer_gen.zero_grad()\n loss.backward()\n optimizer_gen.step()\n\n D_real = discriminator(inputs)\n loss = 0.5*torch.sum( (1.0-D_real)**2.0)\n optimizer_discr.zero_grad()\n loss.backward()\n optimizer_discr.step()\n\n z = torch.empty(z.shape, device=device).normal_(mean=0.0,std=1.0)\n r = torch.zeros(inputs.shape, device=device)\n r.requires_grad_(True)\n out = encoder(r,1)\n meanf = encoder(out,3)\n log_variancef = encoder(out,4)\n stdf = torch.exp(0.5*log_variancef)\n Cf = encoder(out,2)\n zf = meanf + stdf\n cost = 0.5*torch.sum( (z - zf)**2.0) + 0.5*torch.sum( (labels-Cf)**2.0)\n r = r - torch.autograd.grad(cost, r, retain_graph=True, create_graph=True)[0]\n r = torch.sigmoid(r)\n g = generator(r,labels)\n\n D_fake = discriminator(g.detach())\n loss = 0.5*torch.sum( (-1.0-D_fake)**2.0)\n optimizer_discr.zero_grad()\n loss.backward()\n optimizer_discr.step()\n\nprint('Saving..')\nif not os.path.isdir('checkpoint_fa'):\n os.mkdir('checkpoint_fa')\ntorch.save({\n 'encoder_state_dict': encoder.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'generator_state_dict': generator.state_dict(),\n 'optimizer_gen_state_dict': optimizer_gen.state_dict(),\n 'discriminator_state_dict': discriminator.state_dict(),\n 'optimizer_discr_state_dict': optimizer_discr.state_dict(),\n }, './checkpoint_fa/ckpt.pth')\nprint('done!')\n",
"import torch \nimport torch.nn as nn\nimport numpy as np\n\nclass Flatten(torch.nn.Module):\n def forward(self, x):\n batch_size = x.shape[0]\n return x.view(batch_size, -1)\n\nclass unFlatten2(torch.nn.Module):\n def forward(self, x):\n batch_size = x.shape[0]\n return x.view(batch_size, 1, 28, 28)\n\nclass asinh(torch.nn.Module):\n def forward(self, x):\n return torch.asinh(x)\n\nclass Encoder(nn.Module):\n\n def __init__(self,Z):\n super(Encoder, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(),\n\n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(),\n\n Flatten(),\n nn.Linear(3136,4096),\n nn.LeakyReLU(),\n )\n\n self.mean = nn.Linear(4096, Z)\n self.log_variance = nn.Linear(4096, Z)\n self.classifier = nn.Linear(4096,10)\n\n def forward(self, x, mode):\n if mode == 1:\n x = self.encoder(x)\n return x\n\n if mode == 2:\n x = self.classifier(x)\n return x\n\n if mode == 3:\n x = self.mean(x)\n return x\n\n if mode == 4:\n x = self.log_variance(x)\n return x\n\nclass Generator(nn.Module):\n\n def __init__(self):\n super(Generator, self).__init__()\n\n self.label = nn.Sequential(\n nn.Linear(10, 28*28),\n unFlatten2()\n )\n\n self.generator = nn.Sequential(\n nn.Conv2d(2, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1),\n nn.Sigmoid(),\n )\n\n def forward(self, x, y):\n y = self.label(y)\n x = torch.cat((x,y),dim=1)\n x = self.generator(x)\n return x\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self):\n super(Discriminator, self).__init__()\n\n self.discriminator = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(),\n\n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(),\n\n Flatten(),\n nn.Linear(3136,4096),\n nn.LeakyReLU(),\n nn.Linear(4096,1)\n )\n\n def forward(self, x):\n x = self.discriminator(x)\n return x\n\nclass Memory(nn.Module):\n\n def __init__(self,Z):\n super(Memory, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n asinh(),\n nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),\n asinh(),\n\n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\n asinh(),\n nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),\n asinh(),\n\n Flatten(),\n nn.Linear(3136,4096),\n asinh(),\n nn.Linear(4096,Z)\n )\n\n def forward(self, x,):\n x = self.encoder(x)\n return x\n"
] |
[
[
"torch.sigmoid",
"torch.empty",
"numpy.random.seed",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.exp",
"torch.nn.init.orthogonal_",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.rand",
"torch.clamp",
"torch.autograd.grad"
],
[
"torch.cat",
"torch.asinh",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d"
]
] |
Gandor26/covid-open
|
[
"50dcb773160edc16b107785a6bb32ae6f82fc9a7"
] |
[
"hosps.py"
] |
[
"from typing import Optional, Tuple, List, Dict\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\nimport torch as pt\nfrom torch import Tensor, BoolTensor, nn\nfrom torch.nn import init, functional as F\nfrom torch.optim import Adam\n\nfrom data import *\nfrom expsmooth import ExpSmooth\nfrom attention import XSeriesAttention\nfrom base import GlobalLocalModel\n\n\nclass CausalRegressor(nn.Module):\n def __init__(self, \n cond_size: int,\n pred_size: int,\n n_location: int,\n n_output: int,\n s_window: int,\n d_hidden: int,\n ) -> None:\n super(CausalRegressor, self).__init__()\n self.cond_size = cond_size\n self.pred_size = pred_size\n self.n_location = n_location\n self.s_window = s_window\n self.d_hidden = d_hidden\n self.n_output = n_output\n \n self.temporal_weight = nn.Parameter(Tensor(n_output*pred_size, d_hidden, cond_size))\n self.ma_weight = nn.Parameter(Tensor(d_hidden, 1, s_window))\n self._reset_parameters()\n \n def _reset_parameters(self) -> None:\n init_weight = Tensor(self.pred_size).uniform_()\n init_weight = F.softmax(init_weight, dim=0)\n weights = []\n for day in range(self.cond_size):\n weights.append(init_weight)\n init_weight = init_weight[:-1]\n init_weight = pt.cat([\n 1.0-pt.sum(init_weight, dim=0, keepdim=True), \n init_weight,\n ], dim=0)\n weights = pt.stack(weights, dim=1)\n weights = pt.stack([weights] * self.d_hidden, dim=1)\n weights = pt.cat([weights] * self.n_output, dim=0)\n with pt.no_grad():\n self.temporal_weight.copy_(weights)\n \n init_weight = Tensor(self.d_hidden, 1, self.s_window)\n init.xavier_uniform_(init_weight)\n init_weight = F.softmax(init_weight, dim=2)\n with pt.no_grad():\n self.ma_weight.copy_(init_weight)\n\n def forward(self,\n new_cases: Tensor,\n total_beds: Tensor,\n senior_pop_rate: Tensor,\n ) -> Tensor:\n cases = new_cases.unsqueeze(dim=1)\n hidden = F.conv1d(cases, self.ma_weight)\n hidden = F.relu(hidden)\n senior_pop_rate = senior_pop_rate.view(-1, 1, 1)\n total_beds = total_beds.view(-1, 1, 1)\n hidden = 2 * total_beds / (1 + pt.exp(-(8*senior_pop_rate/total_beds) * hidden)) - total_beds\n preds = F.conv1d(hidden, self.temporal_weight)\n preds = preds.transpose(-1,-2)\n return preds\n\n\nclass HospModel(GlobalLocalModel):\n def __init__(self,\n cond_size: int,\n pred_size: int,\n n_rolls: int,\n d_hidden: int,\n n_location: int,\n case_window: int,\n quantiles: List[int],\n d_feats: int = 0,\n share_params: bool = False,\n full_attention: bool = False,\n symmetric: bool = False,\n add_autoreg: bool = True,\n fix_ar_key: bool = True,\n var_penalty: float = 1.0,\n ) -> None:\n super(HospModel, self).__init__(\n cond_size, pred_size, n_rolls, d_hidden, n_location,\n quantiles, share_params, var_penalty,\n )\n \n self.smoother = ExpSmooth(\n cond_size=cond_size, \n pred_size=pred_size*n_rolls, \n n_location=n_location,\n share_params=share_params,\n )\n self.attention = XSeriesAttention(\n cond_size=cond_size,\n pred_size=pred_size,\n d_hidden=d_hidden,\n d_feats=d_feats,\n n_rolls=n_rolls,\n n_output=self.n_output,\n full_attention=full_attention,\n symmetric=symmetric,\n cum_value=False,\n add_autoreg=add_autoreg,\n fix_ar_key=fix_ar_key,\n )\n \n self.regression = CausalRegressor(\n cond_size=cond_size,\n pred_size=pred_size,\n n_location=n_location,\n n_output=self.n_output,\n s_window=case_window,\n d_hidden=d_hidden,\n )\n\n self.register_buffer('smoothed', None, persistent=False)\n self.register_buffer('level_diffs', None, persistent=False)\n self.register_buffer('global_pr', None, persistent=False)\n 
self.register_buffer('local_pr', None, persistent=False)\n\n def forward(self, \n hosp_data: Tensor,\n case_data: Tensor,\n total_beds: Tensor,\n senior_pop_rate: Tensor,\n query_time_feats: Optional[Tensor] = None,\n ref_time_feats: Optional[Tensor] = None,\n query_space_feats: Optional[Tensor] = None,\n ref_space_feats: Optional[Tensor] = None,\n test_size: Optional[int] = None,\n ) -> Tuple[Tensor, Tensor, Dict]:\n case_query = case_data\n attn_mask = pt.ones(\n hosp_data.size(1)-self.cond_size+1,\n hosp_data.size(1)-self.cond_size-self.pred_size+1,\n dtype=pt.bool, device=hosp_data.device,\n ).triu()\n attn_mask = attn_mask.view(1, *attn_mask.shape, 1)\n hosp_length = hosp_data.size(1)\n target_index = pt.tensor(\n np.arange(self.cond_size, hosp_length+1).reshape(-1,1)\\\n + np.arange(self.pred_size * self.n_rolls).reshape(1,-1),\n dtype=pt.long, device=hosp_data.device\n )\n target_mask = target_index >= hosp_length\n target_index = pt.where(target_mask, pt.zeros_like(target_index)-1, target_index)\n target = hosp_data[:, target_index]\n \n sm, local_pr, level_diffs = self.smoother(hosp_data)\n hosp_query = hosp_data\n hosp_ref = hosp_data\n if test_size is not None:\n hosp_query = hosp_data[:, -(test_size+self.cond_size):]\n case_query = case_data[:, -(test_size+self.cond_size+self.regression.s_window-1):]\n attn_mask = attn_mask[:, -(test_size+1):]\n local_pr = local_pr[:, -(test_size+1):]\n target = target[:, -(test_size+1):]\n target_mask = target_mask[-(test_size+1):]\n \n local_est = self.regression(\n new_cases=case_query,\n total_beds=total_beds,\n senior_pop_rate=senior_pop_rate,\n )\n \n global_pr = self.attention(\n query=hosp_query,\n ref=hosp_ref,\n local_est=local_est,\n query_space_feats=query_space_feats,\n ref_space_feats=ref_space_feats,\n query_time_feats=query_time_feats,\n ref_time_feats=ref_time_feats,\n attn_mask=attn_mask,\n )\n\n pr = self.tradeoff * pt.clamp_min(global_pr, 0.0) + (1 - self.tradeoff) * pt.clamp_min(local_pr, 0.0).unsqueeze(dim=2)\n # pr = pt.clamp_min(global_pr + local_pr.unsqueeze(dim=2), 0.0)\n loss = sum(\n self.quantile_error(p, target, q) \n for q, p in zip(\n self.quantiles,\n pr.unbind(dim=2),\n )\n )\n loss = loss.masked_fill(target_mask, 0.0).mean()\n loss = loss + level_diffs.mean() * self.var_penalty\n self.smoothed = sm.detach()\n self.level_diffs = level_diffs.detach()\n self.global_pr = global_pr.detach()\n self.local_pr = local_pr.detach()\n return loss, pr\n\n\ndef load_data(\n start_date: str,\n end_date: str,\n case_window: int,\n device: int = -1,\n test_size: Optional[int] = None,\n) -> Dict[str, np.ndarray]:\n hosps = load_hospitalized_data(\n start_date=pd.to_datetime(start_date)-pd.Timedelta(1, unit='d'),\n end_date=end_date\n )\n hosps = hosps.bfill()\n hosps = hosps.diff(1).iloc[1:]\n hosps = hosps.rename(columns=state2abbr)\n \n cases = load_cdc_truth(\n death=False, cumulative=False,\n start_date=pd.to_datetime(start_date)-pd.Timedelta(case_window-1, unit='d'),\n end_date=end_date,\n ).rename(columns=state2abbr).loc[:, hosps.columns]\n beds = load_bed_and_population_data().loc[hosps.columns]\n normed_beds = (beds - beds.mean(axis=0)) / beds.std(axis=0)\n normed_beds['65+%'] = beds['population_65'] / beds['adult_population']\n # mobs = load_mobility_data().rename(columns=state2abbr)\n \n feats = load_census_embedding().loc[:, [\n 'ANC1P_252', \n 'ANC1P_290', \n 'ANC2P_252', \n 'ANC2P_290', \n 'HICOV_1',\n 'HICOV_2',\n 'LANP_1200'\n ]].reindex(beds.index).fillna(0.0)\n\n query_space_feats = np.c_[\n normed_beds.loc[\n 
hosps.columns, \n [\n 'adult_population', \n 'population_65', \n 'density',\n ]\n ].values,\n feats.loc[hosps.columns].values,\n ]\n ref_space_feats = query_space_feats.copy()\n \n device = pt.device('cpu') if device < 0 else pt.device(f'cuda:{device}')\n data = {\n 'hosp_data': pt.tensor(hosps.values.T, dtype=pt.float, device=device),\n 'case_data': pt.tensor(cases.values.T, dtype=pt.float, device=device),\n 'total_beds': pt.tensor(beds.loc[:, 'total_hospital_beds'].values, dtype=pt.float, device=device),\n 'senior_pop_rate': pt.tensor(normed_beds.loc[hosps.columns, '65+%'].values, dtype=pt.float, device=device),\n 'query_space_feats': pt.tensor(query_space_feats, dtype=pt.float, device=device),\n 'ref_space_feats': pt.tensor(ref_space_feats, dtype=pt.float, device=device),\n }\n if test_size is not None:\n train_data = deepcopy(data)\n train_data['hosp_data'] = train_data['hosp_data'][:, :-test_size]\n train_data['case_data'] = train_data['case_data'][:, :-test_size]\n valid_data = data\n else:\n train_data = data\n valid_data = None\n return train_data, valid_data"
] |
[
[
"torch.nn.functional.softmax",
"pandas.to_datetime",
"torch.Tensor",
"torch.cat",
"numpy.arange",
"torch.nn.functional.conv1d",
"torch.zeros_like",
"torch.sum",
"pandas.Timedelta",
"torch.tensor",
"torch.exp",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.clamp_min",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"torch.device"
]
] |
Turakar/mutation-prediction
|
[
"2fe9d56dbdba66e1e0ece2776c13e0a9d07ce366"
] |
[
"mutation_prediction/cli/__init__.py"
] |
[
"import argparse\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\n\nimport hjson\nimport numpy as np\nimport optuna\nfrom optuna.trial import TrialState\nfrom sklearn import metrics\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom tqdm import tqdm\n\nimport mutation_prediction.cli.registry as registry\nimport mutation_prediction.data.baseline as baseline\nimport mutation_prediction.data.preprocessing as preprocessing\nfrom mutation_prediction import utils\nfrom mutation_prediction.models import (\n ModelObjectiveCrossValidation,\n ModelObjectiveFixedValidation,\n ModelObjectiveSelfScoring,\n)\n\n\ndef main():\n logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n warnings.filterwarnings(\n \"ignore\", message=\"The dataloader, train dataloader, does not have many workers.*\"\n )\n warnings.filterwarnings(\n \"ignore\", message=\"The dataloader, val dataloader 0, does not have many workers.*\"\n )\n\n parser = argparse.ArgumentParser(\n description=\"Mutation Prediction optimization CLI\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"--db\", \"-d\", help=\"SQLAlchemy DB URL\")\n subparsers = parser.add_subparsers(title=\"subcommands\", required=True, dest=\"subcommand\")\n\n parser_new = subparsers.add_parser(\n \"new\", help=\"Create a new study or overwrite an existing one.\"\n )\n parser_new.set_defaults(func=main_new)\n parser_new.add_argument(\"name\")\n parser_new.add_argument(\"model\")\n parser_new.add_argument(\"dataset\")\n parser_new.add_argument(\"params\", nargs=\"+\")\n parser_new.add_argument(\n \"--sampler\",\n \"-s\",\n help=\"Optuna sampler to use.\",\n choices=[\"RandomSampler\", \"TPESampler\", \"CmaEsSampler\"],\n default=\"TPESampler\",\n )\n parser_new.add_argument(\n \"--multivariate\",\n action=\"store_true\",\n help=\"Make TPE Sampler use a multivariate Parzen estimator instead of multiple independent ones.\",\n )\n parser_new.add_argument(\n \"--startup-trials\",\n type=int,\n help=\"Number of random samples before using TPESampler/CmaEsSampler.\",\n )\n parser_new.add_argument(\n \"--group\",\n action=\"store_true\",\n help=\"Make TPE sampler use grouped multivariate distributions for \"\n \"conditional parameters.\",\n )\n parser_new.add_argument(\n \"--liar\",\n action=\"store_true\",\n help=\"Make TPE sampler use the constant liar feature to avoid evaluating similar\"\n \" hyperparameter configurations at once.\",\n )\n parser_new.add_argument(\n \"--objective\",\n \"-o\",\n default=\"cross-validation\",\n choices=[\"cross-validation\", \"fixed\", \"self-scoring\"],\n help=\"Which model objective function to use.\",\n )\n parser_new.add_argument(\n \"--splits\",\n \"-k\",\n default=5,\n type=int,\n help=\"Number of splits used in case of cross-validation.\",\n )\n parser_new.add_argument(\n \"--iterations\",\n \"-i\",\n default=5,\n type=int,\n help=\"Number of iterations used in case of fixed validation set.\",\n )\n\n parser_optimize = subparsers.add_parser(\"optimize\", help=\"Optimize an existing study.\")\n parser_optimize.set_defaults(func=main_optimize)\n parser_optimize.add_argument(\"name\")\n parser_optimize.add_argument(\n \"--trials\",\n \"-n\",\n help=\"If the specified number of trials is reached in total, optimization stops.\",\n type=int,\n )\n parser_optimize.add_argument(\n \"--timeout\",\n \"-t\",\n help=\"If the specified number of seconds passed, optimization stops.\",\n type=float,\n )\n parser_optimize.add_argument(\n \"--early-stop\",\n \"-e\",\n 
type=int,\n help=\"If the specified number of trials give no improvement, optimization stops.\",\n )\n parser_optimize.add_argument(\n \"--iterations\",\n \"-i\",\n help=\"If the specified number of trials is reached for this process, optimization stops.\",\n type=int,\n )\n\n parser_reevaluate = subparsers.add_parser(\n \"reevaluate\", help=\"Re-evaluate a cross-validation objective for better estimation.\"\n )\n parser_reevaluate.set_defaults(func=main_reevaluate)\n parser_reevaluate.add_argument(\"name\")\n parser_reevaluate.add_argument(\"--top\", type=int)\n parser_reevaluate.add_argument(\"--trials\", type=str)\n\n parser_evaluate = subparsers.add_parser(\n \"evaluate\", help=\"Evaluate the best hyperparameter configuration on the test set.\"\n )\n parser_evaluate.set_defaults(func=main_evaluate)\n parser_evaluate.add_argument(\"name\")\n parser_evaluate.add_argument(\n \"--iterations\", \"-i\", help=\"Number of iterations to take median over.\", default=1, type=int\n )\n parser_evaluate.add_argument(\n \"--trial\", \"-t\", help=\"Trial number to evaluate. Use best trial if not set.\", type=int\n )\n parser_evaluate.add_argument(\"--dataset\", help=\"Specify a different dataset for evaluation.\")\n\n parser_copy = subparsers.add_parser(\n \"copy\", help=\"Copy a study between two storages with optional renaming.\"\n )\n parser_copy.set_defaults(func=main_copy)\n parser_copy.add_argument(\"name\")\n parser_copy.add_argument(\"source\")\n parser_copy.add_argument(\"destination\")\n parser_copy.add_argument(\"--rename\", type=str, help=\"The new name of the study.\", default=None)\n parser_copy.add_argument(\"--yes\", \"-y\", action=\"store_true\")\n\n parser_train = subparsers.add_parser(\"train\", help=\"Train a model and save it.\")\n parser_train.set_defaults(func=main_train)\n parser_train.add_argument(\"name\")\n parser_train.add_argument(\"output\")\n\n args = parser.parse_args()\n args.func(args)\n\n\ndef main_new(args):\n params_hjson = \" \".join(args.params)\n try:\n params = hjson.loads(params_hjson)\n except hjson.scanner.HjsonDecodeError as e:\n print(\"Invalid HJSON!\")\n print(e)\n print(params_hjson)\n sys.exit(1)\n\n if args.model not in registry.models:\n raise KeyError(\"Unknown model! %s\" % args.model)\n if args.dataset not in registry.datasets:\n raise KeyError(\"Unknown dataset! 
%s\" % args.dataset)\n sampler_args = {}\n if args.multivariate:\n sampler_args[\"multivariate\"] = True\n if args.startup_trials:\n sampler_args[\"n_startup_trials\"] = args.startup_trials\n if args.group:\n sampler_args[\"group\"] = True\n if args.liar:\n sampler_args[\"constant_liar\"] = True\n sampler = get_sampler(args.sampler, sampler_args)\n\n storage = get_storage(args)\n study = optuna.create_study(\n storage=storage, study_name=args.name, load_if_exists=False, sampler=sampler\n )\n study.set_user_attr(\"params\", params)\n study.set_user_attr(\"model\", args.model)\n study.set_user_attr(\"dataset\", args.dataset)\n study.set_user_attr(\"objective\", args.objective)\n if args.objective == \"cross-validation\":\n study.set_user_attr(\"splits\", args.splits)\n elif args.objective == \"fixed\":\n study.set_user_attr(\"iterations\", args.iterations)\n study.set_user_attr(\"sampler\", args.sampler)\n study.set_user_attr(\"sampler_args\", sampler_args)\n\n\ndef main_optimize(args):\n if (\n args.early_stop is None\n and args.trials is None\n and args.timeout is None\n and args.iterations is None\n ):\n raise RuntimeError(\"No stopping criterion given!\")\n\n model, params, study, datasets, _ = load_study(args)\n\n # create objective\n objective = make_objective(datasets, model, params, study)\n\n # start optimization\n start_time = time.time()\n iteration = 0\n while True:\n\n if (\n args.trials is not None\n and len(\n [\n trial\n for trial in study.trials\n if trial.state\n in [\n TrialState.COMPLETE,\n TrialState.RUNNING,\n TrialState.PRUNED,\n ]\n ]\n )\n >= args.trials\n ):\n print(\"Maximum number of total trials reached.\")\n break\n if args.timeout is not None and time.time() > start_time + args.timeout:\n print(\"Timeout reached.\")\n break\n\n study.optimize(objective, n_trials=1)\n last_trial = study.trials[-1]\n iteration += 1\n\n if (\n args.early_stop is not None\n and last_trial.number >= study.best_trial.number + args.early_stop\n ):\n print(\"Early stopping criterion reached.\")\n break\n\n if args.iterations is not None and iteration >= args.iterations:\n print(\"Maximum number of trials for this process reached.\")\n break\n\n\ndef main_reevaluate(args):\n # initialize\n model, params, study, datasets, storage = load_study(args)\n assert study.user_attrs[\"objective\"] == \"cross-validation\"\n dataset, _ = datasets\n\n # select trials to re-evaluate\n if args.top:\n all_trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]\n trials = sorted(all_trials, key=lambda trial: trial.value)[: args.top]\n elif args.trials:\n trial_numbers = {int(x) for x in args.trials.split(\",\")}\n trials = [trial for trial in study.trials if trial.number in trial_numbers]\n else:\n raise ValueError(\"Neither --top nor --trials specified!\")\n reevaluation = study.user_attrs.get(\"reevaluation\", {})\n trials = [trial for trial in trials if str(trial.number) not in reevaluation]\n\n # re-evaluate trials\n for trial in tqdm(trials):\n # collect leave-one-out-cross-validation predictions\n predictions = np.zeros((len(dataset),), dtype=dataset.get_y().dtype)\n for val_idx in tqdm(range(len(dataset)), leave=False):\n mask = utils.make_mask(len(dataset), val_idx)\n train = dataset[~mask]\n val = dataset[mask]\n model.hyperparams.set_from_trial(trial, params)\n model.fit(train)\n val_pred = model.predict(val)\n predictions[val_idx] = val_pred[0]\n\n # update scores\n reevaluation[trial.number] = {\n \"rmse\": mean_squared_error(dataset.get_y(), predictions, 
squared=False),\n \"r2\": r2_score(dataset.get_y(), predictions),\n }\n study.set_user_attr(\"reevaluation\", reevaluation)\n\n\ndef main_evaluate(args):\n model, params, study, datasets, storage = load_study(args)\n if args.trial is not None:\n trial = study.trials[args.trial]\n elif \"selected\" in study.user_attrs:\n trial = study.trials[study.user_attrs[\"selected\"]]\n else:\n trial = study.best_trial\n print(\"Evaluating trial #%d\" % trial.number)\n\n iterations = args.iterations\n\n evaluation_key = \"evaluation\"\n if args.dataset and args.dataset != study.user_attrs[\"dataset\"]:\n evaluation_key = \"evaluation_%s\" % args.dataset\n evaluation = study.user_attrs.get(evaluation_key, {})\n if str(trial.number) in evaluation:\n scores = evaluation[str(trial.number)]\n iterations -= len(scores[\"test_rmse\"])\n else:\n scores = {\n \"train_rmse\": [],\n \"train_r2\": [],\n \"test_rmse\": [],\n \"test_r2\": [],\n }\n model.hyperparams.set_from_trial(trial, params)\n\n if iterations > 0:\n objective = trial.user_attrs.get(\"objective\") or \"cross-validation\"\n if objective in [\"cross-validation\", \"self-scoring\"]:\n if len(datasets) == 3:\n train = preprocessing.shuffle(datasets[0] + datasets[1])\n else:\n train = datasets[0]\n elif objective == \"fixed\":\n train = datasets[0]\n else:\n raise KeyError(\"Unknown objective!\")\n test = datasets[-1]\n\n iteration_generator = range(iterations)\n if args.iterations > 1:\n iteration_generator = tqdm(iteration_generator)\n for _ in iteration_generator:\n model.fit(train, trial=trial)\n if len(test) >= 1000:\n prediction = model.predict_batched(test, 1000)\n else:\n prediction = model.predict(test)\n scores[\"test_rmse\"].append(\n metrics.mean_squared_error(test.get_y(), prediction, squared=False)\n )\n scores[\"test_r2\"].append(metrics.r2_score(test.get_y(), prediction))\n if len(train) >= 1000:\n prediction = model.predict_batched(train, 1000)\n else:\n prediction = model.predict(train)\n scores[\"train_rmse\"].append(\n metrics.mean_squared_error(train.get_y(), prediction, squared=False)\n )\n scores[\"train_r2\"].append(metrics.r2_score(train.get_y(), prediction))\n\n evaluation = study.user_attrs.get(evaluation_key, {})\n evaluation[str(trial.number)] = scores\n study.set_user_attr(evaluation_key, evaluation)\n\n rmse = float(np.median(scores[\"test_rmse\"]))\n r2 = float(np.median(scores[\"test_r2\"]))\n\n print(\"RMSE test values: %s\" % str(scores[\"test_rmse\"]))\n print(\"R² test values: %s\" % str(scores[\"test_r2\"]))\n print(\"RMSE train values: %s\" % str(scores[\"train_rmse\"]))\n print(\"R² train values: %s\" % str(scores[\"train_r2\"]))\n print(\"Trial value: %.3f\" % trial.value)\n\n print(hjson.dumps(model.hyperparams.get()))\n dataset: str = study.user_attrs[\"dataset\"]\n model_name: str = study.user_attrs[\"model\"]\n if model_name.startswith(\"Base\") and dataset in [\"A\", \"B\", \"C\", \"D\"]:\n method, descriptor = tuple(model_name[len(\"Base\") :].split(\"_\"))\n base_params = baseline.load_hyperparameters(method)[dataset][descriptor]\n print(hjson.dumps(base_params))\n base_scores = baseline.load_scores()[dataset][descriptor][method]\n print(\n \"%s: %.3f (%.3f) / %.3f (%.3f)\"\n % (args.name, rmse, base_scores[\"RMSE\"], r2, base_scores[\"r2\"])\n )\n else:\n print(\"%s: %.3f / %.3f\" % (args.name, rmse, r2))\n\n\ndef main_copy(args):\n from_storage = url_to_storage(args.source)\n from_name = args.name\n to_storage = url_to_storage(args.destination)\n to_name = args.rename\n if to_name is None:\n to_name = 
from_name\n try:\n optuna.load_study(to_name, to_storage)\n if args.yes or input(\"Study already exists in destination. Overwrite? [y/n] \") == \"y\":\n optuna.delete_study(to_name, to_storage)\n else:\n return\n except KeyError:\n pass\n optuna.copy_study(\n from_name,\n from_storage,\n to_storage,\n to_name,\n )\n\n\ndef main_train(args):\n model, params, study, datasets, _ = load_study(args)\n best_trial_number = study.user_attrs.get(\"selected\", study.best_trial.number)\n best_trial = study.trials[best_trial_number]\n model.hyperparams.set_from_trial(best_trial, params)\n train, test = datasets\n print(\"Fitting...\")\n model.fit(train)\n print(\"Testing...\")\n if len(test) >= 1000:\n prediction = model.predict_batched(test, 1000)\n else:\n prediction = model.predict(test)\n print(\"R² = %.4f\" % r2_score(test.get_y(), prediction))\n model.save(os.path.join(\"models\", args.output + \".pt\"))\n\n\ndef load_study(args):\n storage = get_storage(args)\n study_id = storage.get_study_id_from_name(args.name)\n study_user_attrs = storage.get_study_user_attrs(study_id)\n sampler_name = study_user_attrs.get(\"sampler\") or \"TPESampler\"\n sampler_args = study_user_attrs.get(\"sampler_args\") or {}\n sampler = get_sampler(sampler_name, sampler_args)\n study = optuna.create_study(\n storage=storage, sampler=sampler, study_name=args.name, load_if_exists=True\n )\n model = registry.models[study.user_attrs[\"model\"]]()\n params = study.user_attrs[\"params\"]\n datasets = registry.datasets[\n args.dataset\n if hasattr(args, \"dataset\") and args.dataset is not None\n else study.user_attrs[\"dataset\"]\n ]()\n datasets = tuple(preprocessing.shuffle(d) for d in datasets)\n return model, params, study, datasets, storage\n\n\ndef make_objective(datasets, model, params, study, splits=None):\n objective = None\n if \"objective\" in study.user_attrs:\n objective_name = study.user_attrs[\"objective\"]\n else:\n objective_name = \"cross-validation\"\n if objective_name in [\"cross-validation\", \"self-scoring\"]:\n if len(datasets) == 3:\n train = preprocessing.shuffle(datasets[0] + datasets[1])\n else:\n train = datasets[0]\n if objective_name == \"cross-validation\":\n objective = ModelObjectiveCrossValidation(\n model,\n params,\n train,\n splits=splits if splits is not None else study.user_attrs[\"splits\"],\n )\n elif objective_name == \"self-scoring\":\n objective = ModelObjectiveSelfScoring(model, params, train)\n elif objective_name == \"fixed\":\n objective = ModelObjectiveFixedValidation(\n model, params, datasets[0], datasets[1], iterations=study.user_attrs[\"iterations\"]\n )\n if objective is None:\n raise KeyError(\"Unknown objective!\")\n return objective\n\n\ndef get_sampler(sampler_name, sampler_args):\n sampler_constructor = {\n \"TPESampler\": optuna.samplers.TPESampler,\n \"RandomSampler\": optuna.samplers.RandomSampler,\n \"CmaEsSampler\": optuna.samplers.CmaEsSampler,\n }[sampler_name]\n sampler = sampler_constructor(**sampler_args)\n return sampler\n\n\ndef get_storage(args) -> optuna.storages.BaseStorage:\n return url_to_storage(args.db)\n\n\ndef url_to_storage(url: str) -> optuna.storages.BaseStorage:\n if url.startswith(\"redis://\") or url.startswith(\"unix://\"):\n return optuna.storages.RedisStorage(url)\n else:\n return optuna.storages.RDBStorage(\n url=url, heartbeat_interval=60, engine_kwargs=dict(pool_pre_ping=True)\n )\n"
] |
[
[
"numpy.median"
]
] |
oj9040/SpectralNet2
|
[
"0657116249418fe05fb13097ed32c2264fe40210"
] |
[
"src/core/networks.py"
] |
[
"'''\nnetworks.py: contains network definitions (for siamese net,\ntriplet siamese net, and spectralnet)\n'''\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import Input, Lambda, Subtract\n\nfrom . import train\nfrom . import costs\nfrom .layer import stack_layers\nfrom .util import LearningHandler, make_layer_list, train_gen, get_scale\n\nclass SiameseNet:\n def __init__(self, inputs, arch, siam_reg, y_true):\n self.orig_inputs = inputs\n # set up inputs\n self.inputs = {\n 'A': inputs['Unlabeled'],\n 'B': Input(shape=inputs['Unlabeled'].get_shape().as_list()[1:]),\n 'Labeled': inputs['Labeled'],\n }\n\n self.y_true = y_true\n\n # generate layers\n self.layers = []\n self.layers += make_layer_list(arch, 'siamese', siam_reg)\n\n # create the siamese net\n self.outputs = stack_layers(self.inputs, self.layers)\n\n # add the distance layer\n self.distance = Lambda(costs.euclidean_distance, output_shape=costs.eucl_dist_output_shape)([self.outputs['A'], self.outputs['B']])\n\n #create the distance model for training\n self.net = Model([self.inputs['A'], self.inputs['B']], self.distance)\n\n # compile the siamese network\n self.net.compile(loss=costs.get_contrastive_loss(m_neg=1, m_pos=0.05), optimizer='rmsprop')\n\n def train(self, pairs_train, dist_train, pairs_val, dist_val,\n lr, drop, patience, num_epochs, batch_size):\n # create handler for early stopping and learning rate scheduling\n self.lh = LearningHandler(\n lr=lr,\n drop=drop,\n lr_tensor=self.net.optimizer.lr,\n patience=patience)\n\n # initialize the training generator\n train_gen_ = train_gen(pairs_train, dist_train, batch_size)\n\n # format the validation data for keras\n validation_data = ([pairs_val[:, 0], pairs_val[:, 1]], dist_val)\n\n # compute the steps per epoch\n steps_per_epoch = int(len(pairs_train) / batch_size)\n\n # train the network\n hist = self.net.fit_generator(train_gen_, epochs=num_epochs, validation_data=validation_data, steps_per_epoch=steps_per_epoch, callbacks=[self.lh])\n\n return hist\n\n def predict(self, x, batch_sizes):\n # compute the siamese embeddings of the input data\n return train.predict(self.outputs['A'], x_unlabeled=x, inputs=self.orig_inputs, y_true=self.y_true, batch_sizes=batch_sizes)\n\nclass SpectralNet:\n def __init__(self, inputs, arch, spec_reg, y_true, y_train_labeled_onehot,\n n_clusters, affinity, scale_nbr, n_nbrs, batch_sizes,\n siamese_net=None, x_train=None, have_labeled=False):\n self.y_true = y_true\n self.y_train_labeled_onehot = y_train_labeled_onehot\n self.inputs = inputs\n self.batch_sizes = batch_sizes\n # generate layers\n self.layers = make_layer_list(arch[:-1], 'spectral', spec_reg)\n self.layers += [\n {'type': 'tanh',\n 'size': n_clusters,\n 'l2_reg': spec_reg,\n 'name': 'spectral_{}'.format(len(arch)-1)},\n {'type': 'Orthonorm', 'name':'orthonorm'}\n ]\n\n # create spectralnet\n self.outputs = stack_layers(self.inputs, self.layers)\n self.net = Model(inputs=self.inputs['Unlabeled'], outputs=self.outputs['Unlabeled'])\n\n # DEFINE LOSS\n\n # generate affinity matrix W according to params\n if affinity == 'siamese':\n input_affinity = tf.concat([siamese_net.outputs['A'], siamese_net.outputs['Labeled']], axis=0)\n x_affinity = siamese_net.predict(x_train, batch_sizes)\n elif affinity in ['knn', 'full']:\n input_affinity = tf.concat([self.inputs['Unlabeled'], self.inputs['Labeled']], axis=0)\n x_affinity = x_train\n\n # calculate scale for affinity matrix\n scale = 
get_scale(x_affinity, self.batch_sizes['Unlabeled'], scale_nbr)\n\n # create affinity matrix\n if affinity == 'full':\n W = costs.full_affinity(input_affinity, scale=scale)\n elif affinity in ['knn', 'siamese']:\n W = costs.knn_affinity(input_affinity, n_nbrs, scale=scale, scale_nbr=scale_nbr)\n\n # if we have labels, use them\n if have_labeled:\n # get true affinities (from labeled data)\n W_true = tf.cast(tf.equal(costs.squared_distance(y_true), 0),dtype='float32')\n\n # replace lower right corner of W with W_true\n unlabeled_end = tf.shape(self.inputs['Unlabeled'])[0]\n W_u = W[:unlabeled_end, :] # upper half\n W_ll = W[unlabeled_end:, :unlabeled_end] # lower left\n W_l = tf.concat((W_ll, W_true), axis=1) # lower half\n W = tf.concat((W_u, W_l), axis=0)\n\n # create pairwise batch distance matrix self.Dy\n self.Dy = costs.squared_distance(tf.concat([self.outputs['Unlabeled'], self.outputs['Labeled']], axis=0))\n else:\n self.Dy = costs.squared_distance(self.outputs['Unlabeled'])\n\n # define loss\n self.loss = K.sum(W * self.Dy) / (2 * batch_sizes['Unlabeled'])\n\n # create the train step update\n self.learning_rate = tf.Variable(0., name='spectral_net_learning_rate')\n self.train_step = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.loss, var_list=self.net.trainable_weights)\n\n # initialize spectralnet variables\n K.get_session().run(tf.variables_initializer(self.net.trainable_weights))\n\n def train(self, x_train_unlabeled, x_train_labeled, x_val_unlabeled,\n lr, drop, patience, num_epochs):\n # create handler for early stopping and learning rate scheduling\n self.lh = LearningHandler(\n lr=lr,\n drop=drop,\n lr_tensor=self.learning_rate,\n patience=patience)\n\n losses = np.empty((num_epochs,))\n val_losses = np.empty((num_epochs,))\n\n # begin spectralnet training loop\n self.lh.on_train_begin()\n for i in range(num_epochs):\n # train spectralnet\n losses[i] = train.train_step(\n return_var=[self.loss],\n updates=self.net.updates + [self.train_step],\n x_unlabeled=x_train_unlabeled,\n inputs=self.inputs,\n y_true=self.y_true,\n batch_sizes=self.batch_sizes,\n x_labeled=x_train_labeled,\n y_labeled=self.y_train_labeled_onehot,\n batches_per_epoch=100)[0]\n\n # get validation loss\n val_losses[i] = train.predict_sum(\n self.loss,\n x_unlabeled=x_val_unlabeled,\n inputs=self.inputs,\n y_true=self.y_true,\n x_labeled=x_train_unlabeled[0:0],\n y_labeled=self.y_train_labeled_onehot,\n batch_sizes=self.batch_sizes)\n\n # do early stopping if necessary\n if self.lh.on_epoch_end(i, val_losses[i]):\n print('STOPPING EARLY')\n break\n\n # print training status\n print(\"Epoch: {}, loss={:2f}, val_loss={:2f}\".format(i, losses[i], val_losses[i]))\n\n return losses[:i], val_losses[:i]\n\n def predict(self, x):\n # test inputs do not require the 'Labeled' input\n inputs_test = {'Unlabeled': self.inputs['Unlabeled'], 'Orthonorm': self.inputs['Orthonorm']}\n return train.predict(\n self.outputs['Unlabeled'],\n x_unlabeled=x,\n inputs=inputs_test,\n y_true=self.y_true,\n x_labeled=x[0:0],\n y_labeled=self.y_train_labeled_onehot[0:0],\n batch_sizes=self.batch_sizes)\n"
] |
[
[
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.shape",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.variables_initializer",
"numpy.empty"
]
] |
EagleW/Stage-wise-Fine-tuning
|
[
"6054152aa0f64cab9c6eea47bd3570e6d79eaf26"
] |
[
"src/file_utils.py"
] |
[
"\"\"\"\nUtilities for working with the local dataset cache.\nThis file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\nCopyright by the AllenNLP authors.\n\"\"\"\n\nimport fnmatch\nimport json\nimport os\nimport re\nimport shutil\nimport sys\nimport tarfile\nimport tempfile\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom dataclasses import fields\nfrom functools import partial, wraps\nfrom hashlib import sha256\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\nfrom urllib.parse import urlparse\nfrom zipfile import ZipFile, is_zipfile\n\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nimport requests\nfrom filelock import FileLock\n\n# from . import __version__\n# from .utils import logging\nimport logging_t\n\n\nlogger = logging_t.get_logger(__name__) # pylint: disable=invalid-name\n\nENV_VARS_TRUE_VALUES = {\"1\", \"ON\", \"YES\"}\nENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({\"AUTO\"})\n\ntry:\n USE_TF = os.environ.get(\"USE_TF\", \"AUTO\").upper()\n USE_TORCH = os.environ.get(\"USE_TORCH\", \"AUTO\").upper()\n if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:\n import torch\n\n _torch_available = True # pylint: disable=invalid-name\n logger.info(\"PyTorch version {} available.\".format(torch.__version__))\n else:\n logger.info(\"Disabling PyTorch because USE_TF is set\")\n _torch_available = False\nexcept ImportError:\n _torch_available = False # pylint: disable=invalid-name\n\ntry:\n USE_TF = os.environ.get(\"USE_TF\", \"AUTO\").upper()\n USE_TORCH = os.environ.get(\"USE_TORCH\", \"AUTO\").upper()\n\n if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:\n import tensorflow as tf\n\n assert hasattr(tf, \"__version__\") and int(tf.__version__[0]) >= 2\n _tf_available = True # pylint: disable=invalid-name\n logger.info(\"TensorFlow version {} available.\".format(tf.__version__))\n else:\n logger.info(\"Disabling Tensorflow because USE_TORCH is set\")\n _tf_available = False\nexcept (ImportError, AssertionError):\n _tf_available = False # pylint: disable=invalid-name\n\n\ntry:\n USE_JAX = os.environ.get(\"USE_FLAX\", \"AUTO\").upper()\n\n if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:\n import flax\n import jax\n\n logger.info(\"JAX version {}, Flax: available\".format(jax.__version__))\n logger.info(\"Flax available: {}\".format(flax))\n _flax_available = True\n else:\n _flax_available = False\nexcept ImportError:\n _flax_available = False # pylint: disable=invalid-name\n\n\ntry:\n import datasets # noqa: F401\n\n # Check we're not importing a \"datasets\" directory somewhere\n _datasets_available = hasattr(datasets, \"__version__\") and hasattr(datasets, \"load_dataset\")\n if _datasets_available:\n logger.debug(f\"Succesfully imported datasets version {datasets.__version__}\")\n else:\n logger.debug(\"Imported a datasets object but this doesn't seem to be the 🤗 datasets library.\")\n\nexcept ImportError:\n _datasets_available = False\n\ntry:\n from torch.hub import _get_torch_home\n\n torch_cache_home = _get_torch_home()\nexcept ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv(\"TORCH_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"torch\"))\n )\n\n\ntry:\n import torch_xla.core.xla_model as xm # noqa: F401\n\n if _torch_available:\n _torch_tpu_available = True # pylint: disable=\n else:\n _torch_tpu_available = False\nexcept ImportError:\n _torch_tpu_available = 
False\n\n\ntry:\n import psutil # noqa: F401\n\n _psutil_available = True\n\nexcept ImportError:\n _psutil_available = False\n\n\ntry:\n import py3nvml # noqa: F401\n\n _py3nvml_available = True\n\nexcept ImportError:\n _py3nvml_available = False\n\n\ntry:\n from apex import amp # noqa: F401\n\n _has_apex = True\nexcept ImportError:\n _has_apex = False\n\n\ntry:\n import faiss # noqa: F401\n\n _faiss_available = True\n logger.debug(f\"Succesfully imported faiss version {faiss.__version__}\")\nexcept ImportError:\n _faiss_available = False\n\ntry:\n import sklearn.metrics # noqa: F401\n\n import scipy.stats # noqa: F401\n\n _has_sklearn = True\nexcept (AttributeError, ImportError):\n _has_sklearn = False\n\ntry:\n # Test copied from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py\n get_ipython = sys.modules[\"IPython\"].get_ipython\n if \"IPKernelApp\" not in get_ipython().config:\n raise ImportError(\"console\")\n if \"VSCODE_PID\" in os.environ:\n raise ImportError(\"vscode\")\n\n import IPython # noqa: F401\n\n _in_notebook = True\nexcept (AttributeError, ImportError, KeyError):\n _in_notebook = False\n\n\ntry:\n import sentencepiece # noqa: F401\n\n _sentencepiece_available = True\n\nexcept ImportError:\n _sentencepiece_available = False\n\n\ntry:\n import tokenizers # noqa: F401\n\n _tokenizers_available = True\n\nexcept ImportError:\n _tokenizers_available = False\n\n\ndefault_cache_path = os.path.join(torch_cache_home, \"transformers\")\n\n\nPYTORCH_PRETRAINED_BERT_CACHE = os.getenv(\"PYTORCH_PRETRAINED_BERT_CACHE\", default_cache_path)\nPYTORCH_TRANSFORMERS_CACHE = os.getenv(\"PYTORCH_TRANSFORMERS_CACHE\", PYTORCH_PRETRAINED_BERT_CACHE)\nTRANSFORMERS_CACHE = os.getenv(\"TRANSFORMERS_CACHE\", PYTORCH_TRANSFORMERS_CACHE)\n\nWEIGHTS_NAME = \"pytorch_model.bin\"\nTF2_WEIGHTS_NAME = \"tf_model.h5\"\nTF_WEIGHTS_NAME = \"model.ckpt\"\nCONFIG_NAME = \"config.json\"\nMODEL_CARD_NAME = \"modelcard.json\"\n\nSENTENCEPIECE_UNDERLINE = \"▁\"\nSPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility\n\nMULTIPLE_CHOICE_DUMMY_INPUTS = [\n [[0, 1, 0, 1], [1, 0, 0, 1]]\n] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.\nDUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]\nDUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]\n\nS3_BUCKET_PREFIX = \"https://s3.amazonaws.com/models.huggingface.co/bert\"\nCLOUDFRONT_DISTRIB_PREFIX = \"https://cdn.huggingface.co\"\nPRESET_MIRROR_DICT = {\n \"tuna\": \"https://mirrors.tuna.tsinghua.edu.cn/hugging-face-models\",\n \"bfsu\": \"https://mirrors.bfsu.edu.cn/hugging-face-models\",\n}\n\n\ndef is_torch_available():\n return _torch_available\n\n\ndef is_tf_available():\n return _tf_available\n\n\ndef is_flax_available():\n return _flax_available\n\n\ndef is_torch_tpu_available():\n return _torch_tpu_available\n\n\ndef is_datasets_available():\n return _datasets_available\n\n\ndef is_psutil_available():\n return _psutil_available\n\n\ndef is_py3nvml_available():\n return _py3nvml_available\n\n\ndef is_apex_available():\n return _has_apex\n\n\ndef is_faiss_available():\n return _faiss_available\n\n\ndef is_sklearn_available():\n return _has_sklearn\n\n\ndef is_sentencepiece_available():\n return _sentencepiece_available\n\n\ndef is_tokenizers_available():\n return _tokenizers_available\n\n\ndef is_in_notebook():\n return _in_notebook\n\n\ndef torch_only_method(fn):\n def wrapper(*args, **kwargs):\n if not _torch_available:\n raise ImportError(\n \"You need to install 
pytorch to use this method or class, \"\n \"or activate it with environment variables USE_TORCH=1 and USE_TF=0.\"\n )\n else:\n return fn(*args, **kwargs)\n\n return wrapper\n\n\nDATASETS_IMPORT_ERROR = \"\"\"\n{0} requires the 🤗 Datasets library but it was not found in your enviromnent. You can install it with:\n```\npip install datasets\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install datasets\n```\nthen restarting your kernel.\n\nNote that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current\nworking directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or\nthat python file if that's the case.\n\"\"\"\n\n\nTOKENIZERS_IMPORT_ERROR = \"\"\"\n{0} requires the 🤗 Tokenizers library but it was not found in your enviromnent. You can install it with:\n```\npip install tokenizers\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install tokenizers\n```\n\"\"\"\n\n\nSENTENCEPIECE_IMPORT_ERROR = \"\"\"\n{0} requires the SentencePiece library but it was not found in your enviromnent. Checkout the instructions on the\ninstallation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones\nthat match your enviromnent.\n\"\"\"\n\n\nFAISS_IMPORT_ERROR = \"\"\"\n{0} requires the faiss library but it was not found in your enviromnent. Checkout the instructions on the\ninstallation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones\nthat match your enviromnent.\n\"\"\"\n\n\nPYTORCH_IMPORT_ERROR = \"\"\"\n{0} requires the PyTorch library but it was not found in your enviromnent. Checkout the instructions on the\ninstallation page: https://pytorch.org/get-started/locally/ and follow the ones that match your enviromnent.\n\"\"\"\n\n\nSKLEARN_IMPORT_ERROR = \"\"\"\n{0} requires the scikit-learn library but it was not found in your enviromnent. You can install it with:\n```\npip install -U scikit-learn\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install -U scikit-learn\n```\n\"\"\"\n\n\nTENSORFLOW_IMPORT_ERROR = \"\"\"\n{0} requires the TensorFlow library but it was not found in your enviromnent. Checkout the instructions on the\ninstallation page: https://www.tensorflow.org/install and follow the ones that match your enviromnent.\n\"\"\"\n\n\nFLAX_IMPORT_ERROR = \"\"\"\n{0} requires the FLAX library but it was not found in your enviromnent. 
Checkout the instructions on the\ninstallation page: https://github.com/google/flax and follow the ones that match your enviromnent.\n\"\"\"\n\n\ndef requires_datasets(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_datasets_available():\n raise ImportError(DATASETS_IMPORT_ERROR.format(name))\n\n\ndef requires_faiss(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_faiss_available():\n raise ImportError(FAISS_IMPORT_ERROR.format(name))\n\n\ndef requires_pytorch(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_torch_available():\n raise ImportError(PYTORCH_IMPORT_ERROR.format(name))\n\n\ndef requires_sklearn(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_sklearn_available():\n raise ImportError(SKLEARN_IMPORT_ERROR.format(name))\n\n\ndef requires_tf(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_tf_available():\n raise ImportError(TENSORFLOW_IMPORT_ERROR.format(name))\n\n\ndef requires_flax(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_flax_available():\n raise ImportError(FLAX_IMPORT_ERROR.format(name))\n\n\ndef requires_tokenizers(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_tokenizers_available():\n raise ImportError(TOKENIZERS_IMPORT_ERROR.format(name))\n\n\ndef requires_sentencepiece(obj):\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n if not is_sentencepiece_available():\n raise ImportError(SENTENCEPIECE_IMPORT_ERROR.format(name))\n\n\ndef add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator\n\n\ndef add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. 
note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator\n\n\ndef add_end_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = fn.__doc__ + \"\".join(docstr)\n return fn\n\n return docstring_decorator\n\n\nPT_RETURN_INTRODUCTION = r\"\"\"\n Returns:\n :class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`:\n A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a\n tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration\n (:class:`~transformers.{config_class}`) and inputs.\n\n\"\"\"\n\n\nTF_RETURN_INTRODUCTION = r\"\"\"\n Returns:\n :class:`~{full_output_type}` or :obj:`tuple(tf.Tensor)`:\n A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a\n tuple of :obj:`tf.Tensor` comprising various elements depending on the configuration\n (:class:`~transformers.{config_class}`) and inputs.\n\n\"\"\"\n\n\ndef _get_indent(t):\n \"\"\"Returns the indentation in the first line of t\"\"\"\n search = re.search(r\"^(\\s*)\\S\", t)\n return \"\" if search is None else search.groups()[0]\n\n\ndef _convert_output_args_doc(output_args_doc):\n \"\"\"Convert output_args_doc to display properly.\"\"\"\n # Split output_arg_doc in blocks argument/description\n indent = _get_indent(output_args_doc)\n blocks = []\n current_block = \"\"\n for line in output_args_doc.split(\"\\n\"):\n # If the indent is the same as the beginning, the line is the name of new arg.\n if _get_indent(line) == indent:\n if len(current_block) > 0:\n blocks.append(current_block[:-1])\n current_block = f\"{line}\\n\"\n else:\n # Otherwise it's part of the description of the current arg.\n # We need to remove 2 spaces to the indentation.\n current_block += f\"{line[2:]}\\n\"\n blocks.append(current_block[:-1])\n\n # Format each block for proper rendering\n for i in range(len(blocks)):\n blocks[i] = re.sub(r\"^(\\s+)(\\S+)(\\s+)\", r\"\\1- **\\2**\\3\", blocks[i])\n blocks[i] = re.sub(r\":\\s*\\n\\s*(\\S)\", r\" -- \\1\", blocks[i])\n\n return \"\\n\".join(blocks)\n\n\ndef _prepare_output_docstrings(output_type, config_class):\n \"\"\"\n Prepares the return part of the docstring using `output_type`.\n \"\"\"\n docstrings = output_type.__doc__\n\n # Remove the head of the docstring to keep the list of args only\n lines = docstrings.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*(Args|Parameters):\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n docstrings = \"\\n\".join(lines[(i + 1) :])\n docstrings = _convert_output_args_doc(docstrings)\n\n # Add the return introduction\n full_output_type = f\"{output_type.__module__}.{output_type.__name__}\"\n intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith(\"TF\") else PT_RETURN_INTRODUCTION\n intro = intro.format(full_output_type=full_output_type, config_class=config_class)\n return intro + docstrings\n\n\nPT_TOKEN_CLASSIFICATION_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = 
{model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1] * inputs[\"input_ids\"].size(1)).unsqueeze(0) # Batch size 1\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nPT_QUESTION_ANSWERING_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> inputs = tokenizer(question, text, return_tensors='pt')\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)\n >>> loss = outputs.loss\n >>> start_scores = outputs.start_logits\n >>> end_scores = outputs.end_logits\n\"\"\"\n\nPT_SEQUENCE_CLASSIFICATION_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nPT_MASKED_LM_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"pt\")\n >>> labels = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\")[\"input_ids\"]\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nPT_BASE_MODEL_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n\"\"\"\n\nPT_MULTIPLE_CHOICE_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)\n >>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> loss = 
outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nPT_CAUSAL_LM_SAMPLE = r\"\"\"\n Example::\n\n >>> import torch\n >>> from transformers import {tokenizer_class}, {model_class}\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs, labels=inputs[\"input_ids\"])\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nTF_TOKEN_CLASSIFICATION_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> input_ids = inputs[\"input_ids\"]\n >>> inputs[\"labels\"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nTF_QUESTION_ANSWERING_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> input_dict = tokenizer(question, text, return_tensors='tf')\n >>> outputs = model(input_dict)\n >>> start_logits = outputs.start_logits\n >>> end_logits = outputs.end_logits\n\n >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict[\"input_ids\"].numpy()[0])\n >>> answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1])\n\"\"\"\n\nTF_SEQUENCE_CLASSIFICATION_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nTF_MASKED_LM_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tokenizer(\"The capital of France is Paris.\", return_tensors=\"tf\")[\"input_ids\"]\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\"\"\"\n\nTF_BASE_MODEL_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = model(inputs)\n\n >>> last_hidden_states = 
outputs.last_hidden_states\n\"\"\"\n\nTF_MULTIPLE_CHOICE_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)\n >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}\n >>> outputs = model(inputs) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> logits = outputs.logits\n\"\"\"\n\nTF_CAUSAL_LM_SAMPLE = r\"\"\"\n Example::\n\n >>> from transformers import {tokenizer_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')\n >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True))\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = model(inputs)\n >>> logits = outputs.logits\n\"\"\"\n\n\ndef add_code_sample_docstrings(\n *docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None, mask=None\n):\n def docstring_decorator(fn):\n model_class = fn.__qualname__.split(\".\")[0]\n is_tf_class = model_class[:2] == \"TF\"\n doc_kwargs = dict(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)\n\n if \"SequenceClassification\" in model_class:\n code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE\n elif \"QuestionAnswering\" in model_class:\n code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE\n elif \"TokenClassification\" in model_class:\n code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE\n elif \"MultipleChoice\" in model_class:\n code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE\n elif \"MaskedLM\" in model_class or model_class in [\"FlaubertWithLMHeadModel\", \"XLMWithLMHeadModel\"]:\n doc_kwargs[\"mask\"] = \"[MASK]\" if mask is None else mask\n code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE\n elif \"LMHead\" in model_class:\n code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE\n elif \"Model\" in model_class or \"Encoder\" in model_class:\n code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE\n else:\n raise ValueError(f\"Docstring can't be built for model {model_class}\")\n\n output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else \"\"\n built_doc = code_sample.format(**doc_kwargs)\n fn.__doc__ = (fn.__doc__ or \"\") + \"\".join(docstr) + output_doc + built_doc\n return fn\n\n return docstring_decorator\n\n\ndef replace_return_docstrings(output_type=None, config_class=None):\n def docstring_decorator(fn):\n docstrings = fn.__doc__\n lines = docstrings.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*Returns?:\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n lines[i] = _prepare_output_docstrings(output_type, config_class)\n docstrings = \"\\n\".join(lines)\n else:\n raise ValueError(\n f\"The function {fn} should have an 
empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\\n{docstrings}\"\n )\n fn.__doc__ = docstrings\n return fn\n\n return docstring_decorator\n\n\ndef is_remote_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")\n\n\ndef hf_bucket_url(model_id: str, filename: str, use_cdn=True, mirror=None) -> str:\n \"\"\"\n Resolve a model identifier, and a file name, to a HF-hosted url\n on either S3 or Cloudfront (a Content Delivery Network, or CDN).\n\n Cloudfront is replicated over the globe so downloads are way faster\n for the end user (and it also lowers our bandwidth costs). However, it\n is more aggressively cached by default, so may not always reflect the\n latest changes to the underlying file (default TTL is 24 hours).\n\n In terms of client-side caching from this library, even though\n Cloudfront relays the ETags from S3, using one or the other\n (or switching from one to the other) will affect caching: cached files\n are not shared between the two because the cached file's name contains\n a hash of the url.\n \"\"\"\n endpoint = (\n PRESET_MIRROR_DICT.get(mirror, mirror)\n if mirror\n else CLOUDFRONT_DISTRIB_PREFIX\n if use_cdn\n else S3_BUCKET_PREFIX\n )\n legacy_format = \"/\" not in model_id\n if legacy_format:\n return f\"{endpoint}/{model_id}-{filename}\"\n else:\n return f\"{endpoint}/{model_id}/{filename}\"\n\n\ndef url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name\n so that TF 2.0 can identify it as a HDF5 file\n (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)\n \"\"\"\n url_bytes = url.encode(\"utf-8\")\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n etag_hash = sha256(etag_bytes)\n filename += \".\" + etag_hash.hexdigest()\n\n if url.endswith(\".h5\"):\n filename += \".h5\"\n\n return filename\n\n\ndef filename_to_url(filename, cache_dir=None):\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise EnvironmentError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + \".json\"\n if not os.path.exists(meta_path):\n raise EnvironmentError(\"file {} not found\".format(meta_path))\n\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n\n return url, etag\n\n\ndef cached_path(\n url_or_filename,\n cache_dir=None,\n force_download=False,\n proxies=None,\n resume_download=False,\n user_agent: Union[Dict, str, None] = None,\n extract_compressed_file=False,\n force_extract=False,\n local_files_only=False,\n) -> Optional[str]:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. 
If it's already a local path,\n make sure the file exists and then return the path.\n Args:\n cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).\n force_download: if True, re-dowload the file even if it's already cached in the cache dir.\n resume_download: if True, resume the download if incompletly recieved file is found.\n user_agent: Optional string or dict that will be appended to the user-agent on remote requests.\n extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed\n file in a folder along the archive.\n force_extract: if True when extract_compressed_file is True and the archive was already extracted,\n re-extract the archive and overide the folder where it was extracted.\n\n Return:\n None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).\n Local path (string) otherwise\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if is_remote_url(url_or_filename):\n # URL, so get it from the cache (downloading if necessary)\n output_path = get_from_cache(\n url_or_filename,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n user_agent=user_agent,\n local_files_only=local_files_only,\n )\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n output_path = url_or_filename\n elif urlparse(url_or_filename).scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n if extract_compressed_file:\n if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):\n return output_path\n\n # Path where we extract compressed archives\n # We avoid '.' 
in dir name and add \"-extracted\" at the end: \"./model.zip\" => \"./model-zip-extracted/\"\n output_dir, output_file = os.path.split(output_path)\n output_extract_dir_name = output_file.replace(\".\", \"-\") + \"-extracted\"\n output_path_extracted = os.path.join(output_dir, output_extract_dir_name)\n\n if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:\n return output_path_extracted\n\n # Prevent parallel extractions\n lock_path = output_path + \".lock\"\n with FileLock(lock_path):\n shutil.rmtree(output_path_extracted, ignore_errors=True)\n os.makedirs(output_path_extracted)\n if is_zipfile(output_path):\n with ZipFile(output_path, \"r\") as zip_file:\n zip_file.extractall(output_path_extracted)\n zip_file.close()\n elif tarfile.is_tarfile(output_path):\n tar_file = tarfile.open(output_path)\n tar_file.extractall(output_path_extracted)\n tar_file.close()\n else:\n raise EnvironmentError(\"Archive format of {} could not be identified\".format(output_path))\n\n return output_path_extracted\n\n return output_path\n\n\ndef http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None):\n # ua = \"transformers/{}; python/{}\".format(__version__, sys.version.split()[0])\n if is_torch_available():\n ua += \"; torch/{}\".format(torch.__version__)\n if is_tf_available():\n ua += \"; tensorflow/{}\".format(tf.__version__)\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(\"{}/{}\".format(k, v) for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n headers = {\"user-agent\": ua}\n if resume_size > 0:\n headers[\"Range\"] = \"bytes=%d-\" % (resume_size,)\n response = requests.get(url, stream=True, proxies=proxies, headers=headers)\n if response.status_code == 416: # Range not satisfiable\n return\n content_length = response.headers.get(\"Content-Length\")\n total = resume_size + int(content_length) if content_length is not None else None\n progress = tqdm(\n unit=\"B\",\n unit_scale=True,\n total=total,\n initial=resume_size,\n desc=\"Downloading\",\n disable=bool(logging.get_verbosity() == logging.NOTSET),\n )\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\ndef get_from_cache(\n url,\n cache_dir=None,\n force_download=False,\n proxies=None,\n etag_timeout=10,\n resume_download=False,\n user_agent: Union[Dict, str, None] = None,\n local_files_only=False,\n) -> Optional[str]:\n \"\"\"\n Given a URL, look for the corresponding file in the local cache.\n If it's not there, download it. 
Then return the path to the cached file.\n\n Return:\n None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).\n Local path (string) otherwise\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n os.makedirs(cache_dir, exist_ok=True)\n\n etag = None\n if not local_files_only:\n try:\n response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)\n if response.status_code == 200:\n etag = response.headers.get(\"ETag\")\n except (EnvironmentError, requests.exceptions.Timeout):\n # etag is already None\n pass\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.\n # try to get the last downloaded one\n if etag is None:\n if os.path.exists(cache_path):\n return cache_path\n else:\n matching_files = [\n file\n for file in fnmatch.filter(os.listdir(cache_dir), filename.split(\".\")[0] + \".*\")\n if not file.endswith(\".json\") and not file.endswith(\".lock\")\n ]\n if len(matching_files) > 0:\n return os.path.join(cache_dir, matching_files[-1])\n else:\n # If files cannot be found and local_files_only=True,\n # the models might've been found if local_files_only=False\n # Notify the user about that\n if local_files_only:\n raise ValueError(\n \"Cannot find the requested files in the cached path and outgoing traffic has been\"\n \" disabled. To enable model look-ups and downloads online, set 'local_files_only'\"\n \" to False.\"\n )\n return None\n\n # From now on, etag is not None.\n if os.path.exists(cache_path) and not force_download:\n return cache_path\n\n # Prevent parallel downloads of the same file with a lock.\n lock_path = cache_path + \".lock\"\n with FileLock(lock_path):\n\n # If the download just completed while the lock was activated.\n if os.path.exists(cache_path) and not force_download:\n # Even if returning early like here, the lock will be released.\n return cache_path\n\n if resume_download:\n incomplete_path = cache_path + \".incomplete\"\n\n @contextmanager\n def _resumable_file_manager():\n with open(incomplete_path, \"a+b\") as f:\n yield f\n\n temp_file_manager = _resumable_file_manager\n if os.path.exists(incomplete_path):\n resume_size = os.stat(incomplete_path).st_size\n else:\n resume_size = 0\n else:\n temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)\n resume_size = 0\n\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with temp_file_manager() as temp_file:\n logger.info(\"%s not found in cache or force_download set to True, downloading to %s\", url, temp_file.name)\n\n http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)\n\n logger.info(\"storing %s in cache at %s\", url, cache_path)\n os.replace(temp_file.name, cache_path)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {\"url\": url, \"etag\": etag}\n meta_path = cache_path + \".json\"\n with open(meta_path, \"w\") as meta_file:\n json.dump(meta, meta_file)\n\n return cache_path\n\n\nclass cached_property(property):\n \"\"\"\n Descriptor that mimics @property but caches output in member variable.\n\n From tensorflow_datasets\n\n Built-in in functools from Python 3.8.\n \"\"\"\n\n def __get__(self, obj, 
objtype=None):\n # See docs.python.org/3/howto/descriptor.html#properties\n if obj is None:\n return self\n if self.fget is None:\n raise AttributeError(\"unreadable attribute\")\n attr = \"__cached_\" + self.fget.__name__\n cached = getattr(obj, attr, None)\n if cached is None:\n cached = self.fget(obj)\n setattr(obj, attr, cached)\n return cached\n\n\ndef torch_required(func):\n # Chose a different decorator name than in tests so it's clear they are not the same.\n @wraps(func)\n def wrapper(*args, **kwargs):\n if is_torch_available():\n return func(*args, **kwargs)\n else:\n raise ImportError(f\"Method `{func.__name__}` requires PyTorch.\")\n\n return wrapper\n\n\ndef tf_required(func):\n # Chose a different decorator name than in tests so it's clear they are not the same.\n @wraps(func)\n def wrapper(*args, **kwargs):\n if is_tf_available():\n return func(*args, **kwargs)\n else:\n raise ImportError(f\"Method `{func.__name__}` requires TF.\")\n\n return wrapper\n\n\ndef is_tensor(x):\n \"\"\" Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. \"\"\"\n if is_torch_available():\n import torch\n\n if isinstance(x, torch.Tensor):\n return True\n if is_tf_available():\n import tensorflow as tf\n\n if isinstance(x, tf.Tensor):\n return True\n return isinstance(x, np.ndarray)\n\n\nclass ModelOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like\n a tuple) or strings (like a dictionary) that will ignore the ``None`` attributes. Otherwise behaves like a\n regular python dictionary.\n\n .. warning::\n You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple`\n method to convert it to a tuple before.\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n assert len(class_fields), f\"{self.__class__.__name__} has no fields.\"\n assert all(\n field.default is None for field in class_fields[1:]\n ), f\"{self.__class__.__name__} should not have more than one required field.\"\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and not is_tensor(first_field):\n try:\n iterator = iter(first_field)\n first_field_iterator = True\n except TypeError:\n first_field_iterator = False\n\n # if we provided an iterator as first field and the iterator is a (key, value) iterator\n # set the associated fields\n if first_field_iterator:\n for element in iterator:\n if (\n not isinstance(element, (list, tuple))\n or not len(element) == 2\n or not isinstance(element[0], str)\n ):\n break\n setattr(self, element[0], element[1])\n if element[1] is not None:\n self[element[0]] = element[1]\n elif first_field is not None:\n self[class_fields[0].name] = first_field\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a 
{self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = {k: v for (k, v) in self.items()}\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not ``None``.\n \"\"\"\n return tuple(self[k] for k in self.keys())\n"
] |
[
[
"torch.hub._get_torch_home"
]
] |
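The cached-download utilities catalogued in the row above derive local cache filenames by hashing the URL and, when present, the ETag, with a `.h5` suffix preserved so Keras weights stay recognisable. Below is a minimal standalone sketch of that `url_to_filename` scheme, assuming only the standard library; the example URL and ETag are illustrative values, not taken from the dataset.

```python
# Minimal sketch of the url_to_filename hashing scheme shown in the row above:
# the cache filename is sha256(url), optionally followed by a period and
# sha256(etag), with ".h5" appended for HDF5 weight files.
# The URL and ETag used in the demo are illustrative only.
from hashlib import sha256
from typing import Optional


def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename


if __name__ == "__main__":
    # Same URL + different ETag yields a different cache entry,
    # which is why switching endpoints invalidates the local cache.
    print(url_to_filename("https://example.com/model/weights.h5", etag='"abc123"'))
```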
vonlippmann/Topology-optimization-of-structure-with-simp-method
|
[
"c9e81e2254f01e4babb69a337370a7a86bf31fb8"
] |
[
"Python/shorthaircat.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\n# Standard imports.\r\nimport numpy as np\r\nfrom numpy import e\r\nimport threading\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.patches import Circle\r\nimport matplotlib.ticker as ticker\r\n\r\n#user defined pakages import\r\nfrom optimization_simp import Simp\r\nfrom postprocessor import ResultData\r\nimport global_variable\r\nfrom vtu2stl import *\r\n\r\n# Enthought imports.\r\nfrom traits.api import HasTraits, Instance, Property, Enum,Range,on_trait_change,Button,ToolbarButton#导入所需要的类型HasTraits:基类,Instance:实例类型 Property: Enum:枚举类型\r\nfrom traitsui.api import View, Item, Group,HSplit, VSplit, InstanceEditor,RangeEditor#导入traits属性的可视化api\r\nfrom tvtk.pyface.scene_editor import SceneEditor#导入场景配置api\r\nfrom mayavi.core.api import PipelineBase,Engine\r\nfrom mayavi.sources.vtk_data_source import VTKDataSource\r\nfrom mayavi.modules.api import Surface , Volume\r\nfrom mayavi.core.ui.engine_view import EngineView#导入引擎可视化模块\r\nfrom mayavi.core.ui.mayavi_scene import MayaviScene\r\nfrom mayavi.tools.mlab_scene_model import MlabSceneModel#导入mlab 绘图窗口的可视化模型\r\nfrom mayavi import mlab\r\n\r\n######################################################################\r\nclass ShorthairCat(HasTraits):\r\n '''\r\n 所有拥有traits属性的类都需要从HasTraits类继承\r\n '''\r\n density_filter = Range(0.0,1.0,1.0)\r\n\r\n calculate_button = ToolbarButton('Calculate')\r\n\r\n initial_button = Button('initialize')\r\n animate_button = Button('animate')\r\n\r\n # The scene model.\r\n scene = Instance(MlabSceneModel,())#此处进行了初始化\r\n scene0 = Instance(MlabSceneModel,())#位移场景\r\n scene1 = Instance(MlabSceneModel,())#应力场景\r\n scene2 = Instance(MlabSceneModel,())#应变场景\r\n scene3 = Instance(MlabSceneModel,())#密度场景\r\n scene4 = Instance(MlabSceneModel,())#动图场景\r\n\r\n plot = Instance(PipelineBase)#生成动画的实例\r\n\r\n # The mayavi engine view.\r\n engine_view = Instance(EngineView)\r\n\r\n # The current selection in the engine tree view.\r\n current_selection = Property\r\n\r\n ######################\r\n main_view = View(\r\n Group(\r\n Group(HSplit(HSplit(VSplit(\r\n Item(name='engine_view',\r\n style='custom',\r\n resizable=True,\r\n height =500,\r\n width = 200,\r\n show_label=False\r\n\r\n ),\r\n ),\r\n Item(name='current_selection',\r\n editor=InstanceEditor(),\r\n enabled_when='current_selection is not None',\r\n style='custom',\r\n resizable = True,\r\n height = 500,\r\n width = 200,\r\n springy=True,\r\n show_label=False),\r\n )),label = 'Settings',show_border = False),\r\n Group(\r\n Group(\r\n\r\n Item(name = 'density_filter',editor = RangeEditor()),\r\n '_',\r\n HSplit(\r\n Item('initial_button', show_label=False),\r\n\r\n Item('calculate_button', show_label=False),\r\n Item('animate_button', show_label=False))\r\n\r\n ),\r\n Group(\r\n Item(name='scene',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n resizable=True,\r\n springy = True,\r\n height=600,\r\n width=600,\r\n label = 'mesh'\r\n ),\r\n Item(name='scene0',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n resizable=True,\r\n springy=True,\r\n height=600,\r\n width=600,\r\n label='displacement'\r\n ),\r\n Item(name='scene1',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n resizable=True,\r\n springy=True,\r\n height=600,\r\n width=600,\r\n label = 'stress'\r\n ),\r\n Item(name='scene2',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n resizable=True,\r\n springy=True,\r\n height=600,\r\n width=600,\r\n label = 'strain'\r\n ),\r\n Item(name='scene3',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n 
resizable=True,\r\n springy=True,\r\n height=600,\r\n width=600,\r\n label='density'\r\n ),\r\n Item(name='scene4',\r\n editor=SceneEditor(),\r\n show_label=False,\r\n resizable=True,\r\n springy=True,\r\n height=600,\r\n width=600,\r\n label='animating'\r\n ),\r\n layout = 'tabbed'),\r\n\r\n orientation = 'vertical'),\r\n orientation = 'horizontal'\r\n ),\r\n height = 600,\r\n width = 760,\r\n resizable=True,\r\n # scrollable=True,\r\n title = 'ShorthairCat',\r\n )\r\n\r\n #**traits 表示传入参数的个数不确定\r\n def __init__(self,type,r,penal,move,e,nu,volfac,**traits):\r\n\r\n HasTraits.__init__(self, **traits)\r\n self.scene.mayavi_scene.name = 'Geometry'\r\n self.scene.foreground = (1,170/255,0)\r\n self.scene0.mayavi_scene.name = 'Displacement'\r\n self.scene1.mayavi_scene.name = 'Stress'\r\n self.scene2.mayavi_scene.name = 'Strain'\r\n self.scene3.mayavi_scene.name = 'Density'\r\n self.scene4.mayavi_scene.name = 'Animate'\r\n\r\n #初始化enine_view\r\n self.engine_view = EngineView(engine=self.scene.engine)\r\n\r\n #对current_selection 进行动态监听,如果current_selection的值发生变化就调用 self._selection_change\r\n\r\n self.scene.engine.on_trait_change(self._selection_change,name = 'current_selection')\r\n self.simp_solver = None\r\n self.type = type\r\n self.r = r\r\n self.penal = penal\r\n self.move = move\r\n self.e = e\r\n self.nu = nu\r\n self.volfac = volfac\r\n self.address = 'H:\\GitHub\\Topology-optimization-of-structure-via-simp-method'\r\n self.i = 1\r\n def _initial_button_fired(self):\r\n self.initial_thread = threading.Thread(target = self._initial,args=(),name='Thread-1')\r\n self.initial_thread.daemon = True\r\n self.initial_thread.start()\r\n\r\n def _initial(self):\r\n global_variable.hyperparameter(r=self.r,move=self.move,e=self.e,penal=self.penal,nu=self.nu,volfac=self.volfac)\r\n global_variable.initialize_global_variable(type =self.type)\r\n self.simp_solver = Simp()\r\n self._mayavi()\r\n self.simp_solver.on_trait_change(self._update_vtkdatasource,name = 'loop')\r\n self.simp_solver.on_trait_change(self._save_fig, name='loop',dispatch = 'ui')\r\n def _save_fig(self):\r\n path = 'H:\\GitHub\\Topology-optimization-of-structure-via-simp-method\\\\fig\\\\'\r\n fname = path + 'density' + str(self.simp_solver.loop) + '.png'\r\n self.scene3.mayavi_scene.scene.save(fname)\r\n\r\n\r\n\r\n\r\n\r\n def _calculate_button_fired(self):\r\n #监听loop,一改变立刻更新曲线,同时建立background thread ,在后台进行有限元计算\r\n # self.simp_solver.on_trait_change(self._plot_convergence_curve, name='loop', dispatch='new')#TODO 发现如果用dispatch = 'ui' 有很大几率卡死,但是这个模式会报错,不过不影响使用\r\n #self.simp_solver.on_trait_change(self._plot,name = 'loop')\r\n\r\n self.computation_thread = threading.Thread(target=self.simp_solver.simp,args=(),name= 'Thread-2')\r\n self.computation_thread.daemon = True\r\n self.computation_thread.start()\r\n\r\n self.plot_thread = threading.Thread(target = self._plot_convergence_curve,args = (),name = 'Thread-3')\r\n self.plot_thread.daemon = True\r\n self.plot_thread.start()\r\n\r\n\r\n\r\n\r\n def _animate_button_fired(self):\r\n #创建一个background thread 不停的显示动画\r\n animate_thread = threading.Thread(target= self._animate(),args=())\r\n animate_thread.daemon = True\r\n animate_thread.start()\r\n\r\n # 静态监听密度过滤器\r\n def _density_filter_changed(self):\r\n print('the density is :',self.density_filter)\r\n self.simp_solver.resultdata.unstrgrid_density = self.simp_solver.resultdata.generate_unstrgrid_mesh(self.density_filter)\r\n self.simp_solver.resultdata.update_unstrgrid_density(self.simp_solver.resultdata.density)\r\n 
self.simp_solver.resultdata.vtkdatasource_density.data = self.simp_solver.resultdata.unstrgrid_density\r\n self.simp_solver.resultdata.vtkdatasource_density.update()\r\n\r\n self.simp_solver.resultdata.unstrgrid_stress = self.simp_solver.resultdata.generate_unstrgrid_mesh(self.density_filter)\r\n self.simp_solver.resultdata.update_unstrgrid_stress(self.simp_solver.resultdata.stress)\r\n self.simp_solver.resultdata.vtkdatasource_stress.data = self.simp_solver.resultdata.unstrgrid_stress\r\n self.simp_solver.resultdata.vtkdatasource_stress.update()\r\n\r\n\r\n #初始化场景\r\n def _mayavi(self):\r\n \"\"\"Shows how you can generate data using mayavi instead of mlab.\"\"\"\r\n print('updating mayavi')\r\n\r\n e = self.scene.engine\r\n\r\n #网格scene配置\r\n e.current_scene = self.scene.mayavi_scene\r\n e.add_source(self.simp_solver.resultdata.vtkdatasource_mesh)\r\n e.add_module(Surface(name = 'mesh_wireframe'))\r\n e.current_scene.children[0].children[0].children[0].actor.property.representation = 'wireframe'\r\n e.current_scene.children[0].children[0].children[0].actor.property.color = (0,0,0)\r\n e.current_scene.children[0].children[0].children[0].actor.property.line_width = 1.0\r\n e.add_module(Surface(name='mesh_solid'))\r\n\r\n #位移scene配置\r\n e.current_scene = self.scene0.mayavi_scene\r\n e.add_source(self.simp_solver.resultdata.vtkdatasource_displacement)\r\n e.add_module(Surface(name = 'displacement'))\r\n self.scene.engine.current_scene.children[0].children[0].children[0].enable_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].contour.filled_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].module_manager.scalar_lut_manager.show_legend = True\r\n\r\n #应力scene配置\r\n e.current_scene = self.scene1.mayavi_scene\r\n e.add_source(self.simp_solver.resultdata.vtkdatasource_stress)\r\n e.add_module(Surface(name = 'stress'))\r\n self.scene.engine.current_scene.children[0].children[0].children[0].enable_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].contour.filled_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].module_manager.scalar_lut_manager.show_legend = True\r\n\r\n #应变scene配置\r\n e.current_scene = self.scene2.mayavi_scene\r\n e.add_source(self.simp_solver.resultdata.vtkdatasource_strain)\r\n e.add_module(Surface(name = 'strain'))\r\n self.scene.engine.current_scene.children[0].children[0].children[0].enable_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].contour.filled_contours = True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].module_manager.scalar_lut_manager.show_legend = True\r\n\r\n #密度scene配置\r\n e.current_scene = self.scene3.mayavi_scene\r\n e.add_source(self.simp_solver.resultdata.vtkdatasource_density)\r\n e.add_module(Surface(name = 'density'))\r\n self.scene.engine.current_scene.children[0].children[0].children[0].module_manager.scalar_lut_manager.show_legend = True\r\n \r\n \r\n def _update_vtkdatasource(self,old,new):\r\n self.simp_solver.loop\r\n\r\n filter = 0\r\n print('updating vtkdatasource')\r\n if 0< self.simp_solver.loop < 10:\r\n filter = 0.85\r\n if self.simp_solver.loop >= 10:\r\n filter = 1/np.e**(self.i) + 0.5\r\n self.i = self.i+1\r\n self.simp_solver.resultdata.vtkdatasource_displacement.data = self.simp_solver.resultdata.unstrgrid_displacement\r\n self.simp_solver.resultdata.vtkdatasource_displacement.update()\r\n\r\n 
self.simp_solver.resultdata.vtkdatasource_stress.data = self.simp_solver.resultdata.unstrgrid_stress\r\n self.simp_solver.resultdata.vtkdatasource_stress.update()\r\n\r\n self.simp_solver.resultdata.vtkdatasource_strain.data = self.simp_solver.resultdata.unstrgrid_strain\r\n self.simp_solver.resultdata.vtkdatasource_strain.update()\r\n self.simp_solver.resultdata.unstrgrid_density = self.simp_solver.resultdata.generate_unstrgrid_mesh(filter=1)\r\n self.simp_solver.resultdata.update_unstrgrid_density(self.simp_solver.resultdata.density)\r\n self.simp_solver.resultdata.vtkdatasource_density.data = self.simp_solver.resultdata.unstrgrid_density\r\n self.simp_solver.resultdata.vtkdatasource_density.update()\r\n\r\n print('updating done')\r\n print(\"----------------------\")\r\n\r\n #动态监听currentselection\r\n def _selection_change(self, old, new):\r\n self.trait_property_changed('current_selection', old, new)\r\n\r\n def _get_current_selection(self):\r\n return self.scene.engine.current_selection\r\n\r\n def _plot_convergence_curve(self):\r\n\r\n plt.close() # clf() # 清图 cla() # 清坐标轴 close() # 关窗口\r\n\r\n fig = plt.figure()\r\n fig.hold(False)\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.axis(\"auto\")\r\n ax.set_ylabel('Strain_energy')\r\n\r\n ax.set_title('convergence curves of strain energy and volume rate')\r\n\r\n ax1 = ax.twinx()\r\n ax1.set_ylabel('volume_rate')\r\n ax1.set_ylim([0,1])\r\n # ax.xaxis()# 设置图像显示的时候XY轴比例\r\n plt.grid(True) # 添加网格\r\n plt.ion() # interactive mode on\r\n try:\r\n while 1:\r\n ax.set_xlabel('Iteration:' + str(self.simp_solver.loop))\r\n ax.plot(self.simp_solver.strain_energy,c='b')\r\n ax1.plot(self.simp_solver.volume_rate,c = 'g')\r\n plt.pause(0.5)\r\n if self.simp_solver.finished:\r\n break\r\n ax.plot(self.simp_solver.strain_energy,c = 'b')\r\n ax1.plot(self.simp_solver.volume_rate, c='g')\r\n plt.savefig('Convergence_curve.png')\r\n plt.pause(36000)\r\n\r\n except Exception as err:\r\n print(err)\r\n # plt.plot(self.simp_solver.strain_energy)H:\\GitHub\\Topology-optimization-of-structure-via-simp-method\\Python\r\n # ylabel = 'strain_energy/iteration: '+str(self.simp_solver.loop)\r\n # plt.ylabel(ylabel)\r\n # plt.xlabel('steps')\r\n # plt.title('convergence curve of strain energy')\r\n # plt.show()\r\n\r\n\r\n\r\n def _plot(self):\r\n pass\r\n def _animate(self):\r\n\r\n self.scene.engine.current_scene = self.scene4.mayavi_scene\r\n src = mlab.pipeline.open((self.address+'\\density\\density_00.vtu'))\r\n src.play = False\r\n src.add_module(Surface(name='animate_density'))\r\n # self.scene.engine.current_scene.children[0].children[0].children[0].enable_contours=True\r\n # self.scene.engine.current_scene.children[0].children[0].children[0].contour.filled_contours=True\r\n self.scene.engine.current_scene.children[0].children[0].children[0].module_manager.scalar_lut_manager.show_legend=True\r\n\r\n\r\nif __name__ == '__main__':\r\n #for cantilever2D e = 1, nu = 0.3\r\n #for complex2D e = 2.1*50000 ,nu = 0.3\r\n #for MBB,L_shape,center_load,distributed_load:\r\n # e = 1, nu = 0.3\r\n #L_shape: r = 1.5\r\n #MBB,center_load,distributed_load: r = 1.2\r\n\r\n # m = ShorthairCat(type='top2d',e =1, nu=0.3, r = 1.2, penal = 3, move = 0.1,volfac = 0.4)\r\n #e = 20000\r\n m = ShorthairCat(type='top3d',e =1000, nu=0.2, r = 30, penal = 3, move = 0.1,volfac = 0.2)\r\n m.configure_traits()\r\n try:\r\n vtu2stl()\r\n except:\r\n print('an error occured,please check your vtu file named as :top3d ')\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.e.add_source",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
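The SIMP visualisation code in the row above progressively tightens the density display threshold as the optimisation proceeds: a fixed cutoff of 0.85 for the first few loops, then `1/e**i + 0.5`, which decays toward 0.5. The sketch below reproduces only that schedule; the loop count shown is illustrative, not part of the catalogued run.

```python
# Minimal sketch of the progressive density threshold used in
# _update_vtkdatasource in the row above: 0.85 for early iterations,
# then 1/e**i + 0.5, which approaches 0.5 as i grows.
# The range of iterations printed here is illustrative only.
import numpy as np


def density_threshold(loop: int, i: int) -> float:
    if 0 < loop < 10:
        return 0.85
    return 1.0 / np.e**i + 0.5


if __name__ == "__main__":
    i = 1
    for loop in range(1, 21):
        t = density_threshold(loop, i)
        if loop >= 10:
            i += 1  # mirrors self.i = self.i + 1 after each late-stage update
        print(f"loop {loop:2d}: threshold {t:.3f}")
```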
ZongSingHuang/NSGA-II
|
[
"d7f0d3596b711eadd0c34ce1d741fc60caa37d77"
] |
[
"NSGA II.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 24 16:01:41 2021\r\n\r\n@author: zongsing.huang\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport itertools\r\n\r\n#%% 目標函式\r\ndef fitness(X):\r\n if X.ndim==1:\r\n X = X.reshape(1, -1)\r\n \r\n F1 = X**2 # First function to optimize\r\n F2 = (X-2)**2 # Second function to optimize\r\n \r\n F = np.hstack([F1, F2])\r\n \r\n return F\r\n\r\n#%% 選擇1\r\n# tournament selection\r\ndef selection1(X, FNDS, CD, k):\r\n P = X.shape[0]\r\n p1_idx = np.zeros(int(P/2)) - 1\r\n p2_idx = np.zeros(int(P/2)) - 1\r\n\r\n for i in range(int(P/2)):\r\n while True:\r\n competitor = np.random.choice(P, size=k, replace=False)\r\n competitor_FNDS = FNDS[competitor]\r\n competitor_CD = CD[competitor]\r\n \r\n if len(np.unique(competitor_FNDS))==1:\r\n evaluation_for_CD = np.argmin(competitor_CD)\r\n best = competitor[evaluation_for_CD]\r\n else:\r\n evaluation_for_FNDS = np.argmin(competitor_FNDS)\r\n best = competitor[evaluation_for_FNDS]\r\n \r\n if p1_idx[i]==-1:\r\n p1_idx[i] = best\r\n elif best!=p1_idx[i]:\r\n p2_idx[i] = best\r\n break\r\n \r\n p1 = X[p1_idx.astype(int)]\r\n p2 = X[p2_idx.astype(int)]\r\n\r\n return p1, p2\r\n\r\n#%% 選擇2\r\ndef selection2(X, FNDS, CD, P):\r\n selected_idx = []\r\n rank_set = np.sort( np.unique(FNDS) )\r\n \r\n for rank in rank_set:\r\n rank_length = np.sum( FNDS==rank )\r\n \r\n if len(selected_idx)+rank_length<=P:\r\n selected_idx = selected_idx + np.where(FNDS==rank)[0].tolist()\r\n else:\r\n vacancy_P = int(P - len(selected_idx))\r\n competitor = np.where(FNDS==rank)[0]\r\n competitor_CD = CD[competitor]\r\n evaluation_for_CD = np.argsort(competitor_CD)[::-1]\r\n best = competitor[evaluation_for_CD]\r\n selected_idx = selected_idx + best[:vacancy_P].tolist()\r\n\r\n if len(selected_idx)==P:\r\n break\r\n \r\n new_X = X[selected_idx]\r\n \r\n return new_X\r\n\r\n#%% 交配\r\n# whole arithmetic crossover\r\ndef crossover(p1, p2, pc):\r\n P = p1.shape[0]\r\n D = p1.shape[1]\r\n c1 = p1.copy()\r\n c2 = p2.copy()\r\n \r\n r = np.random.uniform(size=[P])\r\n beta = np.random.uniform(size=[P, D])\r\n mask = r<=pc\r\n c1[mask] = beta[mask]*p1[mask] + (1-beta[mask])*p2[mask]\r\n c2[mask] = beta[mask]*p2[mask] + (1-beta[mask])*p1[mask]\r\n \r\n return c1, c2\r\n\r\n#%% 突變\r\n# bit flip mutation\r\ndef mutation(c1, pm, lb, ub):\r\n P = c1.shape[0]\r\n D = c1.shape[1]\r\n ub = ub[0]\r\n lb = lb[0]\r\n \r\n r = np.random.uniform(size=[P, D])\r\n mask = r<=pm\r\n new_gene = np.random.uniform(low=lb, high=ub, size=[P, D])\r\n c1[mask] = new_gene[mask]\r\n \r\n return c1\r\n\r\n#%% 快速非支配排序\r\ndef fast_non_dominated_sort(F):\r\n P = F.shape[0]\r\n Sp = [ [] for i in range(P) ]\r\n Np = np.zeros([P])\r\n FNDS = np.zeros([P]) - 1\r\n \r\n # step1. 染色體倆倆比較\r\n for i, j in itertools.combinations(range(P), 2):\r\n # case1. 若染色體i的所有目標式之適應值,至少一個優於染色體j,其餘不分勝負,則代表染色體i支配染色體j\r\n if all(F[i]<=F[j])==True and any(F[i]<F[j])==True:\r\n Sp[i].append(j)\r\n Np[j] = Np[j] + 1\r\n # case2. 若染色體i的所有目標式之適應值,至少一個遜於染色體j,其餘不分勝負,則代表染色體j支配染色體i\r\n elif all(F[i]>=F[j])==True and any(F[i]>F[j])==True:\r\n Sp[j].append(i)\r\n Np[i] = Np[i] + 1\r\n \r\n # step5. The above procedures are continued until all fronts are identified\r\n ct = 1 # 初始化名次\r\n while True:\r\n # step2. 找出所有的非支配解,也就是Np=0的染色體\r\n Np_equal_0 = np.where(Np==0)[0]\r\n \r\n for idx in Np_equal_0:\r\n # step3-1. 給予名次\r\n FNDS[idx] = ct\r\n # step3-2. 隸屬於旗下的被支配解Np-1\r\n dominated_set = Sp[idx]\r\n Np[dominated_set] = Np[dominated_set] - 1\r\n # step3-3. 
以inf表示完成\r\n Np[idx] = np.inf\r\n \r\n # step4-1. 若所有染色體的Np都是inf,代表已經歷遍完畢\r\n if all(Np==np.inf):\r\n break\r\n # step4-2. 若仍有染色體的Np=0,更新名次並繼續\r\n elif 0 in Np:\r\n ct = ct + 1\r\n \r\n group_FNDS = [np.where(FNDS==i+1)[0] for i in range(ct)]\r\n \r\n return FNDS, group_FNDS\r\n\r\n#%% 擁擠距離\r\n# =============================================================================\r\n# 如果該層級有1個解,則distance = [inf]\r\n# 如果該層級有2個解,則distance = [inf, inf]\r\n# 如果該層級有3個解,則distance = [inf, ?, inf]\r\n# 如果該層級有4個解,則distance = [inf, ?, ?, inf]\r\n# ...\r\n# 因此可以得知,該層級需要有>=3個解才需要進行計算\r\n# =============================================================================\r\ndef crowding_distance(F, group_FNDS):\r\n P = F.shape[0]\r\n CD = np.zeros(P) - 1\r\n \r\n for i in range(len(group_FNDS)):\r\n selected_idx = group_FNDS[i]\r\n distance = np.zeros(len(selected_idx)) + np.inf\r\n \r\n if len(distance)>2:\r\n sorted_F = np.sort(F[selected_idx], axis=0)\r\n \r\n for j in range(1, len(distance)-1):\r\n temp = sorted_F[j+1] - sorted_F[j-1]\r\n \r\n if 0 in ( np.max(F, axis=0) - np.min(F, axis=0) ):\r\n distance[j] = 0\r\n print('完全收斂,請重新確認參數設定!!!')\r\n else:\r\n temp = temp / ( np.max(F, axis=0) - np.min(F, axis=0) )\r\n distance[j] = temp.sum()\r\n \r\n CD[selected_idx] = distance\r\n \r\n return CD\r\n\r\n#%% 參數設定\r\nP = 40\r\nD = 1\r\nG = 1000\r\npc = 1.0\r\npm = 1.0\r\nlb = -1e3\r\nub =1e3\r\nk = 2\r\n\r\n#%% 初始化\r\nlb = lb*np.ones([P, D])\r\nub = ub*np.ones([P, D])\r\nX = np.random.uniform(low=lb, high=ub)\r\n\r\n#%% 迭代\r\nfor g in range(G):\r\n # 適應值計算\r\n F = fitness(X)\r\n \r\n # 非凌越排序\r\n FNDS, group_FNDS = fast_non_dominated_sort(F)\r\n\r\n # 擁擠距離\r\n CD = crowding_distance(F, group_FNDS)\r\n \r\n # 選擇1\r\n p1, p2 = selection1(X, FNDS, CD, k)\r\n \r\n # 交配\r\n c1, c2 = crossover(p1, p2, pc)\r\n \r\n # 突變\r\n c1 = mutation(c1, pm, lb, ub)\r\n c2 = mutation(c2, pm, lb, ub)\r\n\r\n # 建立子代\r\n new_X = np.vstack([c1, c2])\r\n np.random.shuffle(new_X)\r\n\r\n # 菁英策略(母代與子代作合併)\r\n elite_X = np.vstack([X, new_X])\r\n\r\n # 適應值計算\r\n elite_F = fitness(elite_X)\r\n\r\n # 非凌越排序\r\n FNDS, group_FNDS = fast_non_dominated_sort(elite_F)\r\n \r\n # 擁擠距離\r\n CD = crowding_distance(elite_F, group_FNDS)\r\n\r\n # 選擇2\r\n X = selection2(elite_X, FNDS, CD, P)\r\n gbest_X = X.copy()\r\n \r\n print(g)\r\n\r\n#%% 畫圖\r\nF = fitness(gbest_X)\r\nplt.figure()\r\nplt.xlabel('F1', fontsize=15)\r\nplt.ylabel('F2', fontsize=15)\r\nplt.scatter(F[:, 0], F[:, 1], edgecolors='black')\r\nplt.grid()\r\nplt.show()"
] |
[
[
"numpy.max",
"numpy.argmin",
"numpy.where",
"numpy.hstack",
"numpy.unique",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.choice",
"numpy.min",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.random.shuffle",
"numpy.ones",
"numpy.sort",
"numpy.random.uniform",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.vstack"
]
] |
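The NSGA-II implementation in the row above ranks chromosomes with a fast non-dominated sort whose core test is Pareto dominance: solution i dominates solution j when it is no worse in every objective and strictly better in at least one (`all(F[i] <= F[j])` and `any(F[i] < F[j])`). A minimal sketch of that test on illustrative objective values follows; the numbers are made up for demonstration.

```python
# Minimal sketch of the Pareto-dominance test used in fast_non_dominated_sort
# in the row above. Objectives are minimised, matching the catalogued code.
# The objective values below are illustrative only.
import numpy as np


def dominates(fi: np.ndarray, fj: np.ndarray) -> bool:
    """True if fi dominates fj: no worse everywhere, strictly better somewhere."""
    return bool(np.all(fi <= fj) and np.any(fi < fj))


if __name__ == "__main__":
    F = np.array([[1.0, 4.0],   # solution 0
                  [2.0, 5.0],   # solution 1: dominated by solution 0
                  [0.5, 6.0]])  # solution 2: trades off against solution 0
    print(dominates(F[0], F[1]))  # True
    print(dominates(F[0], F[2]))  # False
    print(dominates(F[2], F[0]))  # False -> 0 and 2 sit on the same front
```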
aa1371/pandas
|
[
"77443dce2734d57484e3f5f38eba6d1897089182"
] |
[
"pandas/core/indexes/base.py"
] |
[
"from __future__ import annotations\n\nfrom datetime import datetime\nimport functools\nfrom itertools import zip_longest\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Literal,\n Sequence,\n TypeVar,\n cast,\n final,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n algos as libalgos,\n index as libindex,\n lib,\n)\nimport pandas._libs.join as libjoin\nfrom pandas._libs.lib import (\n is_datetime_array,\n no_default,\n)\nfrom pandas._libs.tslibs import (\n IncompatibleFrequency,\n NaTType,\n OutOfBoundsDatetime,\n Timestamp,\n tz_compare,\n)\nfrom pandas._typing import (\n AnyArrayLike,\n ArrayLike,\n Dtype,\n DtypeObj,\n F,\n Shape,\n npt,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n DuplicateLabelError,\n InvalidIndexError,\n)\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n deprecate_nonkeyword_arguments,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.cast import (\n can_hold_element,\n find_common_type,\n infer_dtype_from,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical_dtype,\n is_dtype_equal,\n is_ea_or_datetimelike_dtype,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_interval_dtype,\n is_iterator,\n is_list_like,\n is_numeric_dtype,\n is_object_dtype,\n is_scalar,\n is_signed_integer_dtype,\n is_unsigned_integer_dtype,\n needs_i8_conversion,\n pandas_dtype,\n validate_all_hashable,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PandasDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDatetimeIndex,\n ABCMultiIndex,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.inference import is_dict_like\nfrom pandas.core.dtypes.missing import (\n array_equivalent,\n is_valid_na_for_dtype,\n isna,\n)\n\nfrom pandas.core import (\n missing,\n ops,\n)\nfrom pandas.core.accessor import CachedAccessor\nimport pandas.core.algorithms as algos\nfrom pandas.core.array_algos.putmask import (\n setitem_datetimelike_compat,\n validate_putmask,\n)\nfrom pandas.core.arrays import (\n Categorical,\n ExtensionArray,\n)\nfrom pandas.core.arrays.datetimes import (\n tz_to_dtype,\n validate_tz_from_dtype,\n)\nfrom pandas.core.arrays.sparse import SparseDtype\nfrom pandas.core.base import (\n IndexOpsMixin,\n PandasObject,\n)\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n ensure_wrapped_if_datetimelike,\n extract_array,\n sanitize_array,\n)\nfrom pandas.core.indexers import deprecate_ndim_indexing\nfrom pandas.core.indexes.frozen import FrozenList\nfrom pandas.core.ops import get_op_result_name\nfrom pandas.core.ops.invalid import make_invalid_op\nfrom pandas.core.sorting import (\n ensure_key_mapped,\n get_group_index_sorter,\n nargsort,\n)\nfrom pandas.core.strings import StringMethods\n\nfrom pandas.io.formats.printing import (\n PrettyDict,\n default_pprint,\n format_object_attrs,\n format_object_summary,\n pprint_thing,\n)\n\nif TYPE_CHECKING:\n\n from pandas import (\n CategoricalIndex,\n DataFrame,\n IntervalIndex,\n MultiIndex,\n RangeIndex,\n Series,\n )\n\n\n__all__ = [\"Index\"]\n\n_unsortable_types = frozenset((\"mixed\", \"mixed-integer\"))\n\n_index_doc_kwargs: 
dict[str, str] = {\n \"klass\": \"Index\",\n \"inplace\": \"\",\n \"target_klass\": \"Index\",\n \"raises_section\": \"\",\n \"unique\": \"Index\",\n \"duplicated\": \"np.ndarray\",\n}\n_index_shared_docs: dict[str, str] = {}\nstr_t = str\n\n\n_o_dtype = np.dtype(\"object\")\n\n\ndef _maybe_return_indexers(meth: F) -> F:\n \"\"\"\n Decorator to simplify 'return_indexers' checks in Index.join.\n \"\"\"\n\n @functools.wraps(meth)\n def join(\n self,\n other,\n how: str_t = \"left\",\n level=None,\n return_indexers: bool = False,\n sort: bool = False,\n ):\n join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort)\n if not return_indexers:\n return join_index\n\n if lidx is not None:\n lidx = ensure_platform_int(lidx)\n if ridx is not None:\n ridx = ensure_platform_int(ridx)\n return join_index, lidx, ridx\n\n return cast(F, join)\n\n\ndef disallow_kwargs(kwargs: dict[str, Any]) -> None:\n if kwargs:\n raise TypeError(f\"Unexpected keyword arguments {repr(set(kwargs))}\")\n\n\ndef _new_Index(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__.\n \"\"\"\n # required for backward compat, because PI can't be instantiated with\n # ordinals through __new__ GH #13277\n if issubclass(cls, ABCPeriodIndex):\n from pandas.core.indexes.period import _new_PeriodIndex\n\n return _new_PeriodIndex(cls, **d)\n\n if issubclass(cls, ABCMultiIndex):\n if \"labels\" in d and \"codes\" not in d:\n # GH#23752 \"labels\" kwarg has been replaced with \"codes\"\n d[\"codes\"] = d.pop(\"labels\")\n\n return cls.__new__(cls, **d)\n\n\n_IndexT = TypeVar(\"_IndexT\", bound=\"Index\")\n\n\nclass Index(IndexOpsMixin, PandasObject):\n \"\"\"\n Immutable sequence used for indexing and alignment. 
The basic object\n storing axis labels for all pandas objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: object)\n If dtype is None, we find the dtype that best fits the data.\n If an actual dtype is provided, we coerce to that dtype if it's safe.\n Otherwise, an error will be raised.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible.\n\n See Also\n --------\n RangeIndex : Index implementing a monotonic integer range.\n CategoricalIndex : Index of :class:`Categorical` s.\n MultiIndex : A multi-level, or hierarchical Index.\n IntervalIndex : An Index of :class:`Interval` s.\n DatetimeIndex : Index of datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n PeriodIndex : Index of Period data.\n Int64Index : A special case of :class:`Index` with purely integer labels.\n UInt64Index : A special case of :class:`Index` with purely unsigned integer labels.\n Float64Index : A special case of :class:`Index` with purely float labels.\n\n Notes\n -----\n An Index instance can **only** contain hashable objects\n\n Examples\n --------\n >>> pd.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> pd.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n\n # tolist is not actually deprecated, just suppressed in the __dir__\n _hidden_attrs: frozenset[str] = (\n PandasObject._hidden_attrs\n | IndexOpsMixin._hidden_attrs\n | frozenset([\"contains\", \"set_value\"])\n )\n\n # To hand over control to subclasses\n _join_precedence = 1\n\n # Cython methods; see github.com/cython/cython/issues/2647\n # for why we need to wrap these instead of making them class attributes\n # Moreover, cython will choose the appropriate-dtyped sub-function\n # given the dtypes of the passed arguments\n\n @final\n def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n return libjoin.left_join_indexer_unique(sv, ov)\n\n @final\n def _left_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n @final\n def _inner_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n @final\n def _outer_indexer(\n self: _IndexT, other: _IndexT\n ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n # Caller is responsible for ensuring other.dtype == self.dtype\n sv = self._get_join_target()\n ov = other._get_join_target()\n joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)\n joined = self._from_join_target(joined_ndarray)\n return joined, lidx, ridx\n\n _typ: str = \"index\"\n _data: ExtensionArray | np.ndarray\n _id: object | None = None\n _name: Hashable = None\n # MultiIndex.levels 
previously allowed setting the index name. We\n # don't allow this anymore, and raise if it happens rather than\n # failing silently.\n _no_setting_name: bool = False\n _comparables: list[str] = [\"name\"]\n _attributes: list[str] = [\"name\"]\n _is_numeric_dtype: bool = False\n _can_hold_na: bool = True\n _can_hold_strings: bool = True\n\n # Whether this index is a NumericIndex, but not a Int64Index, Float64Index,\n # UInt64Index or RangeIndex. Needed for backwards compat. Remove this attribute and\n # associated code in pandas 2.0.\n _is_backward_compat_public_numeric_index: bool = False\n\n _engine_type: type[libindex.IndexEngine] = libindex.ObjectEngine\n # whether we support partial string indexing. Overridden\n # in DatetimeIndex and PeriodIndex\n _supports_partial_string_indexing = False\n\n _accessors = {\"str\"}\n\n str = CachedAccessor(\"str\", StringMethods)\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs\n ) -> Index:\n\n if kwargs:\n warnings.warn(\n \"Passing keywords other than 'data', 'dtype', 'copy', 'name', \"\n \"'tupleize_cols' is deprecated and will raise TypeError in a \"\n \"future version. Use the specific Index subclass directly instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n from pandas.core.arrays import PandasArray\n from pandas.core.indexes.range import RangeIndex\n\n name = maybe_extract_name(name, data, cls)\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if \"tz\" in kwargs:\n tz = kwargs.pop(\"tz\")\n validate_tz_from_dtype(dtype, tz)\n dtype = tz_to_dtype(tz)\n\n if isinstance(data, PandasArray):\n # ensure users don't accidentally put a PandasArray in an index.\n data = data.to_numpy()\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n data_dtype = getattr(data, \"dtype\", None)\n\n # range\n if isinstance(data, (range, RangeIndex)):\n result = RangeIndex(start=data, copy=copy, name=name)\n if dtype is not None:\n return result.astype(dtype, copy=False)\n return result\n\n elif is_ea_or_datetimelike_dtype(dtype):\n # non-EA dtype indexes have special casting logic, so we punt here\n klass = cls._dtype_to_subclass(dtype)\n if klass is not Index:\n return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)\n\n ea_cls = dtype.construct_array_type()\n data = ea_cls._from_sequence(data, dtype=dtype, copy=copy)\n data = np.asarray(data, dtype=object)\n disallow_kwargs(kwargs)\n return Index._simple_new(data, name=name)\n\n elif is_ea_or_datetimelike_dtype(data_dtype):\n klass = cls._dtype_to_subclass(data_dtype)\n if klass is not Index:\n result = klass(data, copy=copy, name=name, **kwargs)\n if dtype is not None:\n return result.astype(dtype, copy=False)\n return result\n\n data = np.array(data, dtype=object, copy=copy)\n disallow_kwargs(kwargs)\n return Index._simple_new(data, name=name)\n\n # index-like\n elif (\n isinstance(data, Index)\n and data._is_backward_compat_public_numeric_index\n and dtype is None\n ):\n return data._constructor(data, name=name, copy=copy)\n elif isinstance(data, (np.ndarray, Index, ABCSeries)):\n\n if isinstance(data, ABCMultiIndex):\n data = data._values\n\n if dtype is not None:\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. 
'0' and 0.0\n # should not be coerced\n # GH 11836\n data = sanitize_array(data, None, dtype=dtype, copy=copy)\n\n dtype = data.dtype\n\n if data.dtype.kind in [\"i\", \"u\", \"f\"]:\n # maybe coerce to a sub-class\n arr = data\n else:\n arr = com.asarray_tuplesafe(data, dtype=np.dtype(\"object\"))\n\n if dtype is None:\n arr = _maybe_cast_data_without_dtype(\n arr, cast_numeric_deprecated=True\n )\n dtype = arr.dtype\n\n if kwargs:\n return cls(arr, dtype, copy=copy, name=name, **kwargs)\n\n klass = cls._dtype_to_subclass(arr.dtype)\n arr = klass._ensure_array(arr, dtype, copy)\n disallow_kwargs(kwargs)\n return klass._simple_new(arr, name)\n\n elif is_scalar(data):\n raise cls._scalar_data_error(data)\n elif hasattr(data, \"__array__\"):\n return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)\n else:\n\n if tupleize_cols and is_list_like(data):\n # GH21470: convert iterable to list before determining if empty\n if is_iterator(data):\n data = list(data)\n\n if data and all(isinstance(e, tuple) for e in data):\n # we must be all tuples, otherwise don't construct\n # 10697\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex.from_tuples(\n data, names=name or kwargs.get(\"names\")\n )\n # other iterable of some kind\n\n subarr = com.asarray_tuplesafe(data, dtype=np.dtype(\"object\"))\n if dtype is None:\n # with e.g. a list [1, 2, 3] casting to numeric is _not_ deprecated\n # error: Incompatible types in assignment (expression has type\n # \"Union[ExtensionArray, ndarray[Any, Any]]\", variable has type\n # \"ndarray[Any, Any]\")\n subarr = _maybe_cast_data_without_dtype( # type: ignore[assignment]\n subarr, cast_numeric_deprecated=False\n )\n dtype = subarr.dtype\n return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)\n\n @classmethod\n def _ensure_array(cls, data, dtype, copy: bool):\n \"\"\"\n Ensure we have a valid array to pass to _simple_new.\n \"\"\"\n if data.ndim > 1:\n # GH#13601, GH#20285, GH#27125\n raise ValueError(\"Index data must be 1-dimensional\")\n if copy:\n # asarray_tuplesafe does not always copy underlying data,\n # so need to make sure that this happens\n data = data.copy()\n return data\n\n @final\n @classmethod\n def _dtype_to_subclass(cls, dtype: DtypeObj):\n # Delay import for perf. 
https://github.com/pandas-dev/pandas/pull/31423\n\n if isinstance(dtype, ExtensionDtype):\n if isinstance(dtype, DatetimeTZDtype):\n from pandas import DatetimeIndex\n\n return DatetimeIndex\n elif isinstance(dtype, CategoricalDtype):\n from pandas import CategoricalIndex\n\n return CategoricalIndex\n elif isinstance(dtype, IntervalDtype):\n from pandas import IntervalIndex\n\n return IntervalIndex\n elif isinstance(dtype, PeriodDtype):\n from pandas import PeriodIndex\n\n return PeriodIndex\n\n elif isinstance(dtype, SparseDtype):\n return cls._dtype_to_subclass(dtype.subtype)\n\n return Index\n\n if dtype.kind == \"M\":\n from pandas import DatetimeIndex\n\n return DatetimeIndex\n\n elif dtype.kind == \"m\":\n from pandas import TimedeltaIndex\n\n return TimedeltaIndex\n\n elif is_float_dtype(dtype):\n from pandas import Float64Index\n\n return Float64Index\n elif is_unsigned_integer_dtype(dtype):\n from pandas import UInt64Index\n\n return UInt64Index\n elif is_signed_integer_dtype(dtype):\n from pandas import Int64Index\n\n return Int64Index\n\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\n # operand type: \"Type[object]\")\n elif dtype == object: # type: ignore[comparison-overlap]\n # NB: assuming away MultiIndex\n return Index\n\n elif issubclass(dtype.type, (str, bool, np.bool_)):\n return Index\n\n raise NotImplementedError(dtype)\n\n \"\"\"\n NOTE for new Index creation:\n\n - _simple_new: It returns new Index with the same type as the caller.\n All metadata (such as name) must be provided by caller's responsibility.\n Using _shallow_copy is recommended because it fills these metadata\n otherwise specified.\n\n - _shallow_copy: It returns new Index with the same type (using\n _simple_new), but fills caller's metadata otherwise specified. Passed\n kwargs will overwrite corresponding metadata.\n\n See each method's docstring.\n \"\"\"\n\n @property\n def asi8(self):\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n warnings.warn(\n \"Index.asi8 is deprecated and will be removed in a future version\",\n FutureWarning,\n stacklevel=2,\n )\n return None\n\n @classmethod\n def _simple_new(cls: type[_IndexT], values, name: Hashable = None) -> _IndexT:\n \"\"\"\n We require that we have a dtype compat for the values. If we are passed\n a non-dtype compat, then coerce using the constructor.\n\n Must be careful not to recurse.\n \"\"\"\n assert isinstance(values, np.ndarray), type(values)\n\n result = object.__new__(cls)\n result._data = values\n # _index_data is a (temporary?) 
fix to ensure that the direct data\n # manipulation we do in `_libs/reduction.pyx` continues to work.\n # We need access to the actual ndarray, since we're messing with\n # data buffers and strides.\n result._index_data = values\n result._name = name\n result._cache = {}\n result._reset_identity()\n\n return result\n\n @classmethod\n def _with_infer(cls, *args, **kwargs):\n \"\"\"\n Constructor that uses the 1.0.x behavior inferring numeric dtypes\n for ndarray[object] inputs.\n \"\"\"\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \".*the Index constructor\", FutureWarning)\n result = cls(*args, **kwargs)\n\n if result.dtype == object and not result._is_multi:\n # error: Argument 1 to \"maybe_convert_objects\" has incompatible type\n # \"Union[ExtensionArray, ndarray[Any, Any]]\"; expected\n # \"ndarray[Any, Any]\"\n values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type]\n if values.dtype.kind in [\"i\", \"u\", \"f\"]:\n return Index(values, name=result.name)\n\n return result\n\n @cache_readonly\n def _constructor(self: _IndexT) -> type[_IndexT]:\n return type(self)\n\n @final\n def _maybe_check_unique(self) -> None:\n \"\"\"\n Check that an Index has no duplicates.\n\n This is typically only called via\n `NDFrame.flags.allows_duplicate_labels.setter` when it's set to\n True (duplicates aren't allowed).\n\n Raises\n ------\n DuplicateLabelError\n When the index is not unique.\n \"\"\"\n if not self.is_unique:\n msg = \"\"\"Index has duplicates.\"\"\"\n duplicates = self._format_duplicate_message()\n msg += f\"\\n{duplicates}\"\n\n raise DuplicateLabelError(msg)\n\n @final\n def _format_duplicate_message(self) -> DataFrame:\n \"\"\"\n Construct the DataFrame for a DuplicateLabelError.\n\n This returns a DataFrame indicating the labels and positions\n of duplicates in an index. This should only be called when it's\n already known that duplicates are present.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'a'])\n >>> idx._format_duplicate_message()\n positions\n label\n a [0, 2]\n \"\"\"\n from pandas import Series\n\n duplicates = self[self.duplicated(keep=\"first\")].unique()\n assert len(duplicates)\n\n out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates]\n if self._is_multi:\n # test_format_duplicate_labels_message_multi\n # error: \"Type[Index]\" has no attribute \"from_tuples\" [attr-defined]\n out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined]\n\n if self.nlevels == 1:\n out = out.rename_axis(\"label\")\n return out.to_frame(name=\"positions\")\n\n # --------------------------------------------------------------------\n # Index Internals Methods\n\n @final\n def _get_attributes_dict(self) -> dict[str_t, Any]:\n \"\"\"\n Return an attributes dict for my class.\n \"\"\"\n return {k: getattr(self, k, None) for k in self._attributes}\n\n def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT:\n \"\"\"\n Create a new Index with the same class as the caller, don't copy the\n data, use the same object attributes with passed in attributes taking\n precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n name : Label, defaults to self.name\n \"\"\"\n name = self._name if name is no_default else name\n\n return self._simple_new(values, name=name)\n\n def _view(self: _IndexT) -> _IndexT:\n \"\"\"\n fastpath to make a shallow copy, i.e. 
new object with same data.\n \"\"\"\n result = self._simple_new(self._values, name=self._name)\n\n result._cache = self._cache\n return result\n\n @final\n def _rename(self: _IndexT, name: Hashable) -> _IndexT:\n \"\"\"\n fastpath for rename if new name is already validated.\n \"\"\"\n result = self._view()\n result._name = name\n return result\n\n @final\n def is_(self, other) -> bool:\n \"\"\"\n More flexible, faster check like ``is`` but that works through views.\n\n Note: this is *not* the same as ``Index.identical()``, which checks\n that metadata is also the same.\n\n Parameters\n ----------\n other : object\n Other object to compare against.\n\n Returns\n -------\n bool\n True if both have same underlying data, False otherwise.\n\n See Also\n --------\n Index.identical : Works like ``Index.is_`` but also checks metadata.\n \"\"\"\n if self is other:\n return True\n elif not hasattr(other, \"_id\"):\n return False\n elif self._id is None or other._id is None:\n return False\n else:\n return self._id is other._id\n\n @final\n def _reset_identity(self) -> None:\n \"\"\"\n Initializes or resets ``_id`` attribute with new object.\n \"\"\"\n self._id = object()\n\n @final\n def _cleanup(self) -> None:\n self._engine.clear_mapping()\n\n @cache_readonly\n def _engine(self) -> libindex.IndexEngine:\n # For base class (object dtype) we get ObjectEngine\n\n # to avoid a reference cycle, bind `target_values` to a local variable, so\n # `self` is not passed into the lambda.\n target_values = self._get_engine_target()\n return self._engine_type(lambda: target_values, len(self))\n\n @final\n @cache_readonly\n def _dir_additions_for_owner(self) -> set[str_t]:\n \"\"\"\n Add the string-like labels to the owner dataframe/series dir output.\n\n If this is a MultiIndex, it's first level values are used.\n \"\"\"\n return {\n c\n for c in self.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n\n # --------------------------------------------------------------------\n # Array-Like Methods\n\n # ndarray compat\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Index.\n \"\"\"\n return len(self._data)\n\n def __array__(self, dtype=None) -> np.ndarray:\n \"\"\"\n The array interface, return my values.\n \"\"\"\n return np.asarray(self._data, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc and other functions.\n \"\"\"\n result = lib.item_from_zerodim(result)\n if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1:\n return result\n\n attrs = self._get_attributes_dict()\n attrs.pop(\"freq\", None) # For DatetimeIndex/TimedeltaIndex\n return Index(result, **attrs)\n\n @cache_readonly\n def dtype(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._data.dtype\n\n @final\n def ravel(self, order=\"C\"):\n \"\"\"\n Return an ndarray of the flattened values of the underlying data.\n\n Returns\n -------\n numpy.ndarray\n Flattened array.\n\n See Also\n --------\n numpy.ndarray.ravel : Return a flattened array.\n \"\"\"\n warnings.warn(\n \"Index.ravel returning ndarray is deprecated; in a future version \"\n \"this will return a view on self.\",\n FutureWarning,\n stacklevel=2,\n )\n values = self._get_engine_target()\n return values.ravel(order=order)\n\n def view(self, cls=None):\n\n # we need to see if we are subclassing an\n # index type here\n if cls is not None and not hasattr(cls, \"_typ\"):\n dtype = cls\n if isinstance(cls, str):\n dtype = 
pandas_dtype(cls)\n\n if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion(\n dtype\n ):\n if dtype.kind == \"m\" and dtype != \"m8[ns]\":\n # e.g. m8[s]\n return self._data.view(cls)\n\n arr = self._data.view(\"i8\")\n idx_cls = self._dtype_to_subclass(dtype)\n arr_cls = idx_cls._data_cls\n arr = arr_cls(self._data.view(\"i8\"), dtype=dtype)\n return idx_cls._simple_new(arr, name=self.name)\n\n result = self._data.view(cls)\n else:\n result = self._view()\n if isinstance(result, Index):\n result._id = self._id\n return result\n\n def astype(self, dtype, copy=True):\n \"\"\"\n Create an Index with values cast to dtypes.\n\n The class of a new Index is determined by dtype. When conversion is\n impossible, a TypeError exception is raised.\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n Note that any signed integer `dtype` is treated as ``'int64'``,\n and any unsigned integer `dtype` is treated as ``'uint64'``,\n regardless of the size.\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and internal requirements on dtype are\n satisfied, the original data is used to create a new Index\n or the original Index is returned.\n\n Returns\n -------\n Index\n Index with values cast to specified dtype.\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n if is_dtype_equal(self.dtype, dtype):\n return self.copy() if copy else self\n\n elif isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n new_values = cls._from_sequence(self, dtype=dtype, copy=False)\n return Index(new_values, dtype=dtype, copy=copy, name=self.name)\n\n try:\n casted = self._values.astype(dtype, copy=copy)\n except (TypeError, ValueError) as err:\n raise TypeError(\n f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\n ) from err\n return Index(casted, name=self.name, dtype=dtype)\n\n _index_shared_docs[\n \"take\"\n ] = \"\"\"\n Return a new %(klass)s of the values selected by the indices.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : array-like\n Indices to be taken.\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n fill_value : scalar, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 are regarded as NA. If Index doesn't hold NA, raise ValueError.\n\n Returns\n -------\n Index\n An index formed of elements at the given indices. 
Will be the same\n type as self, except for RangeIndex.\n\n See Also\n --------\n numpy.ndarray.take: Return an array formed from the\n elements of a at the given indices.\n \"\"\"\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(\n self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs\n ):\n if kwargs:\n nv.validate_take((), kwargs)\n indices = ensure_platform_int(indices)\n allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)\n\n # Note: we discard fill_value and use self._na_value, only relevant\n # in the case where allow_fill is True and fill_value is not None\n taken = algos.take(\n self._values, indices, allow_fill=allow_fill, fill_value=self._na_value\n )\n return type(self)._simple_new(taken, name=self.name)\n\n @final\n def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:\n \"\"\"\n We only use pandas-style take when allow_fill is True _and_\n fill_value is not None.\n \"\"\"\n if allow_fill and fill_value is not None:\n # only fill if we are passing a non-None fill_value\n if self._can_hold_na:\n if (indices < -1).any():\n raise ValueError(\n \"When allow_fill=True and fill_value is not None, \"\n \"all indices must be >= -1\"\n )\n else:\n cls_name = type(self).__name__\n raise ValueError(\n f\"Unable to fill values because {cls_name} cannot contain NA\"\n )\n else:\n allow_fill = False\n return allow_fill\n\n _index_shared_docs[\n \"repeat\"\n ] = \"\"\"\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n repeated_index : %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')\n >>> idx.repeat([1, 2, 3])\n Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')\n \"\"\"\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats, axis=None):\n repeats = ensure_platform_int(repeats)\n nv.validate_repeat((), {\"axis\": axis})\n res_values = self._values.repeat(repeats)\n\n return type(self)._simple_new(res_values, name=self.name)\n\n # --------------------------------------------------------------------\n # Copying Methods\n\n def copy(\n self: _IndexT,\n name: Hashable | None = None,\n deep: bool = False,\n dtype: Dtype | None = None,\n names: Sequence[Hashable] | None = None,\n ) -> _IndexT:\n \"\"\"\n Make a copy of this object.\n\n Name and dtype sets those attributes on the new object.\n\n Parameters\n ----------\n name : Label, optional\n Set name for new object.\n deep : bool, default False\n dtype : numpy dtype or pandas type, optional\n Set dtype for new object.\n\n .. deprecated:: 1.2.0\n use ``astype`` method instead.\n names : list-like, optional\n Kept for compatibility with MultiIndex. 
Should not be used.\n\n Returns\n -------\n Index\n Index refer to new object which is a copy of this object.\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n \"\"\"\n name = self._validate_names(name=name, names=names, deep=deep)[0]\n if deep:\n new_data = self._data.copy()\n new_index = type(self)._simple_new(new_data, name=name)\n else:\n new_index = self._rename(name=name)\n\n if dtype:\n warnings.warn(\n \"parameter dtype is deprecated and will be removed in a future \"\n \"version. Use the astype method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n new_index = new_index.astype(dtype)\n return new_index\n\n @final\n def __copy__(self: _IndexT, **kwargs) -> _IndexT:\n return self.copy(**kwargs)\n\n @final\n def __deepcopy__(self: _IndexT, memo=None) -> _IndexT:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n @final\n def __repr__(self) -> str_t:\n \"\"\"\n Return a string representation for this object.\n \"\"\"\n klass_name = type(self).__name__\n data = self._format_data()\n attrs = self._format_attrs()\n space = self._format_space()\n attrs_str = [f\"{k}={v}\" for k, v in attrs]\n prepr = f\",{space}\".join(attrs_str)\n\n # no data provided, just attributes\n if data is None:\n data = \"\"\n\n return f\"{klass_name}({data}{prepr})\"\n\n def _format_space(self) -> str_t:\n\n # using space here controls if the attributes\n # are line separated or not (the default)\n\n # max_seq_items = get_option('display.max_seq_items')\n # if len(self) > max_seq_items:\n # space = \"\\n%s\" % (' ' * (len(klass) + 1))\n return \" \"\n\n @property\n def _formatter_func(self):\n \"\"\"\n Return the formatter function.\n \"\"\"\n return default_pprint\n\n def _format_data(self, name=None) -> str_t:\n \"\"\"\n Return the formatted data as a unicode string.\n \"\"\"\n # do we want to justify (only do so for non-objects)\n is_justify = True\n\n if self.inferred_type == \"string\":\n is_justify = False\n elif self.inferred_type == \"categorical\":\n self = cast(\"CategoricalIndex\", self)\n if is_object_dtype(self.categories):\n is_justify = False\n\n return format_object_summary(\n self,\n self._formatter_func,\n is_justify=is_justify,\n name=name,\n line_break_each_value=self._is_multi,\n )\n\n def _format_attrs(self) -> list[tuple[str_t, str_t | int]]:\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n return format_object_attrs(self, include_dtype=not self._is_multi)\n\n @final\n def _mpl_repr(self) -> np.ndarray:\n # how to represent ourselves to matplotlib\n if isinstance(self.dtype, np.dtype) and self.dtype.kind != \"M\":\n return cast(np.ndarray, self.values)\n return self.astype(object, copy=False)._values\n\n def format(\n self,\n name: bool = False,\n formatter: Callable | None = None,\n na_rep: str_t = \"NaN\",\n ) -> list[str_t]:\n \"\"\"\n Render a string representation of the Index.\n \"\"\"\n header = []\n if name:\n header.append(\n pprint_thing(self.name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if self.name is not None\n else \"\"\n )\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, na_rep=na_rep)\n\n def _format_with_header(\n self, header: list[str_t], na_rep: str_t = \"NaN\"\n ) -> list[str_t]:\n from 
pandas.io.formats.format import format_array\n\n values = self._values\n\n if is_object_dtype(values.dtype):\n values = cast(np.ndarray, values)\n values = lib.maybe_convert_objects(values, safe=True)\n\n result = [pprint_thing(x, escape_chars=(\"\\t\", \"\\r\", \"\\n\")) for x in values]\n\n # could have nans\n mask = isna(values)\n if mask.any():\n result_arr = np.array(result)\n result_arr[mask] = na_rep\n result = result_arr.tolist()\n else:\n result = trim_front(format_array(values, None, justify=\"left\"))\n return header + result\n\n @final\n def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:\n \"\"\"\n Format specified values of `self` and return them.\n\n .. deprecated:: 1.2.0\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values.\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n \"\"\"\n warnings.warn(\n \"The 'to_native_types' method is deprecated and will be removed in \"\n \"a future version. Use 'astype(str)' instead.\",\n FutureWarning,\n stacklevel=2,\n )\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n\n def _format_native_types(self, na_rep=\"\", quoting=None, **kwargs):\n \"\"\"\n Actually format specific types of the index.\n \"\"\"\n mask = isna(self)\n if not self.is_object() and not quoting:\n values = np.asarray(self).astype(str)\n else:\n values = np.array(self, dtype=object, copy=True)\n\n values[mask] = na_rep\n return values\n\n def _summary(self, name=None) -> str_t:\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n if len(self) > 0:\n head = self[0]\n if hasattr(head, \"format\") and not isinstance(head, str):\n head = head.format()\n tail = self[-1]\n if hasattr(tail, \"format\") and not isinstance(tail, str):\n tail = tail.format()\n index_summary = f\", {head} to {tail}\"\n else:\n index_summary = \"\"\n\n if name is None:\n name = type(self).__name__\n return f\"{name}: {len(self)} entries{index_summary}\"\n\n # --------------------------------------------------------------------\n # Conversion Methods\n\n def to_flat_index(self):\n \"\"\"\n Identity method.\n\n This is implemented for compatibility with subclass implementations\n when chaining.\n\n Returns\n -------\n pd.Index\n Caller.\n\n See Also\n --------\n MultiIndex.to_flat_index : Subclass implementation.\n \"\"\"\n return self\n\n def to_series(self, index=None, name: Hashable = None) -> Series:\n \"\"\"\n Create a Series with both index and values equal to the index keys.\n\n Useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n index : Index, optional\n Index of resulting Series. If None, defaults to original index.\n name : str, optional\n Name of resulting Series. 
If None, defaults to name of original\n index.\n\n Returns\n -------\n Series\n The dtype will be based on the type of the Index values.\n\n See Also\n --------\n Index.to_frame : Convert an Index to a DataFrame.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n\n By default, the original Index and original name is reused.\n\n >>> idx.to_series()\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n Name: animal, dtype: object\n\n To enforce a new Index, specify new labels to ``index``:\n\n >>> idx.to_series(index=[0, 1, 2])\n 0 Ant\n 1 Bear\n 2 Cow\n Name: animal, dtype: object\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_series(name='zoo')\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n Name: zoo, dtype: object\n \"\"\"\n from pandas import Series\n\n if index is None:\n index = self._view()\n if name is None:\n name = self.name\n\n return Series(self._values.copy(), index=index, name=name)\n\n def to_frame(self, index: bool = True, name: Hashable = None) -> DataFrame:\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original Index.\n\n name : object, default None\n The passed name should substitute for the index name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame()\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_frame(index=False, name='zoo')\n zoo\n 0 Ant\n 1 Bear\n 2 Cow\n \"\"\"\n from pandas import DataFrame\n\n if name is None:\n name = self.name or 0\n result = DataFrame({name: self._values.copy()})\n\n if index:\n result.index = self\n return result\n\n # --------------------------------------------------------------------\n # Name-Centric Methods\n\n @property\n def name(self):\n \"\"\"\n Return Index or MultiIndex name.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Hashable):\n if self._no_setting_name:\n # Used in MultiIndex.levels to avoid silently ignoring name updates.\n raise RuntimeError(\n \"Cannot set name on a level of a MultiIndex. 
Use \"\n \"'MultiIndex.set_names' instead.\"\n )\n maybe_extract_name(value, None, type(self))\n self._name = value\n\n @final\n def _validate_names(\n self, name=None, names=None, deep: bool = False\n ) -> list[Hashable]:\n \"\"\"\n Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex.\n \"\"\"\n from copy import deepcopy\n\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n new_names = deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n new_names = names\n elif not is_list_like(name):\n new_names = [name]\n else:\n new_names = name\n\n if len(new_names) != len(self.names):\n raise ValueError(\n f\"Length of new names must be {len(self.names)}, got {len(new_names)}\"\n )\n\n # All items in 'new_names' need to be hashable\n validate_all_hashable(*new_names, error_name=f\"{type(self).__name__}.name\")\n\n return new_names\n\n def _get_names(self) -> FrozenList:\n return FrozenList((self.name,))\n\n def _set_names(self, values, *, level=None) -> None:\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n\n Raises\n ------\n TypeError if each name is not hashable.\n \"\"\"\n if not is_list_like(values):\n raise ValueError(\"Names must be a list-like\")\n if len(values) != 1:\n raise ValueError(f\"Length of new names must be 1, got {len(values)}\")\n\n # GH 20527\n # All items in 'name' need to be hashable:\n validate_all_hashable(*values, error_name=f\"{type(self).__name__}.name\")\n\n self._name = values[0]\n\n names = property(fset=_set_names, fget=_get_names)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"names\"])\n def set_names(self, names, level=None, inplace: bool = False):\n \"\"\"\n Set Index or MultiIndex name.\n\n Able to set new names partially and by level.\n\n Parameters\n ----------\n\n names : label or list of label or dict-like for MultiIndex\n Name(s) to set.\n\n .. versionchanged:: 1.3.0\n\n level : int, label or list of int or label, optional\n If the index is a MultiIndex and names is not dict-like, level(s) to set\n (None for all levels). Otherwise level must be None.\n\n .. versionchanged:: 1.3.0\n\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index or None\n The same type as the caller or None if ``inplace=True``.\n\n See Also\n --------\n Index.rename : Able to set new names without level.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n >>> idx.set_names('quarter')\n Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... 
[2018, 2019]])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n )\n >>> idx.set_names(['kind', 'year'], inplace=True)\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.set_names('species', level=0)\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n\n When renaming levels with a dict, levels can not be passed.\n\n >>> idx.set_names({'kind': 'snake'})\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['snake', 'year'])\n \"\"\"\n if level is not None and not isinstance(self, ABCMultiIndex):\n raise ValueError(\"Level must be None for non-MultiIndex\")\n\n elif level is not None and not is_list_like(level) and is_list_like(names):\n raise TypeError(\"Names must be a string when a single level is provided.\")\n\n elif not is_list_like(names) and level is None and self.nlevels > 1:\n raise TypeError(\"Must pass list-like as `names`.\")\n\n elif is_dict_like(names) and not isinstance(self, ABCMultiIndex):\n raise TypeError(\"Can only pass dict-like as `names` for MultiIndex.\")\n\n elif is_dict_like(names) and level is not None:\n raise TypeError(\"Can not pass level for dictlike `names`.\")\n\n if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:\n # Transform dict to list of new names and corresponding levels\n level, names_adjusted = [], []\n for i, name in enumerate(self.names):\n if name in names.keys():\n level.append(i)\n names_adjusted.append(names[name])\n names = names_adjusted\n\n if not is_list_like(names):\n names = [names]\n if level is not None and not is_list_like(level):\n level = [level]\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n\n idx._set_names(names, level=level)\n if not inplace:\n return idx\n\n def rename(self, name, inplace=False):\n \"\"\"\n Alter Index or MultiIndex name.\n\n Able to set new names without level. Defaults to returning new index.\n Length of names must match number of levels in MultiIndex.\n\n Parameters\n ----------\n name : label or list of labels\n Name(s) to set.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index or None\n The same type as the caller or None if ``inplace=True``.\n\n See Also\n --------\n Index.set_names : Able to set new names partially and by level.\n\n Examples\n --------\n >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')\n >>> idx.rename('grade')\n Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... [2018, 2019]],\n ... 
names=['kind', 'year'])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.rename(['species', 'year'])\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n >>> idx.rename('species')\n Traceback (most recent call last):\n TypeError: Must pass list-like as `names`.\n \"\"\"\n return self.set_names([name], inplace=inplace)\n\n # --------------------------------------------------------------------\n # Level-Centric Methods\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Number of levels.\n \"\"\"\n return 1\n\n def _sort_levels_monotonic(self: _IndexT) -> _IndexT:\n \"\"\"\n Compat with MultiIndex.\n \"\"\"\n return self\n\n @final\n def _validate_index_level(self, level) -> None:\n \"\"\"\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\n \"Too many levels: Index has only 1 level, \"\n f\"{level} is not a valid level number\"\n )\n elif level > 0:\n raise IndexError(\n f\"Too many levels: Index has only 1 level, not {level + 1}\"\n )\n elif level != self.name:\n raise KeyError(\n f\"Requested level ({level}) does not match index name ({self.name})\"\n )\n\n def _get_level_number(self, level) -> int:\n self._validate_index_level(level)\n return 0\n\n def sortlevel(self, level=None, ascending=True, sort_remaining=None):\n \"\"\"\n For internal compatibility with the Index API.\n\n Sort the Index. This is for compat with MultiIndex\n\n Parameters\n ----------\n ascending : bool, default True\n False to sort in descending order\n\n level, sort_remaining are compat parameters\n\n Returns\n -------\n Index\n \"\"\"\n if not isinstance(ascending, (list, bool)):\n raise TypeError(\n \"ascending must be a single bool value or\"\n \"a list of bool values of length 1\"\n )\n\n if isinstance(ascending, list):\n if len(ascending) != 1:\n raise TypeError(\"ascending must be a list of bool values of length 1\")\n ascending = ascending[0]\n\n if not isinstance(ascending, bool):\n raise TypeError(\"ascending must be a bool value\")\n\n return self.sort_values(return_indexer=True, ascending=ascending)\n\n def _get_level_values(self, level) -> Index:\n \"\"\"\n Return an Index of values for requested level.\n\n This is primarily useful to get an individual level of values from a\n MultiIndex, but is provided on Index as well for compatibility.\n\n Parameters\n ----------\n level : int or str\n It is either the integer position or the name of the level.\n\n Returns\n -------\n Index\n Calling object, as there is only one level in the Index.\n\n See Also\n --------\n MultiIndex.get_level_values : Get values for a level of a MultiIndex.\n\n Notes\n -----\n For Index, level should be 0, since there are no multiple levels.\n\n Examples\n --------\n >>> idx = pd.Index(list('abc'))\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n\n Get level values by supplying `level` as integer:\n\n >>> idx.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n self._validate_index_level(level)\n return self\n\n get_level_values = _get_level_values\n\n @final\n def droplevel(self, level=0):\n \"\"\"\n Return index with requested level(s) removed.\n\n If resulting index has only 1 level left, the result will be\n of Index type, not MultiIndex.\n\n Parameters\n 
----------\n level : int, str, or list-like, default 0\n If a string is given, must be the name of a level\n If list-like, elements must be names or indexes of levels.\n\n Returns\n -------\n Index or MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays(\n ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])\n >>> mi\n MultiIndex([(1, 3, 5),\n (2, 4, 6)],\n names=['x', 'y', 'z'])\n\n >>> mi.droplevel()\n MultiIndex([(3, 5),\n (4, 6)],\n names=['y', 'z'])\n\n >>> mi.droplevel(2)\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.droplevel('z')\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.droplevel(['x', 'y'])\n Int64Index([5, 6], dtype='int64', name='z')\n \"\"\"\n if not isinstance(level, (tuple, list)):\n level = [level]\n\n levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]\n\n return self._drop_level_numbers(levnums)\n\n @final\n def _drop_level_numbers(self, levnums: list[int]):\n \"\"\"\n Drop MultiIndex levels by level _number_, not name.\n \"\"\"\n\n if not levnums and not isinstance(self, ABCMultiIndex):\n return self\n if len(levnums) >= self.nlevels:\n raise ValueError(\n f\"Cannot remove {len(levnums)} levels from an index with \"\n f\"{self.nlevels} levels: at least one level must be left.\"\n )\n # The two checks above guarantee that here self is a MultiIndex\n self = cast(\"MultiIndex\", self)\n\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n for i in levnums:\n new_levels.pop(i)\n new_codes.pop(i)\n new_names.pop(i)\n\n if len(new_levels) == 1:\n lev = new_levels[0]\n\n if len(lev) == 0:\n # If lev is empty, lev.take will fail GH#42055\n res_values = algos.take(lev._values, new_codes[0], allow_fill=True)\n result = type(lev)._simple_new(res_values, name=new_names[0])\n else:\n # set nan if needed\n mask = new_codes[0] == -1\n result = new_levels[0].take(new_codes[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n\n result._name = new_names[0]\n\n return result\n else:\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=new_names,\n verify_integrity=False,\n )\n\n def _get_grouper_for_level(self, mapper, *, level=None):\n \"\"\"\n Get index grouper corresponding to an index level\n\n Parameters\n ----------\n mapper: Group mapping function or None\n Function mapping index values to groups\n level : int or None\n Index level, positional\n\n Returns\n -------\n grouper : Index\n Index of values to group on.\n labels : ndarray of int or None\n Array of locations in level_index.\n uniques : Index or None\n Index of unique values for level.\n \"\"\"\n assert level is None or level == 0\n if mapper is None:\n grouper = self\n else:\n grouper = self.map(mapper)\n\n return grouper, None, None\n\n # --------------------------------------------------------------------\n # Introspection Methods\n\n @final\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Alias for is_monotonic_increasing.\n \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n Return if the index is monotonic increasing (only equal or\n increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3]).is_monotonic_increasing\n True\n >>> Index([1, 2, 2]).is_monotonic_increasing\n True\n >>> Index([1, 3, 2]).is_monotonic_increasing\n False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n 
\"\"\"\n Return if the index is monotonic decreasing (only equal or\n decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n \"\"\"\n return self._engine.is_monotonic_decreasing\n\n @final\n @property\n def _is_strictly_monotonic_increasing(self) -> bool:\n \"\"\"\n Return if the index is strictly monotonic increasing\n (only increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3])._is_strictly_monotonic_increasing\n True\n >>> Index([1, 2, 2])._is_strictly_monotonic_increasing\n False\n >>> Index([1, 3, 2])._is_strictly_monotonic_increasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_increasing\n\n @final\n @property\n def _is_strictly_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return if the index is strictly monotonic decreasing\n (only decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing\n True\n >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing\n False\n >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self) -> bool:\n \"\"\"\n Return if the index has unique values.\n \"\"\"\n return self._engine.is_unique\n\n @final\n @property\n def has_duplicates(self) -> bool:\n \"\"\"\n Check if the Index has duplicate values.\n\n Returns\n -------\n bool\n Whether or not the Index has duplicate values.\n\n Examples\n --------\n >>> idx = pd.Index([1, 5, 7, 7])\n >>> idx.has_duplicates\n True\n\n >>> idx = pd.Index([1, 5, 7])\n >>> idx.has_duplicates\n False\n\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.has_duplicates\n True\n\n >>> idx = pd.Index([\"Orange\", \"Apple\",\n ... 
\"Watermelon\"]).astype(\"category\")\n >>> idx.has_duplicates\n False\n \"\"\"\n return not self.is_unique\n\n @final\n def is_boolean(self) -> bool:\n \"\"\"\n Check if the Index only consists of booleans.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of booleans.\n\n See Also\n --------\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([True, False, True])\n >>> idx.is_boolean()\n True\n\n >>> idx = pd.Index([\"True\", \"False\", \"True\"])\n >>> idx.is_boolean()\n False\n\n >>> idx = pd.Index([True, False, \"True\"])\n >>> idx.is_boolean()\n False\n \"\"\"\n return self.inferred_type in [\"boolean\"]\n\n @final\n def is_integer(self) -> bool:\n \"\"\"\n Check if the Index only consists of integers.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of integers.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_integer()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_integer()\n False\n\n >>> idx = pd.Index([\"Apple\", \"Mango\", \"Watermelon\"])\n >>> idx.is_integer()\n False\n \"\"\"\n return self.inferred_type in [\"integer\"]\n\n @final\n def is_floating(self) -> bool:\n \"\"\"\n Check if the Index is a floating type.\n\n The Index may consist of only floats, NaNs, or a mix of floats,\n integers, or NaNs.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of only consists of floats, NaNs, or\n a mix of floats, integers, or NaNs.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4, np.nan])\n >>> idx.is_floating()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_floating()\n False\n \"\"\"\n return self.inferred_type in [\"floating\", \"mixed-integer-float\", \"integer-na\"]\n\n @final\n def is_numeric(self) -> bool:\n \"\"\"\n Check if the Index only consists of numeric data.\n\n Returns\n -------\n bool\n Whether or not the Index only consists of numeric data.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : 
Check if the Index is a floating type.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])\n >>> idx.is_numeric()\n True\n\n >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, \"Apple\"])\n >>> idx.is_numeric()\n False\n \"\"\"\n return self.inferred_type in [\"integer\", \"floating\"]\n\n @final\n def is_object(self) -> bool:\n \"\"\"\n Check if the Index is of the object dtype.\n\n Returns\n -------\n bool\n Whether or not the Index is of the object dtype.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([\"Apple\", \"Mango\", \"Watermelon\"])\n >>> idx.is_object()\n True\n\n >>> idx = pd.Index([\"Apple\", \"Mango\", 2.0])\n >>> idx.is_object()\n True\n\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.is_object()\n False\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])\n >>> idx.is_object()\n False\n \"\"\"\n return is_object_dtype(self.dtype)\n\n @final\n def is_categorical(self) -> bool:\n \"\"\"\n Check if the Index holds categorical data.\n\n Returns\n -------\n bool\n True if the Index is categorical.\n\n See Also\n --------\n CategoricalIndex : Index for categorical data.\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_interval : Check if the Index holds Interval objects.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... 
\"Watermelon\"]).astype(\"category\")\n >>> idx.is_categorical()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_categorical()\n False\n\n >>> s = pd.Series([\"Peter\", \"Victor\", \"Elisabeth\", \"Mar\"])\n >>> s\n 0 Peter\n 1 Victor\n 2 Elisabeth\n 3 Mar\n dtype: object\n >>> s.index.is_categorical()\n False\n \"\"\"\n return self.inferred_type in [\"categorical\"]\n\n @final\n def is_interval(self) -> bool:\n \"\"\"\n Check if the Index holds Interval objects.\n\n Returns\n -------\n bool\n Whether or not the Index holds Interval objects.\n\n See Also\n --------\n IntervalIndex : Index for Interval objects.\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_mixed : Check if the Index holds data with mixed data types.\n\n Examples\n --------\n >>> idx = pd.Index([pd.Interval(left=0, right=5),\n ... pd.Interval(left=5, right=10)])\n >>> idx.is_interval()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_interval()\n False\n \"\"\"\n return self.inferred_type in [\"interval\"]\n\n @final\n def is_mixed(self) -> bool:\n \"\"\"\n Check if the Index holds data with mixed data types.\n\n Returns\n -------\n bool\n Whether or not the Index holds data with mixed data types.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n\n Examples\n --------\n >>> idx = pd.Index(['a', np.nan, 'b'])\n >>> idx.is_mixed()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])\n >>> idx.is_mixed()\n False\n \"\"\"\n warnings.warn(\n \"Index.is_mixed is deprecated and will be removed in a future version. \"\n \"Check index.inferred_type directly instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.inferred_type in [\"mixed\"]\n\n @final\n def holds_integer(self) -> bool:\n \"\"\"\n Whether the type is an integer type.\n \"\"\"\n return self.inferred_type in [\"integer\", \"mixed-integer\"]\n\n @cache_readonly\n def inferred_type(self) -> str_t:\n \"\"\"\n Return a string of the type inferred from the values.\n \"\"\"\n return lib.infer_dtype(self._values, skipna=False)\n\n @cache_readonly\n def _is_all_dates(self) -> bool:\n \"\"\"\n Whether or not the index values only consist of dates.\n \"\"\"\n return is_datetime_array(ensure_object(self._values))\n\n @cache_readonly\n @final\n def is_all_dates(self) -> bool:\n \"\"\"\n Whether or not the index values only consist of dates.\n \"\"\"\n warnings.warn(\n \"Index.is_all_dates is deprecated, will be removed in a future version. 
\"\n \"check index.inferred_type instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self._is_all_dates\n\n @cache_readonly\n def _is_multi(self) -> bool:\n \"\"\"\n Cached check equivalent to isinstance(self, MultiIndex)\n \"\"\"\n return isinstance(self, ABCMultiIndex)\n\n # --------------------------------------------------------------------\n # Pickle Methods\n\n def __reduce__(self):\n d = {\"data\": self._data}\n d.update(self._get_attributes_dict())\n return _new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n # Null Handling Methods\n\n _na_value: float | NaTType = np.nan\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n @cache_readonly\n def _isnan(self) -> np.ndarray:\n \"\"\"\n Return if each value is NaN.\n \"\"\"\n if self._can_hold_na:\n return isna(self)\n else:\n # shouldn't reach to this condition by checking hasnans beforehand\n values = np.empty(len(self), dtype=np.bool_)\n values.fill(False)\n return values\n\n @cache_readonly\n def hasnans(self) -> bool:\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n if self._can_hold_na:\n return bool(self._isnan.any())\n else:\n return False\n\n @final\n def isna(self) -> np.ndarray:\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get\n mapped to ``True`` values.\n Everything else get mapped to ``False`` values. Characters such as\n empty strings `''` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n numpy.ndarray[bool]\n A boolean array of whether my values are NA.\n\n See Also\n --------\n Index.notna : Boolean inverse of isna.\n Index.dropna : Omit entries with missing values.\n isna : Top-level isna.\n Series.isna : Detect missing values in Series object.\n\n Examples\n --------\n Show which entries in a pandas.Index are NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.isna()\n array([False, False, True])\n\n Empty strings are not considered NA values. None is considered an NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.isna()\n array([False, False, False, True])\n\n For datetimes, `NaT` (Not a Time) is considered as an NA value.\n\n >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),\n ... pd.Timestamp(''), None, pd.NaT])\n >>> idx\n DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.isna()\n array([False, True, True, True])\n \"\"\"\n return self._isnan\n\n isnull = isna\n\n @final\n def notna(self) -> np.ndarray:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to ``True``. 
Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``\n values.\n\n Returns\n -------\n numpy.ndarray[bool]\n Boolean array to indicate which entries are not NA.\n\n See Also\n --------\n Index.notnull : Alias of notna.\n Index.isna: Inverse of notna.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in an Index are not NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.notna()\n array([ True, True, False])\n\n Empty strings are not considered NA values. None is considered a NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.notna()\n array([ True, True, True, False])\n \"\"\"\n return ~self.isna()\n\n notnull = notna\n\n def fillna(self, value=None, downcast=None):\n \"\"\"\n Fill NA/NaN values with the specified value.\n\n Parameters\n ----------\n value : scalar\n Scalar value to use to fill holes (e.g. 0).\n This value cannot be a list-likes.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n Index\n\n See Also\n --------\n DataFrame.fillna : Fill NaN values of a DataFrame.\n Series.fillna : Fill NaN Values of a Series.\n \"\"\"\n value = self._require_scalar(value)\n if self.hasnans:\n result = self.putmask(self._isnan, value)\n if downcast is None:\n # no need to care metadata other than name\n # because it can't have freq if\n return Index._with_infer(result, name=self.name)\n return self._view()\n\n def dropna(self: _IndexT, how: str_t = \"any\") -> _IndexT:\n \"\"\"\n Return Index without NA/NaN values.\n\n Parameters\n ----------\n how : {'any', 'all'}, default 'any'\n If the Index is a MultiIndex, drop the value when any or all levels\n are NaN.\n\n Returns\n -------\n Index\n \"\"\"\n if how not in (\"any\", \"all\"):\n raise ValueError(f\"invalid how option: {how}\")\n\n if self.hasnans:\n res_values = self._values[~self._isnan]\n return type(self)._simple_new(res_values, name=self.name)\n return self._view()\n\n # --------------------------------------------------------------------\n # Uniqueness Methods\n\n def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT:\n \"\"\"\n Return unique values in the index.\n\n Unique values are returned in order of appearance, this does NOT sort.\n\n Parameters\n ----------\n level : int or hashable, optional\n Only return values from specified level (for MultiIndex).\n If int, gets the level by integer position, else by level name.\n\n Returns\n -------\n Index\n\n See Also\n --------\n unique : Numpy array of unique values in that column.\n Series.unique : Return unique values of Series object.\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n\n if self.is_unique:\n return self._view()\n\n result = super().unique()\n return self._shallow_copy(result)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def drop_duplicates(self: _IndexT, keep: str_t | bool = \"first\") -> _IndexT:\n \"\"\"\n Return Index with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop 
duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n Returns\n -------\n deduplicated : Index\n\n See Also\n --------\n Series.drop_duplicates : Equivalent method on Series.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Index.duplicated : Related method on Index, indicating duplicate\n Index values.\n\n Examples\n --------\n Generate a pandas.Index with duplicate values.\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n The `keep` parameter controls which duplicate values are removed.\n The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> idx.drop_duplicates(keep='first')\n Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')\n\n The value 'last' keeps the last occurrence for each set of duplicated\n entries.\n\n >>> idx.drop_duplicates(keep='last')\n Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')\n\n The value ``False`` discards all sets of duplicated entries.\n\n >>> idx.drop_duplicates(keep=False)\n Index(['cow', 'beetle', 'hippo'], dtype='object')\n \"\"\"\n if self.is_unique:\n return self._view()\n\n return super().drop_duplicates(keep=keep)\n\n def duplicated(self, keep: Literal[\"first\", \"last\", False] = \"first\") -> np.ndarray:\n \"\"\"\n Indicate duplicate index values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n array. Either all duplicates, all except the first, or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n The value or values in a set of duplicates to mark as missing.\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n np.ndarray[bool]\n\n See Also\n --------\n Series.duplicated : Equivalent method on pandas.Series.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Index.drop_duplicates : Remove duplicate values from Index.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set to False and all others to True:\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> idx.duplicated()\n array([False, False, True, False, True])\n\n which is equivalent to\n\n >>> idx.duplicated(keep='first')\n array([False, False, True, False, True])\n\n By using 'last', the last occurrence of each set of duplicated values\n is set to False and all others to True:\n\n >>> idx.duplicated(keep='last')\n array([ True, False, True, False, False])\n\n By setting keep to ``False``, all duplicates are True:\n\n >>> idx.duplicated(keep=False)\n array([ True, False, True, False, True])\n \"\"\"\n if self.is_unique:\n # fastpath available bc we are immutable\n return np.zeros(len(self), dtype=bool)\n return self._duplicated(keep=keep)\n\n # --------------------------------------------------------------------\n # Arithmetic & Logical Methods\n\n def __iadd__(self, other):\n # alias for __add__\n return self + other\n\n @final\n def __and__(self, other):\n warnings.warn(\n \"Index.__and__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__and__. 
Use index.intersection(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.intersection(other)\n\n @final\n def __or__(self, other):\n warnings.warn(\n \"Index.__or__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__or__. Use index.union(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.union(other)\n\n @final\n def __xor__(self, other):\n warnings.warn(\n \"Index.__xor__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching \"\n \"Series.__xor__. Use index.symmetric_difference(other) instead\",\n FutureWarning,\n stacklevel=2,\n )\n return self.symmetric_difference(other)\n\n @final\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def _get_reconciled_name_object(self, other):\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the name changes, in which\n case make a shallow copy of self.\n \"\"\"\n name = get_op_result_name(self, other)\n if self.name != name:\n return self.rename(name)\n return self\n\n @final\n def _validate_sort_keyword(self, sort):\n if sort not in [None, False]:\n raise ValueError(\n \"The 'sort' keyword only takes the values of \"\n f\"None or False; {sort} was passed.\"\n )\n\n @final\n def union(self, other, sort=None):\n \"\"\"\n Form the union of two Index objects.\n\n If the Index objects are incompatible, both Index objects will be\n cast to dtype('object') first.\n\n .. versionchanged:: 0.25.0\n\n Parameters\n ----------\n other : Index or array-like\n sort : bool or None, default None\n Whether to sort the resulting Index.\n\n * None : Sort the result, except when\n\n 1. `self` and `other` are equal.\n 2. `self` or `other` has length 0.\n 3. Some values in `self` or `other` cannot be compared.\n A RuntimeWarning is issued in this case.\n\n * False : do not sort the result.\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n Union matching dtypes\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.union(idx2)\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n\n Union mismatched dtypes\n\n >>> idx1 = pd.Index(['a', 'b', 'c', 'd'])\n >>> idx2 = pd.Index([1, 2, 3, 4])\n >>> idx1.union(idx2)\n Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')\n\n MultiIndex case\n\n >>> idx1 = pd.MultiIndex.from_arrays(\n ... [[1, 1, 2, 2], [\"Red\", \"Blue\", \"Red\", \"Blue\"]]\n ... )\n >>> idx1\n MultiIndex([(1, 'Red'),\n (1, 'Blue'),\n (2, 'Red'),\n (2, 'Blue')],\n )\n >>> idx2 = pd.MultiIndex.from_arrays(\n ... [[3, 3, 2, 2], [\"Red\", \"Green\", \"Red\", \"Green\"]]\n ... 
)\n >>> idx2\n MultiIndex([(3, 'Red'),\n (3, 'Green'),\n (2, 'Red'),\n (2, 'Green')],\n )\n >>> idx1.union(idx2)\n MultiIndex([(1, 'Blue'),\n (1, 'Red'),\n (2, 'Blue'),\n (2, 'Green'),\n (2, 'Red'),\n (3, 'Green'),\n (3, 'Red')],\n )\n >>> idx1.union(idx2, sort=False)\n MultiIndex([(1, 'Red'),\n (1, 'Blue'),\n (2, 'Red'),\n (2, 'Blue'),\n (3, 'Red'),\n (3, 'Green'),\n (2, 'Green')],\n )\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n if (\n isinstance(self, ABCMultiIndex)\n and not is_object_dtype(unpack_nested_dtype(other))\n and len(other) > 0\n ):\n raise NotImplementedError(\n \"Can only union MultiIndex with MultiIndex or Index of tuples, \"\n \"try mi.to_flat_index().union(other) instead.\"\n )\n if (\n isinstance(self, ABCDatetimeIndex)\n and isinstance(other, ABCDatetimeIndex)\n and self.tz is not None\n and other.tz is not None\n ):\n # GH#39328\n warnings.warn(\n \"In a future version, the union of DatetimeIndex objects \"\n \"with mismatched timezones will cast both to UTC instead of \"\n \"object dtype. To retain the old behavior, \"\n \"use `index.astype(object).union(other)`\",\n FutureWarning,\n stacklevel=2,\n )\n\n dtype = self._find_common_type_compat(other)\n left = self.astype(dtype, copy=False)\n right = other.astype(dtype, copy=False)\n return left.union(right, sort=sort)\n\n elif not len(other) or self.equals(other):\n # NB: whether this (and the `if not len(self)` check below) come before\n # or after the is_dtype_equal check above affects the returned dtype\n return self._get_reconciled_name_object(other)\n\n elif not len(self):\n return other._get_reconciled_name_object(self)\n\n result = self._union(other, sort=sort)\n\n return self._wrap_setop_result(other, result)\n\n def _union(self, other: Index, sort):\n \"\"\"\n Specific union logic should go here. In subclasses, union behavior\n should be overwritten here rather than in `self.union`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n Index\n \"\"\"\n # TODO(EA): setops-refactor, clean all this up\n lvals = self._values\n rvals = other._values\n\n if (\n sort is None\n and self.is_monotonic\n and other.is_monotonic\n and not (self.has_duplicates and other.has_duplicates)\n ):\n # Both are unique and monotonic, so can use outer join\n try:\n return self._outer_indexer(other)[0]\n except (TypeError, IncompatibleFrequency):\n # incomparable objects\n value_list = list(lvals)\n\n # worth making this faster? 
a very unusual case\n value_set = set(lvals)\n value_list.extend([x for x in rvals if x not in value_set])\n # If objects are unorderable, we must have object dtype.\n return np.array(value_list, dtype=object)\n\n elif not other.is_unique:\n # other has duplicates\n result = algos.union_with_duplicates(lvals, rvals)\n return _maybe_try_sort(result, sort)\n\n # Self may have duplicates\n # find indexes of things in \"other\" that are not in \"self\"\n if self._index_as_unique:\n indexer = self.get_indexer(other)\n missing = (indexer == -1).nonzero()[0]\n else:\n missing = algos.unique1d(self.get_indexer_non_unique(other)[1])\n\n if len(missing) > 0:\n other_diff = rvals.take(missing)\n result = concat_compat((lvals, other_diff))\n else:\n result = lvals\n\n if not self.is_monotonic or not other.is_monotonic:\n result = _maybe_try_sort(result, sort)\n\n return result\n\n @final\n def _wrap_setop_result(self, other: Index, result) -> Index:\n name = get_op_result_name(self, other)\n if isinstance(result, Index):\n if result.name != name:\n return result.rename(name)\n return result\n else:\n return self._shallow_copy(result, name=name)\n\n # TODO: standardize return type of non-union setops type(self vs other)\n @final\n def intersection(self, other, sort=False):\n \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2)\n Int64Index([3, 4], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if self.equals(other):\n if self.has_duplicates:\n return self.unique()._get_reconciled_name_object(other)\n return self._get_reconciled_name_object(other)\n\n if len(self) == 0 or len(other) == 0:\n # fastpath; we need to be careful about having commutativity\n\n if self._is_multi or other._is_multi:\n # _convert_can_do_setop ensures that we have both or neither\n # We retain self.levels\n return self[:0].rename(result_name)\n\n dtype = self._find_common_type_compat(other)\n if is_dtype_equal(self.dtype, dtype):\n # Slicing allows us to retain DTI/TDI.freq, RangeIndex\n\n # Note: self[:0] vs other[:0] affects\n # 1) which index's `freq` we get in DTI/TDI cases\n # This may be a historical artifact, i.e. 
no documented\n # reason for this choice.\n # 2) The `step` we get in RangeIndex cases\n if len(self) == 0:\n return self[:0].rename(result_name)\n else:\n return other[:0].rename(result_name)\n\n return Index([], dtype=dtype, name=result_name)\n\n elif not self._should_compare(other):\n # We can infer that the intersection is empty.\n if isinstance(self, ABCMultiIndex):\n return self[:0].rename(result_name)\n return Index([], name=result_name)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n other = other.astype(dtype, copy=False)\n return this.intersection(other, sort=sort)\n\n result = self._intersection(other, sort=sort)\n return self._wrap_intersection_result(other, result)\n\n def _intersection(self, other: Index, sort=False):\n \"\"\"\n intersection specialized to the case with matching dtypes.\n \"\"\"\n if (\n self.is_monotonic\n and other.is_monotonic\n and not is_interval_dtype(self.dtype)\n ):\n # For IntervalIndex _inner_indexer is not more performant than get_indexer,\n # so don't take this fastpath\n try:\n result = self._inner_indexer(other)[0]\n except TypeError:\n pass\n else:\n # TODO: algos.unique1d should preserve DTA/TDA\n res = algos.unique1d(result)\n return ensure_wrapped_if_datetimelike(res)\n\n res_values = self._intersection_via_get_indexer(other, sort=sort)\n res_values = _maybe_try_sort(res_values, sort)\n return res_values\n\n def _wrap_intersection_result(self, other, result):\n # We will override for MultiIndex to handle empty results\n return self._wrap_setop_result(other, result)\n\n @final\n def _intersection_via_get_indexer(self, other: Index, sort) -> ArrayLike:\n \"\"\"\n Find the intersection of two Indexes using get_indexer.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n The returned array will be unique.\n \"\"\"\n left_unique = self.unique()\n right_unique = other.unique()\n\n # even though we are unique, we need get_indexer_for for IntervalIndex\n indexer = left_unique.get_indexer_for(right_unique)\n\n mask = indexer != -1\n\n taker = indexer.take(mask.nonzero()[0])\n if sort is False:\n # sort bc we want the elements in the same order they are in self\n # unnecessary in the case with sort=None bc we will sort later\n taker = np.sort(taker)\n\n result = left_unique.take(taker)._values\n return result\n\n @final\n def difference(self, other, sort=None):\n \"\"\"\n Return a new Index with elements of index not in `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default None\n Whether to sort the resulting index. 
By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n >>> idx1 = pd.Index([2, 1, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n >>> idx1.difference(idx2, sort=False)\n Int64Index([2, 1], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if self.equals(other):\n # Note: we do not (yet) sort even if sort=None GH#24959\n return self[:0].rename(result_name)\n\n if len(other) == 0:\n # Note: we do not (yet) sort even if sort=None GH#24959\n return self.rename(result_name)\n\n if not self._should_compare(other):\n # Nothing matches -> difference is everything\n return self.rename(result_name)\n\n result = self._difference(other, sort=sort)\n return self._wrap_difference_result(other, result)\n\n def _difference(self, other, sort):\n # overridden by RangeIndex\n\n this = self.unique()\n\n indexer = this.get_indexer_for(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)\n the_diff = this._values.take(label_diff)\n the_diff = _maybe_try_sort(the_diff, sort)\n\n return the_diff\n\n def _wrap_difference_result(self, other, result):\n # We will override for MultiIndex to handle empty results\n return self._wrap_setop_result(other, result)\n\n def symmetric_difference(self, other, result_name=None, sort=None):\n \"\"\"\n Compute the symmetric difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by\n ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n dropped.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([2, 3, 4, 5])\n >>> idx1.symmetric_difference(idx2)\n Int64Index([1, 5], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name_update = self._convert_can_do_setop(other)\n if result_name is None:\n result_name = result_name_update\n\n if not self._should_compare(other):\n return self.union(other, sort=sort).rename(result_name)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n that = other.astype(dtype, copy=False)\n return this.symmetric_difference(that, sort=sort).rename(result_name)\n\n this = self.unique()\n other = other.unique()\n indexer = this.get_indexer_for(other)\n\n # {this} minus {other}\n common_indexer = indexer.take((indexer != -1).nonzero()[0])\n left_indexer = np.setdiff1d(\n np.arange(this.size), common_indexer, assume_unique=True\n )\n left_diff = this._values.take(left_indexer)\n\n # {other} minus {this}\n right_indexer = (indexer == -1).nonzero()[0]\n right_diff = other._values.take(right_indexer)\n\n res_values = concat_compat([left_diff, right_diff])\n res_values = _maybe_try_sort(res_values, sort)\n\n result = Index(res_values, name=result_name)\n\n if self._is_multi:\n self = cast(\"MultiIndex\", self)\n if len(result) == 0:\n # On equal symmetric_difference MultiIndexes the difference is empty.\n # Therefore, an empty MultiIndex is returned GH#13490\n return type(self)(\n levels=[[] for _ in range(self.nlevels)],\n codes=[[] for _ in range(self.nlevels)],\n names=result.name,\n )\n return type(self).from_tuples(result, names=result.name)\n\n return result\n\n @final\n def _assert_can_do_setop(self, other) -> bool:\n if not is_list_like(other):\n raise TypeError(\"Input must be Index or array-like\")\n return True\n\n def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n tolerance : int or float, optional\n Maximum distance from index value for inexact matches. 
The value of\n the index at the matching location must satisfy the equation\n ``abs(index[loc] - key) <= tolerance``.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> unique_index = pd.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n\n >>> monotonic_index = pd.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n\n >>> non_monotonic_index = pd.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True])\n \"\"\"\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if using pad, \"\n \"backfill or nearest lookups\"\n )\n casted_key = self._maybe_cast_indexer(key)\n try:\n return self._engine.get_loc(casted_key)\n except KeyError as err:\n raise KeyError(key) from err\n\n # GH#42269\n warnings.warn(\n f\"Passing method to {type(self).__name__}.get_loc is deprecated \"\n \"and will raise in a future version. Use \"\n \"index.get_indexer([item], method=...) instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n if is_scalar(key) and isna(key) and not self.hasnans:\n raise KeyError(key)\n\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, np.asarray(key))\n\n indexer = self.get_indexer([key], method=method, tolerance=tolerance)\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError(\"get_loc requires scalar valued input\")\n loc = indexer.item()\n if loc == -1:\n raise KeyError(key)\n return loc\n\n _index_shared_docs[\n \"get_indexer\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n limit : int, optional\n Maximum number of consecutive labels in ``target`` to match for\n inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n Returns\n -------\n indexer : np.ndarray[np.intp]\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. 
Missing values\n in the target are marked by -1.\n %(raises_section)s\n Examples\n --------\n >>> index = pd.Index(['c', 'a', 'b'])\n >>> index.get_indexer(['a', 'b', 'x'])\n array([ 1, 2, -1])\n\n Notice that the return value is an array of locations in ``index``\n and ``x`` is marked by -1, as it is not in ``index``.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n @final\n def get_indexer(\n self,\n target,\n method: str_t | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> npt.NDArray[np.intp]:\n method = missing.clean_reindex_fill_method(method)\n target = self._maybe_cast_listlike_indexer(target)\n\n self._check_indexing_method(method, limit, tolerance)\n\n if not self._index_as_unique:\n raise InvalidIndexError(self._requires_unique_msg)\n\n if len(target) == 0:\n return np.array([], dtype=np.intp)\n\n if not self._should_compare(target) and not self._should_partial_index(target):\n # IntervalIndex get special treatment bc numeric scalars can be\n # matched to Interval scalars\n return self._get_indexer_non_comparable(target, method=method, unique=True)\n\n if is_categorical_dtype(self.dtype):\n # _maybe_cast_listlike_indexer ensures target has our dtype\n # (could improve perf by doing _should_compare check earlier?)\n assert is_dtype_equal(self.dtype, target.dtype)\n\n indexer = self._engine.get_indexer(target.codes)\n if self.hasnans and target.hasnans:\n loc = self.get_loc(np.nan)\n mask = target.isna()\n indexer[mask] = loc\n return indexer\n\n if is_categorical_dtype(target.dtype):\n # potential fastpath\n # get an indexer for unique categories then propagate to codes via take_nd\n # get_indexer instead of _get_indexer needed for MultiIndex cases\n # e.g. test_append_different_columns_types\n categories_indexer = self.get_indexer(target.categories)\n\n indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)\n\n if (not self._is_multi and self.hasnans) and target.hasnans:\n # Exclude MultiIndex because hasnans raises NotImplementedError\n # we should only get here if we are unique, so loc is an integer\n # GH#41934\n loc = self.get_loc(np.nan)\n mask = target.isna()\n indexer[mask] = loc\n\n return ensure_platform_int(indexer)\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer(\n ptarget, method=method, limit=limit, tolerance=tolerance\n )\n\n if is_dtype_equal(self.dtype, target.dtype) and self.equals(target):\n # Only call equals if we have same dtype to avoid inference/casting\n return np.arange(len(target), dtype=np.intp)\n\n if not is_dtype_equal(self.dtype, target.dtype) and not is_interval_dtype(\n self.dtype\n ):\n # IntervalIndex gets special treatment for partial-indexing\n dtype = self._find_common_type_compat(target)\n\n this = self.astype(dtype, copy=False)\n target = target.astype(dtype, copy=False)\n return this._get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n\n return self._get_indexer(target, method, limit, tolerance)\n\n def _get_indexer(\n self,\n target: Index,\n method: str_t | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> np.ndarray:\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n\n if method in [\"pad\", \"backfill\"]:\n indexer = self._get_fill_indexer(target, method, limit, tolerance)\n elif method == \"nearest\":\n indexer = self._get_nearest_indexer(target, limit, tolerance)\n else:\n indexer = 
self._engine.get_indexer(target._get_engine_target())\n\n return ensure_platform_int(indexer)\n\n @final\n def _should_partial_index(self, target: Index) -> bool:\n \"\"\"\n Should we attempt partial-matching indexing?\n \"\"\"\n if is_interval_dtype(self.dtype):\n # \"Index\" has no attribute \"left\"\n return self.left._should_compare(target) # type: ignore[attr-defined]\n return False\n\n @final\n def _check_indexing_method(\n self,\n method: str_t | None,\n limit: int | None = None,\n tolerance=None,\n ) -> None:\n \"\"\"\n Raise if we have a get_indexer `method` that is not supported or valid.\n \"\"\"\n if method not in [None, \"bfill\", \"backfill\", \"pad\", \"ffill\", \"nearest\"]:\n # in practice the clean_reindex_fill_method call would raise\n # before we get here\n raise ValueError(\"Invalid fill method\") # pragma: no cover\n\n if self._is_multi:\n if method == \"nearest\":\n raise NotImplementedError(\n \"method='nearest' not implemented yet \"\n \"for MultiIndex; see GitHub issue 9365\"\n )\n elif method == \"pad\" or method == \"backfill\":\n if tolerance is not None:\n raise NotImplementedError(\n \"tolerance not implemented yet for MultiIndex\"\n )\n\n if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype):\n # GH#37871 for now this is only for IntervalIndex and CategoricalIndex\n if method is not None:\n raise NotImplementedError(\n f\"method {method} not yet implemented for {type(self).__name__}\"\n )\n\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n if limit is not None:\n raise ValueError(\n \"limit argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n\n def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:\n # override this method on subclasses\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError(\"list-like tolerance size must match target index size\")\n return tolerance\n\n @final\n def _get_fill_indexer(\n self, target: Index, method: str_t, limit: int | None = None, tolerance=None\n ) -> np.ndarray:\n\n if self._is_multi:\n # TODO: get_indexer_with_fill docstring says values must be _sorted_\n # but that doesn't appear to be enforced\n return self._engine.get_indexer_with_fill(\n target=target._values, values=self._values, method=method, limit=limit\n )\n\n target_values = target._get_engine_target()\n\n if self.is_monotonic_increasing and target.is_monotonic_increasing:\n engine_method = (\n self._engine.get_pad_indexer\n if method == \"pad\"\n else self._engine.get_backfill_indexer\n )\n indexer = engine_method(target_values, limit)\n else:\n indexer = self._get_fill_indexer_searchsorted(target, method, limit)\n if tolerance is not None and len(self):\n indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)\n return indexer\n\n @final\n def _get_fill_indexer_searchsorted(\n self, target: Index, method: str_t, limit: int | None = None\n ) -> np.ndarray:\n \"\"\"\n Fallback pad/backfill get_indexer that works for monotonic decreasing\n indexes and non-monotonic targets.\n \"\"\"\n if limit is not None:\n raise ValueError(\n f\"limit argument for {repr(method)} method only well-defined \"\n \"if index and target are monotonic\"\n )\n\n side = \"left\" if method == \"pad\" else \"right\"\n\n # find exact matches first (this simplifies the algorithm)\n indexer = self.get_indexer(target)\n nonexact = 
indexer == -1\n indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)\n if side == \"left\":\n # searchsorted returns \"indices into a sorted array such that,\n # if the corresponding elements in v were inserted before the\n # indices, the order of a would be preserved\".\n # Thus, we need to subtract 1 to find values to the left.\n indexer[nonexact] -= 1\n # This also mapped not found values (values of 0 from\n # np.searchsorted) to -1, which conveniently is also our\n # sentinel for missing values\n else:\n # Mark indices to the right of the largest value as not found\n indexer[indexer == len(self)] = -1\n return indexer\n\n @final\n def _get_nearest_indexer(\n self, target: Index, limit: int | None, tolerance\n ) -> np.ndarray:\n \"\"\"\n Get the indexer for the nearest index labels; requires an index with\n values that can be subtracted from each other (e.g., not strings or\n tuples).\n \"\"\"\n if not len(self):\n return self._get_fill_indexer(target, \"pad\")\n\n left_indexer = self.get_indexer(target, \"pad\", limit=limit)\n right_indexer = self.get_indexer(target, \"backfill\", limit=limit)\n\n target_values = target._get_engine_target()\n own_values = self._get_engine_target()\n left_distances = np.abs(own_values[left_indexer] - target_values)\n right_distances = np.abs(own_values[right_indexer] - target_values)\n\n op = operator.lt if self.is_monotonic_increasing else operator.le\n indexer = np.where(\n op(left_distances, right_distances) | (right_indexer == -1),\n left_indexer,\n right_indexer,\n )\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)\n return indexer\n\n @final\n def _filter_indexer_tolerance(\n self,\n target: Index | np.ndarray | ExtensionArray,\n indexer: np.ndarray,\n tolerance,\n ) -> np.ndarray:\n own_values = self._get_engine_target()\n distance = abs(own_values[indexer] - target)\n return np.where(distance <= tolerance, indexer, -1)\n\n # --------------------------------------------------------------------\n # Indexer Conversion Methods\n\n @final\n def _validate_positional_slice(self, key: slice) -> None:\n \"\"\"\n For positional indexing, a slice must have either int or None\n for each of start, stop, and step.\n \"\"\"\n self._validate_indexer(\"positional\", key.start, \"iloc\")\n self._validate_indexer(\"positional\", key.stop, \"iloc\")\n self._validate_indexer(\"positional\", key.step, \"iloc\")\n\n def _convert_slice_indexer(self, key: slice, kind: str_t):\n \"\"\"\n Convert a slice indexer.\n\n By definition, these are labels unless 'iloc' is passed in.\n Floats are not allowed as the start, step, or stop of the slice.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'loc', 'getitem'}\n \"\"\"\n assert kind in [\"loc\", \"getitem\"], kind\n\n # potentially cast the bounds to integers\n start, stop, step = key.start, key.stop, key.step\n\n # figure out if this is a positional indexer\n def is_int(v):\n return v is None or is_integer(v)\n\n is_index_slice = is_int(start) and is_int(stop) and is_int(step)\n is_positional = is_index_slice and not (\n self.is_integer() or self.is_categorical()\n )\n\n if kind == \"getitem\":\n \"\"\"\n called from the getitem slicers, validate that we are in fact\n integers\n \"\"\"\n if self.is_integer() or is_index_slice:\n self._validate_indexer(\"slice\", key.start, \"getitem\")\n self._validate_indexer(\"slice\", key.stop, \"getitem\")\n self._validate_indexer(\"slice\", key.step, \"getitem\")\n return key\n\n # 
convert the slice to an indexer here\n\n # if we are mixed and have integers\n if is_positional:\n try:\n # Validate start & stop\n if start is not None:\n self.get_loc(start)\n if stop is not None:\n self.get_loc(stop)\n is_positional = False\n except KeyError:\n pass\n\n if com.is_null_slice(key):\n # It doesn't matter if we are positional or label based\n indexer = key\n elif is_positional:\n if kind == \"loc\":\n # GH#16121, GH#24612, GH#31810\n warnings.warn(\n \"Slicing a positional slice with .loc is not supported, \"\n \"and will raise TypeError in a future version. \"\n \"Use .loc with labels or .iloc with positions instead.\",\n FutureWarning,\n stacklevel=5,\n )\n indexer = key\n else:\n indexer = self.slice_indexer(start, stop, step)\n\n return indexer\n\n @final\n def _invalid_indexer(self, form: str_t, key) -> TypeError:\n \"\"\"\n Consistent invalid indexer message.\n \"\"\"\n return TypeError(\n f\"cannot do {form} indexing on {type(self).__name__} with these \"\n f\"indexers [{key}] of type {type(key).__name__}\"\n )\n\n # --------------------------------------------------------------------\n # Reindex Methods\n\n @final\n def _validate_can_reindex(self, indexer: np.ndarray) -> None:\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer ndarray\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n # trying to reindex on an axis with duplicates\n if not self._index_as_unique and len(indexer):\n raise ValueError(\"cannot reindex on an axis with duplicate labels\")\n\n def reindex(\n self, target, method=None, level=None, limit=None, tolerance=None\n ) -> tuple[Index, npt.NDArray[np.intp] | None]:\n \"\"\"\n Create index with target's values.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray[np.intp] or None\n Indices of output values in original index.\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. 
neither Index nor Series).\n preserve_names = not hasattr(target, \"name\")\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n target = ensure_has_len(target) # target may be an iterator\n\n if not isinstance(target, Index) and len(target) == 0:\n if level is not None and self._is_multi:\n # \"Index\" has no attribute \"levels\"; maybe \"nlevels\"?\n idx = self.levels[level] # type: ignore[attr-defined]\n else:\n idx = self\n target = idx[:0]\n else:\n target = ensure_index(target)\n\n if level is not None:\n if method is not None:\n raise TypeError(\"Fill method not supported if level passed\")\n\n # TODO: tests where passing `keep_order=not self._is_multi`\n # makes a difference for non-MultiIndex case\n target, indexer, _ = self._join_level(\n target, level, how=\"right\", keep_order=not self._is_multi\n )\n\n else:\n if self.equals(target):\n indexer = None\n else:\n if self._index_as_unique:\n indexer = self.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n elif self._is_multi:\n raise ValueError(\"cannot handle a non-unique multi-index!\")\n else:\n if method is not None or limit is not None:\n raise ValueError(\n \"cannot reindex a non-unique index \"\n \"with a method or limit\"\n )\n indexer, _ = self.get_indexer_non_unique(target)\n\n if not self.is_unique:\n # GH#42568\n warnings.warn(\n \"reindexing with a non-unique Index is deprecated and \"\n \"will raise in a future version\",\n FutureWarning,\n stacklevel=2,\n )\n\n target = self._wrap_reindex_result(target, indexer, preserve_names)\n return target, indexer\n\n def _wrap_reindex_result(self, target, indexer, preserve_names: bool):\n target = self._maybe_preserve_names(target, preserve_names)\n return target\n\n def _maybe_preserve_names(self, target: Index, preserve_names: bool):\n if preserve_names and target.nlevels == 1 and target.name != self.name:\n target = target.copy(deep=False)\n target.name = self.name\n return target\n\n @final\n def _reindex_non_unique(\n self, target: Index\n ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:\n \"\"\"\n Create a new index with target's values (move/add/delete values as\n necessary) use with non-unique Index and a possibly non-unique target.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray[np.intp]\n Indices of output values in original index.\n new_indexer : np.ndarray[np.intp] or None\n\n \"\"\"\n target = ensure_index(target)\n if len(target) == 0:\n # GH#13691\n return self[:0], np.array([], dtype=np.intp), None\n\n indexer, missing = self.get_indexer_non_unique(target)\n check = indexer != -1\n new_labels = self.take(indexer[check])\n new_indexer = None\n\n if len(missing):\n length = np.arange(len(indexer), dtype=np.intp)\n\n missing = ensure_platform_int(missing)\n missing_labels = target.take(missing)\n missing_indexer = length[~check]\n cur_labels = self.take(indexer[check]).values\n cur_indexer = length[check]\n\n # Index constructor below will do inference\n new_labels = np.empty((len(indexer),), dtype=object)\n new_labels[cur_indexer] = cur_labels\n new_labels[missing_indexer] = missing_labels\n\n # GH#38906\n if not len(self):\n\n new_indexer = np.arange(0, dtype=np.intp)\n\n # a unique indexer\n elif target.is_unique:\n\n # see GH5553, make sure we use the right indexer\n new_indexer = np.arange(len(indexer), dtype=np.intp)\n new_indexer[cur_indexer] = np.arange(len(cur_labels))\n new_indexer[missing_indexer] = 
-1\n\n # we have a non_unique selector, need to use the original\n # indexer here\n else:\n\n # need to retake to have the same size as the indexer\n indexer[~check] = -1\n\n # reset the new indexer to account for the new size\n new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)\n new_indexer[~check] = -1\n\n if isinstance(self, ABCMultiIndex):\n new_index = type(self).from_tuples(new_labels, names=self.names)\n else:\n new_index = Index._with_infer(new_labels, name=self.name)\n return new_index, indexer, new_indexer\n\n # --------------------------------------------------------------------\n # Join Methods\n\n @final\n @_maybe_return_indexers\n def join(\n self,\n other,\n how: str_t = \"left\",\n level=None,\n return_indexers: bool = False,\n sort: bool = False,\n ):\n \"\"\"\n Compute join_index and indexers to conform data\n structures to the new index.\n\n Parameters\n ----------\n other : Index\n how : {'left', 'right', 'inner', 'outer'}\n level : int or level name, default None\n return_indexers : bool, default False\n sort : bool, default False\n Sort the join keys lexicographically in the result Index. If False,\n the order of the join keys depends on the join type (how keyword).\n\n Returns\n -------\n join_index, (left_indexer, right_indexer)\n \"\"\"\n other = ensure_index(other)\n self_is_mi = isinstance(self, ABCMultiIndex)\n other_is_mi = isinstance(other, ABCMultiIndex)\n\n if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):\n if (self.tz is None) ^ (other.tz is None):\n # Raise instead of casting to object below.\n raise TypeError(\"Cannot join tz-naive with tz-aware DatetimeIndex\")\n\n if not self._is_multi and not other._is_multi:\n # We have specific handling for MultiIndex below\n pself, pother = self._maybe_promote(other)\n if pself is not self or pother is not other:\n return pself.join(\n pother, how=how, level=level, return_indexers=True, sort=sort\n )\n\n lindexer: np.ndarray | None\n rindexer: np.ndarray | None\n\n # try to figure out the join level\n # GH3662\n if level is None and (self_is_mi or other_is_mi):\n\n # have the same levels/names so a simple join\n if self.names == other.names:\n pass\n else:\n return self._join_multi(other, how=how)\n\n # join on the level\n if level is not None and (self_is_mi or other_is_mi):\n return self._join_level(other, level, how=how)\n\n if len(other) == 0 and how in (\"left\", \"outer\"):\n join_index = self._view()\n rindexer = np.repeat(np.intp(-1), len(join_index))\n return join_index, None, rindexer\n\n if len(self) == 0 and how in (\"right\", \"outer\"):\n join_index = other._view()\n lindexer = np.repeat(np.intp(-1), len(join_index))\n return join_index, lindexer, None\n\n if self._join_precedence < other._join_precedence:\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n join_index, lidx, ridx = other.join(\n self, how=how, level=level, return_indexers=True\n )\n lidx, ridx = ridx, lidx\n return join_index, lidx, ridx\n\n if not is_dtype_equal(self.dtype, other.dtype):\n dtype = self._find_common_type_compat(other)\n this = self.astype(dtype, copy=False)\n other = other.astype(dtype, copy=False)\n return this.join(other, how=how, return_indexers=True)\n\n _validate_join_method(how)\n\n if not self.is_unique and not other.is_unique:\n return self._join_non_unique(other, how=how)\n elif not self.is_unique or not other.is_unique:\n if self.is_monotonic and other.is_monotonic:\n return self._join_monotonic(other, how=how)\n else:\n return 
self._join_non_unique(other, how=how)\n elif (\n self.is_monotonic\n and other.is_monotonic\n and (\n not isinstance(self, ABCMultiIndex)\n or not any(is_categorical_dtype(dtype) for dtype in self.dtypes)\n )\n ):\n # Categorical is monotonic if data are ordered as categories, but join can\n # not handle this in case of not lexicographically monotonic GH#38502\n try:\n return self._join_monotonic(other, how=how)\n except TypeError:\n pass\n\n if how == \"left\":\n join_index = self\n elif how == \"right\":\n join_index = other\n elif how == \"inner\":\n # TODO: sort=False here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.intersection(other, sort=False)\n elif how == \"outer\":\n # TODO: sort=True here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.union(other)\n\n if sort:\n join_index = join_index.sort_values()\n\n if join_index is self:\n lindexer = None\n else:\n lindexer = self.get_indexer(join_index)\n if join_index is other:\n rindexer = None\n else:\n rindexer = other.get_indexer(join_index)\n return join_index, lindexer, rindexer\n\n @final\n def _join_multi(self, other: Index, how: str_t):\n from pandas.core.indexes.multi import MultiIndex\n from pandas.core.reshape.merge import restore_dropped_levels_multijoin\n\n # figure out join names\n self_names_list = list(com.not_none(*self.names))\n other_names_list = list(com.not_none(*other.names))\n self_names_order = self_names_list.index\n other_names_order = other_names_list.index\n self_names = set(self_names_list)\n other_names = set(other_names_list)\n overlap = self_names & other_names\n\n # need at least 1 in common\n if not overlap:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n\n # Drop the non-matching levels from left and right respectively\n ldrop_names = sorted(self_names - overlap, key=self_names_order)\n rdrop_names = sorted(other_names - overlap, key=other_names_order)\n\n # if only the order differs\n if not len(ldrop_names + rdrop_names):\n self_jnlevels = self\n other_jnlevels = other.reorder_levels(self.names)\n else:\n self_jnlevels = self.droplevel(ldrop_names)\n other_jnlevels = other.droplevel(rdrop_names)\n\n # Join left and right\n # Join on same leveled multi-index frames is supported\n join_idx, lidx, ridx = self_jnlevels.join(\n other_jnlevels, how, return_indexers=True\n )\n\n # Restore the dropped levels\n # Returned index level order is\n # common levels, ldrop_names, rdrop_names\n dropped_names = ldrop_names + rdrop_names\n\n levels, codes, names = restore_dropped_levels_multijoin(\n self, other, dropped_names, join_idx, lidx, ridx\n )\n\n # Re-create the multi-index\n multi_join_idx = MultiIndex(\n levels=levels, codes=codes, names=names, verify_integrity=False\n )\n\n multi_join_idx = multi_join_idx.remove_unused_levels()\n\n return multi_join_idx, lidx, ridx\n\n jl = list(overlap)[0]\n\n # Case where only one index is multi\n # make the indices into mi's that match\n flip_order = False\n if isinstance(self, MultiIndex):\n self, other = other, self\n flip_order = True\n # flip if join method is right or left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n level = other.names.index(jl)\n result = self._join_level(other, level, how=how)\n\n if flip_order:\n return result[0], result[2], result[1]\n return result\n\n @final\n def _join_non_unique(\n self, other: Index, how: 
str_t = \"left\"\n ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n from pandas.core.reshape.merge import get_join_indexers\n\n # We only get here if dtypes match\n assert self.dtype == other.dtype\n\n lvalues = self._get_join_target()\n rvalues = other._get_join_target()\n\n left_idx, right_idx = get_join_indexers(\n [lvalues], [rvalues], how=how, sort=True\n )\n\n left_idx = ensure_platform_int(left_idx)\n right_idx = ensure_platform_int(right_idx)\n\n join_array = np.asarray(lvalues.take(left_idx))\n mask = left_idx == -1\n np.putmask(join_array, mask, rvalues.take(right_idx))\n\n join_arraylike = self._from_join_target(join_array)\n join_index = self._wrap_joined_index(join_arraylike, other)\n\n return join_index, left_idx, right_idx\n\n @final\n def _join_level(\n self, other: Index, level, how: str_t = \"left\", keep_order: bool = True\n ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:\n \"\"\"\n The join method *only* affects the level of the resulting\n MultiIndex. Otherwise it just exactly aligns the Index data to the\n labels of the level in the MultiIndex.\n\n If ```keep_order == True```, the order of the data indexed by the\n MultiIndex will not be changed; otherwise, it will tie out\n with `other`.\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:\n \"\"\"\n Returns sorter for the inner most level while preserving the\n order of higher levels.\n\n Parameters\n ----------\n labels : list[np.ndarray]\n Each ndarray has signed integer dtype, not necessarily identical.\n\n Returns\n -------\n np.ndarray[np.intp]\n \"\"\"\n if labels[0].size == 0:\n return np.empty(0, dtype=np.intp)\n\n if len(labels) == 1:\n return get_group_index_sorter(ensure_platform_int(labels[0]))\n\n # find indexers of beginning of each set of\n # same-key labels w.r.t all but last level\n tic = labels[0][:-1] != labels[0][1:]\n for lab in labels[1:-1]:\n tic |= lab[:-1] != lab[1:]\n\n starts = np.hstack(([True], tic, [True])).nonzero()[0]\n lab = ensure_int64(labels[-1])\n return lib.get_level_sorter(lab, ensure_platform_int(starts))\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n raise TypeError(\"Join on level between two MultiIndex objects is ambiguous\")\n\n left, right = self, other\n\n flip_order = not isinstance(self, MultiIndex)\n if flip_order:\n left, right = right, left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n assert isinstance(left, MultiIndex)\n\n level = left._get_level_number(level)\n old_level = left.levels[level]\n\n if not right.is_unique:\n raise NotImplementedError(\n \"Index._join_level on non-unique index is not implemented\"\n )\n\n new_level, left_lev_indexer, right_lev_indexer = old_level.join(\n right, how=how, return_indexers=True\n )\n\n if left_lev_indexer is None:\n if keep_order or len(left) == 0:\n left_indexer = None\n join_index = left\n else: # sort the leaves\n left_indexer = _get_leaf_sorter(left.codes[: level + 1])\n join_index = left[left_indexer]\n\n else:\n left_lev_indexer = ensure_platform_int(left_lev_indexer)\n rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))\n old_codes = left.codes[level]\n\n taker = old_codes[old_codes != -1]\n new_lev_codes = rev_indexer.take(taker)\n\n new_codes = list(left.codes)\n new_codes[level] = new_lev_codes\n\n new_levels = list(left.levels)\n new_levels[level] = new_level\n\n if keep_order: # just drop missing values. o.w. 
keep order\n left_indexer = np.arange(len(left), dtype=np.intp)\n left_indexer = cast(np.ndarray, left_indexer)\n mask = new_lev_codes != -1\n if not mask.all():\n new_codes = [lab[mask] for lab in new_codes]\n left_indexer = left_indexer[mask]\n\n else: # tie out the order with other\n if level == 0: # outer most level, take the fast route\n max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()\n ngroups = 1 + max_new_lev\n left_indexer, counts = libalgos.groupsort_indexer(\n new_lev_codes, ngroups\n )\n\n # missing values are placed first; drop them!\n left_indexer = left_indexer[counts[0] :]\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n else: # sort the leaves\n mask = new_lev_codes != -1\n mask_all = mask.all()\n if not mask_all:\n new_codes = [lab[mask] for lab in new_codes]\n\n left_indexer = _get_leaf_sorter(new_codes[: level + 1])\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n # left_indexers are w.r.t masked frame.\n # reverse to original frame!\n if not mask_all:\n left_indexer = mask.nonzero()[0][left_indexer]\n\n join_index = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=left.names,\n verify_integrity=False,\n )\n\n if right_lev_indexer is not None:\n right_indexer = right_lev_indexer.take(join_index.codes[level])\n else:\n right_indexer = join_index.codes[level]\n\n if flip_order:\n left_indexer, right_indexer = right_indexer, left_indexer\n\n left_indexer = (\n None if left_indexer is None else ensure_platform_int(left_indexer)\n )\n right_indexer = (\n None if right_indexer is None else ensure_platform_int(right_indexer)\n )\n return join_index, left_indexer, right_indexer\n\n @final\n def _join_monotonic(self, other: Index, how: str_t = \"left\"):\n # We only get here with matching dtypes\n assert other.dtype == self.dtype\n\n if self.equals(other):\n ret_index = other if how == \"right\" else self\n return ret_index, None, None\n\n ridx: np.ndarray | None\n lidx: np.ndarray | None\n\n if self.is_unique and other.is_unique:\n # We can perform much better than the general case\n if how == \"left\":\n join_index = self\n lidx = None\n ridx = self._left_indexer_unique(other)\n elif how == \"right\":\n join_index = other\n lidx = other._left_indexer_unique(self)\n ridx = None\n elif how == \"inner\":\n join_array, lidx, ridx = self._inner_indexer(other)\n join_index = self._wrap_joined_index(join_array, other)\n elif how == \"outer\":\n join_array, lidx, ridx = self._outer_indexer(other)\n join_index = self._wrap_joined_index(join_array, other)\n else:\n if how == \"left\":\n join_array, lidx, ridx = self._left_indexer(other)\n elif how == \"right\":\n join_array, ridx, lidx = other._left_indexer(self)\n elif how == \"inner\":\n join_array, lidx, ridx = self._inner_indexer(other)\n elif how == \"outer\":\n join_array, lidx, ridx = self._outer_indexer(other)\n\n join_index = self._wrap_joined_index(join_array, other)\n\n lidx = None if lidx is None else ensure_platform_int(lidx)\n ridx = None if ridx is None else ensure_platform_int(ridx)\n return join_index, lidx, ridx\n\n def _wrap_joined_index(self: _IndexT, joined: ArrayLike, other: _IndexT) -> _IndexT:\n assert other.dtype == self.dtype\n\n if isinstance(self, ABCMultiIndex):\n name = self.names if self.names == other.names else None\n # error: Incompatible return value type (got \"MultiIndex\",\n # expected \"_IndexT\")\n return self._constructor(joined, name=name) # type: ignore[return-value]\n else:\n name = get_op_result_name(self, other)\n return 
self._constructor._with_infer(joined, name=name)\n\n # --------------------------------------------------------------------\n # Uncategorized Methods\n\n @property\n def values(self) -> ArrayLike:\n \"\"\"\n Return an array representing the data in the Index.\n\n .. warning::\n\n We recommend using :attr:`Index.array` or\n :meth:`Index.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n array: numpy.ndarray or ExtensionArray\n\n See Also\n --------\n Index.array : Reference to the underlying data.\n Index.to_numpy : A NumPy array representing the underlying data.\n \"\"\"\n return self._data\n\n @cache_readonly\n @doc(IndexOpsMixin.array)\n def array(self) -> ExtensionArray:\n array = self._data\n if isinstance(array, np.ndarray):\n from pandas.core.arrays.numpy_ import PandasArray\n\n array = PandasArray(array)\n return array\n\n @property\n def _values(self) -> ExtensionArray | np.ndarray:\n \"\"\"\n The best array representation.\n\n This is an ndarray or ExtensionArray.\n\n ``_values`` are consistent between ``Series`` and ``Index``.\n\n It may differ from the public '.values' method.\n\n index | values | _values |\n ----------------- | --------------- | ------------- |\n Index | ndarray | ndarray |\n CategoricalIndex | Categorical | Categorical |\n DatetimeIndex | ndarray[M8ns] | DatetimeArray |\n DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray |\n PeriodIndex | ndarray[object] | PeriodArray |\n IntervalIndex | IntervalArray | IntervalArray |\n\n See Also\n --------\n values : Values\n \"\"\"\n return self._data\n\n def _get_engine_target(self) -> np.ndarray:\n \"\"\"\n Get the ndarray that we can pass to the IndexEngine constructor.\n \"\"\"\n # error: Incompatible return value type (got \"Union[ExtensionArray,\n # ndarray]\", expected \"ndarray\")\n return self._values # type: ignore[return-value]\n\n def _get_join_target(self) -> np.ndarray:\n \"\"\"\n Get the ndarray that we will pass to libjoin functions.\n \"\"\"\n return self._get_engine_target()\n\n def _from_join_target(self, result: np.ndarray) -> ArrayLike:\n \"\"\"\n Cast the ndarray returned from one of the libjoin.foo_indexer functions\n back to type(self)._data.\n \"\"\"\n return result\n\n @doc(IndexOpsMixin._memory_usage)\n def memory_usage(self, deep: bool = False) -> int:\n result = self._memory_usage(deep=deep)\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n @final\n def where(self, cond, other=None) -> Index:\n \"\"\"\n Replace values where the condition is False.\n\n The replacement is taken from other.\n\n Parameters\n ----------\n cond : bool array-like with the same length as self\n Condition to select the values on.\n other : scalar, or array-like, default None\n Replacement if the condition is False.\n\n Returns\n -------\n pandas.Index\n A copy of self with values replaced from other\n where the condition is False.\n\n See Also\n --------\n Series.where : Same method for Series.\n DataFrame.where : Same method for DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])\n >>> idx\n Index(['car', 'bike', 'train', 'tractor'], dtype='object')\n >>> idx.where(idx.isin(['car', 'train']), 'other')\n Index(['car', 'other', 'train', 'other'], dtype='object')\n \"\"\"\n if isinstance(self, ABCMultiIndex):\n raise NotImplementedError(\n \".where is not supported for MultiIndex operations\"\n )\n cond = np.asarray(cond, dtype=bool)\n return self.putmask(~cond, 
other)\n\n # construction helpers\n @final\n @classmethod\n def _scalar_data_error(cls, data):\n # We return the TypeError so that we can raise it from the constructor\n # in order to keep mypy happy\n return TypeError(\n f\"{cls.__name__}(...) must be called with a collection of some \"\n f\"kind, {repr(data)} was passed\"\n )\n\n @final\n @classmethod\n def _string_data_error(cls, data):\n raise TypeError(\n \"String dtype not supported, you may need \"\n \"to explicitly cast to a numeric type\"\n )\n\n def _validate_fill_value(self, value):\n \"\"\"\n Check if the value can be inserted into our array without casting,\n and convert it to an appropriate native type if necessary.\n\n Raises\n ------\n TypeError\n If the value cannot be inserted into an array of this dtype.\n \"\"\"\n if not can_hold_element(self._values, value):\n raise TypeError\n return value\n\n @final\n def _require_scalar(self, value):\n \"\"\"\n Check that this is a scalar value that we can use for setitem-like\n operations without changing dtype.\n \"\"\"\n if not is_scalar(value):\n raise TypeError(f\"'value' must be a scalar, passed: {type(value).__name__}\")\n return value\n\n @property\n def _has_complex_internals(self) -> bool:\n \"\"\"\n Indicates if an index is not directly backed by a numpy array\n \"\"\"\n # used to avoid libreduction code paths, which raise or require conversion\n return False\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\"\n Return a boolean if we need a qualified .info display.\n \"\"\"\n return self.is_object()\n\n def is_type_compatible(self, kind: str_t) -> bool:\n \"\"\"\n Whether the index type is compatible with the provided type.\n \"\"\"\n warnings.warn(\n \"Index.is_type_compatible is deprecated and will be removed in a \"\n \"future version\",\n FutureWarning,\n stacklevel=2,\n )\n return kind == self.inferred_type\n\n def __contains__(self, key: Any) -> bool:\n \"\"\"\n Return a boolean indicating whether the provided key is in the index.\n\n Parameters\n ----------\n key : label\n The key to check if it is present in the index.\n\n Returns\n -------\n bool\n Whether the key search is in the index.\n\n Raises\n ------\n TypeError\n If the key is not hashable.\n\n See Also\n --------\n Index.isin : Returns an ndarray of boolean dtype indicating whether the\n list-like key is in the index.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n\n >>> 2 in idx\n True\n >>> 6 in idx\n False\n \"\"\"\n hash(key)\n try:\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n # https://github.com/python/typeshed/issues/2148#issuecomment-520783318\n # Incompatible types in assignment (expression has type \"None\", base class\n # \"object\" defined the type as \"Callable[[object], int]\")\n __hash__: None # type: ignore[assignment]\n\n @final\n def __setitem__(self, key, value):\n raise TypeError(\"Index does not support mutable operations\")\n\n def __getitem__(self, key):\n \"\"\"\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n \"\"\"\n # There's no custom logic to be implemented in __getslice__, so it's\n # not overloaded intentionally.\n getitem = self._data.__getitem__\n\n if is_scalar(key):\n key = com.cast_scalar_indexer(key, 
warn_float=True)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization of basic indexing.\n result = getitem(key)\n # Going through simple_new for performance.\n return type(self)._simple_new(result, name=self._name)\n\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n\n result = getitem(key)\n if not is_scalar(result):\n # error: Argument 1 to \"ndim\" has incompatible type \"Union[ExtensionArray,\n # Any]\"; expected \"Union[Union[int, float, complex, str, bytes, generic],\n # Sequence[Union[int, float, complex, str, bytes, generic]],\n # Sequence[Sequence[Any]], _SupportsArray]\"\n if np.ndim(result) > 1: # type: ignore[arg-type]\n deprecate_ndim_indexing(result)\n return result\n # NB: Using _constructor._simple_new would break if MultiIndex\n # didn't override __getitem__\n return self._constructor._simple_new(result, name=self._name)\n else:\n return result\n\n def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT:\n \"\"\"\n Fastpath for __getitem__ when we know we have a slice.\n \"\"\"\n res = self._data[slobj]\n return type(self)._simple_new(res, name=self._name)\n\n @final\n def _can_hold_identifiers_and_holds_name(self, name) -> bool:\n \"\"\"\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n \"\"\"\n if self.is_object() or self.is_categorical():\n return name in self\n return False\n\n def append(self, other: Index | Sequence[Index]) -> Index:\n \"\"\"\n Append a collection of Index options together.\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n Index\n \"\"\"\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat += list(other)\n else:\n # error: Argument 1 to \"append\" of \"list\" has incompatible type\n # \"Union[Index, Sequence[Index]]\"; expected \"Index\"\n to_concat.append(other) # type: ignore[arg-type]\n\n for obj in to_concat:\n if not isinstance(obj, Index):\n raise TypeError(\"all inputs must be Index\")\n\n names = {obj.name for obj in to_concat}\n name = None if len(names) > 1 else self.name\n\n return self._concat(to_concat, name)\n\n def _concat(self, to_concat: list[Index], name: Hashable) -> Index:\n \"\"\"\n Concatenate multiple Index objects.\n \"\"\"\n to_concat_vals = [x._values for x in to_concat]\n\n result = concat_compat(to_concat_vals)\n return Index._with_infer(result, name=name)\n\n def putmask(self, mask, value) -> Index:\n \"\"\"\n Return a new Index of the values set with the mask.\n\n Returns\n -------\n Index\n\n See Also\n --------\n numpy.ndarray.putmask : Changes elements of an array\n based on conditional and input values.\n \"\"\"\n mask, noop = validate_putmask(self._values, mask)\n if noop:\n return self.copy()\n\n if value is None and (self._is_numeric_dtype or self.dtype == object):\n value = self._na_value\n try:\n converted = self._validate_fill_value(value)\n except (ValueError, TypeError) as err:\n if is_object_dtype(self):\n raise err\n\n dtype = self._find_common_type_compat(value)\n return self.astype(dtype).putmask(mask, value)\n\n values = self._values.copy()\n # error: Argument 1 to \"setitem_datetimelike_compat\" has incompatible type\n # \"Union[ExtensionArray, ndarray]\"; expected \"ndarray\"\n 
converted = setitem_datetimelike_compat(\n values, mask.sum(), converted # type: ignore[arg-type]\n )\n np.putmask(values, mask, converted)\n\n return type(self)._simple_new(values, name=self.name)\n\n def equals(self, other: Any) -> bool:\n \"\"\"\n Determine if two Index object are equal.\n\n The things that are being compared are:\n\n * The elements inside the Index object.\n * The order of the elements inside the Index object.\n\n Parameters\n ----------\n other : Any\n The other object to compare against.\n\n Returns\n -------\n bool\n True if \"other\" is an Index and it has the same elements and order\n as the calling index; False otherwise.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3])\n >>> idx1\n Int64Index([1, 2, 3], dtype='int64')\n >>> idx1.equals(pd.Index([1, 2, 3]))\n True\n\n The elements inside are compared\n\n >>> idx2 = pd.Index([\"1\", \"2\", \"3\"])\n >>> idx2\n Index(['1', '2', '3'], dtype='object')\n\n >>> idx1.equals(idx2)\n False\n\n The order is compared\n\n >>> ascending_idx = pd.Index([1, 2, 3])\n >>> ascending_idx\n Int64Index([1, 2, 3], dtype='int64')\n >>> descending_idx = pd.Index([3, 2, 1])\n >>> descending_idx\n Int64Index([3, 2, 1], dtype='int64')\n >>> ascending_idx.equals(descending_idx)\n False\n\n The dtype is *not* compared\n\n >>> int64_idx = pd.Int64Index([1, 2, 3])\n >>> int64_idx\n Int64Index([1, 2, 3], dtype='int64')\n >>> uint64_idx = pd.UInt64Index([1, 2, 3])\n >>> uint64_idx\n UInt64Index([1, 2, 3], dtype='uint64')\n >>> int64_idx.equals(uint64_idx)\n True\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):\n # if other is not object, use other's logic for coercion\n return other.equals(self)\n\n if isinstance(other, ABCMultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n return other.equals(self)\n\n if is_extension_array_dtype(other.dtype):\n # All EA-backed Index subclasses override equals\n return other.equals(self)\n\n return array_equivalent(self._values, other._values)\n\n @final\n def identical(self, other) -> bool:\n \"\"\"\n Similar to equals, but checks that object attributes and types are also equal.\n\n Returns\n -------\n bool\n If two Index objects have equal elements and same type True,\n otherwise False.\n \"\"\"\n return (\n self.equals(other)\n and all(\n getattr(self, c, None) == getattr(other, c, None)\n for c in self._comparables\n )\n and type(self) == type(other)\n )\n\n @final\n def asof(self, label):\n \"\"\"\n Return the label from the index, or, if not present, the previous one.\n\n Assuming that the index is sorted, return the passed index label if it\n is in the index, or return the previous index label if the passed one\n is not in the index.\n\n Parameters\n ----------\n label : object\n The label up to which the method returns the latest index label.\n\n Returns\n -------\n object\n The passed label if it is in the index. 
The previous label if the\n passed label is not in the sorted index or `NaN` if there is no\n such label.\n\n See Also\n --------\n Series.asof : Return the latest value in a Series up to the\n passed index.\n merge_asof : Perform an asof merge (similar to left join but it\n matches on nearest key rather than equal key).\n Index.get_loc : An `asof` is a thin wrapper around `get_loc`\n with method='pad'.\n\n Examples\n --------\n `Index.asof` returns the latest index label up to the passed label.\n\n >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])\n >>> idx.asof('2014-01-01')\n '2013-12-31'\n\n If the label is in the index, the method returns the passed label.\n\n >>> idx.asof('2014-01-02')\n '2014-01-02'\n\n If all of the labels in the index are later than the passed label,\n NaN is returned.\n\n >>> idx.asof('1999-01-02')\n nan\n\n If the index is not sorted, an error is raised.\n\n >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',\n ... '2014-01-03'])\n >>> idx_not_sorted.asof('2013-12-31')\n Traceback (most recent call last):\n ValueError: index must be monotonic increasing or decreasing\n \"\"\"\n self._searchsorted_monotonic(label) # validate sortedness\n try:\n loc = self.get_loc(label)\n except (KeyError, TypeError):\n # KeyError -> No exact match, try for padded\n # TypeError -> passed e.g. non-hashable, fall through to get\n # the tested exception message\n indexer = self.get_indexer([label], method=\"pad\")\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError(\"asof requires scalar valued input\")\n loc = indexer.item()\n if loc == -1:\n return self._na_value\n else:\n if isinstance(loc, slice):\n loc = loc.indices(len(self))[-1]\n\n return self[loc]\n\n def asof_locs(self, where: Index, mask: np.ndarray) -> npt.NDArray[np.intp]:\n \"\"\"\n Return the locations (indices) of labels in the index.\n\n As in the `asof` function, if the label (a particular entry in\n `where`) is not in the index, the latest index label up to the\n passed label is chosen and its index returned.\n\n If all of the labels in the index are later than a label in `where`,\n -1 is returned.\n\n `mask` is used to ignore NA values in the index during calculation.\n\n Parameters\n ----------\n where : Index\n An Index consisting of an array of timestamps.\n mask : np.ndarray[bool]\n Array of booleans denoting where values in the original\n data are not NA.\n\n Returns\n -------\n np.ndarray[np.intp]\n An array of locations (indices) of the labels from the Index\n which correspond to the return values of the `asof` function\n for every element in `where`.\n \"\"\"\n locs = self._values[mask].searchsorted(where._values, side=\"right\")\n locs = np.where(locs > 0, locs - 1, 0)\n\n result = np.arange(len(self), dtype=np.intp)[mask].take(locs)\n\n # TODO: overload return type of ExtensionArray.__getitem__\n first_value = cast(Any, self._values[mask.argmax()])\n result[(locs == 0) & (where._values < first_value)] = -1\n\n return result\n\n @final\n def sort_values(\n self,\n return_indexer: bool = False,\n ascending: bool = True,\n na_position: str_t = \"last\",\n key: Callable | None = None,\n ):\n \"\"\"\n Return a sorted copy of the index.\n\n Return a sorted copy of the index, and optionally return the indices\n that sorted the index itself.\n\n Parameters\n ----------\n return_indexer : bool, default False\n Should the indices that would sort the index be returned.\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n na_position : {'first' 
or 'last'}, default 'last'\n Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at\n the end.\n\n .. versionadded:: 1.2.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n sorted_index : pandas.Index\n Sorted copy of the index.\n indexer : numpy.ndarray, optional\n The indices that the index itself was sorted by.\n\n See Also\n --------\n Series.sort_values : Sort values of a Series.\n DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order, and also get the indices `idx` was\n sorted by.\n\n >>> idx.sort_values(ascending=False, return_indexer=True)\n (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))\n \"\"\"\n idx = ensure_key_mapped(self, key)\n\n # GH 35584. Sort missing values according to na_position kwarg\n # ignore na_position for MultiIndex\n if not isinstance(self, ABCMultiIndex):\n _as = nargsort(\n items=idx, ascending=ascending, na_position=na_position, key=key\n )\n else:\n _as = idx.argsort()\n if not ascending:\n _as = _as[::-1]\n\n sorted_index = self.take(_as)\n\n if return_indexer:\n return sorted_index, _as\n else:\n return sorted_index\n\n @final\n def sort(self, *args, **kwargs):\n \"\"\"\n Use sort_values instead.\n \"\"\"\n raise TypeError(\"cannot sort an Index object in-place, use sort_values instead\")\n\n def shift(self, periods=1, freq=None):\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or str, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.Index\n Shifted index.\n\n See Also\n --------\n Series.shift : Shift values of Series.\n\n Notes\n -----\n This method is only implemented for datetime-like index classes,\n i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.\n\n Examples\n --------\n Put the first 5 month starts of 2011 into an index.\n\n >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')\n >>> month_starts\n DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',\n '2011-05-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Shift the index by 10 days.\n\n >>> month_starts.shift(10, freq='D')\n DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',\n '2011-05-11'],\n dtype='datetime64[ns]', freq=None)\n\n The default value of `freq` is the `freq` attribute of the index,\n which is 'MS' (month start) in this example.\n\n >>> month_starts.shift(10)\n DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',\n '2012-03-01'],\n dtype='datetime64[ns]', freq='MS')\n \"\"\"\n raise 
NotImplementedError(\n f\"This method is only implemented for DatetimeIndex, PeriodIndex and \"\n f\"TimedeltaIndex; Got type {type(self).__name__}\"\n )\n\n def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:\n \"\"\"\n Return the integer indices that would sort the index.\n\n Parameters\n ----------\n *args\n Passed to `numpy.ndarray.argsort`.\n **kwargs\n Passed to `numpy.ndarray.argsort`.\n\n Returns\n -------\n np.ndarray[np.intp]\n Integer indices that would sort the index if used as\n an indexer.\n\n See Also\n --------\n numpy.argsort : Similar method for NumPy arrays.\n Index.sort_values : Return sorted copy of Index.\n\n Examples\n --------\n >>> idx = pd.Index(['b', 'a', 'd', 'c'])\n >>> idx\n Index(['b', 'a', 'd', 'c'], dtype='object')\n\n >>> order = idx.argsort()\n >>> order\n array([1, 0, 3, 2])\n\n >>> idx[order]\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n # This works for either ndarray or EA, is overridden\n # by RangeIndex, MultIIndex\n return self._data.argsort(*args, **kwargs)\n\n @final\n def get_value(self, series: Series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n Only use this if you know what you're doing.\n\n Returns\n -------\n scalar or Series\n \"\"\"\n warnings.warn(\n \"get_value is deprecated and will be removed in a future version. \"\n \"Use Series[key] instead\",\n FutureWarning,\n stacklevel=2,\n )\n\n self._check_indexing_error(key)\n\n try:\n # GH 20882, 21257\n # First try to convert the key to a location\n # If that fails, raise a KeyError if an integer\n # index, otherwise, see if key is an integer, and\n # try that\n loc = self.get_loc(key)\n except KeyError:\n if not self._should_fallback_to_positional:\n raise\n elif is_integer(key):\n # If the Index cannot hold integer, then this is unambiguously\n # a locational lookup.\n loc = key\n else:\n raise\n\n return self._get_values_for_loc(series, loc, key)\n\n def _check_indexing_error(self, key):\n if not is_scalar(key):\n # if key is not a scalar, directly raise an error (the code below\n # would convert to numpy arrays and raise later any way) - GH29926\n raise InvalidIndexError(key)\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n \"\"\"\n Should an integer key be treated as positional?\n \"\"\"\n return not self.holds_integer() and not self.is_boolean()\n\n def _get_values_for_loc(self, series: Series, loc, key):\n \"\"\"\n Do a positional lookup on the given Series, returning either a scalar\n or a Series.\n\n Assumes that `series.index is self`\n\n key is included for MultiIndex compat.\n \"\"\"\n if is_integer(loc):\n return series._values[loc]\n\n return series.iloc[loc]\n\n @final\n def set_value(self, arr, key, value):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n .. deprecated:: 1.0\n\n Notes\n -----\n Only use this if you know what you're doing.\n \"\"\"\n warnings.warn(\n (\n \"The 'set_value' method is deprecated, and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n loc = self._engine.get_loc(key)\n validate_numeric_casting(arr.dtype, value)\n arr[loc] = value\n\n _index_shared_docs[\n \"get_indexer_non_unique\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. 
The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n\n Returns\n -------\n indexer : np.ndarray[np.intp]\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. Missing values\n in the target are marked by -1.\n missing : np.ndarray[np.intp]\n An indexer into the target of the values not found.\n These correspond to the -1 in the indexer array.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n target = ensure_index(target)\n target = self._maybe_cast_listlike_indexer(target)\n\n if not self._should_compare(target) and not is_interval_dtype(self.dtype):\n # IntervalIndex get special treatment bc numeric scalars can be\n # matched to Interval scalars\n return self._get_indexer_non_comparable(target, method=None, unique=False)\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer_non_unique(ptarget)\n\n if not is_dtype_equal(self.dtype, target.dtype):\n # TODO: if object, could use infer_dtype to preempt costly\n # conversion if still non-comparable?\n dtype = self._find_common_type_compat(target)\n\n this = self.astype(dtype, copy=False)\n that = target.astype(dtype, copy=False)\n return this.get_indexer_non_unique(that)\n\n # Note: _maybe_promote ensures we never get here with MultiIndex\n # self and non-Multi target\n tgt_values = target._get_engine_target()\n\n indexer, missing = self._engine.get_indexer_non_unique(tgt_values)\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n @final\n def get_indexer_for(self, target) -> npt.NDArray[np.intp]:\n \"\"\"\n Guaranteed return of an indexer even when non-unique.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n np.ndarray[np.intp]\n List of indices.\n\n Examples\n --------\n >>> idx = pd.Index([np.nan, 'var1', np.nan])\n >>> idx.get_indexer_for([np.nan])\n array([0, 2])\n \"\"\"\n if self._index_as_unique:\n return self.get_indexer(target)\n indexer, _ = self.get_indexer_non_unique(target)\n return indexer\n\n def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:\n \"\"\"\n Analogue to get_indexer that raises if any elements are missing.\n \"\"\"\n keyarr = key\n if not isinstance(keyarr, Index):\n keyarr = com.asarray_tuplesafe(keyarr)\n\n if self._index_as_unique:\n indexer = self.get_indexer_for(keyarr)\n keyarr = self.reindex(keyarr)[0]\n else:\n keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)\n\n self._raise_if_missing(keyarr, indexer, axis_name)\n\n keyarr = self.take(indexer)\n if isinstance(key, Index):\n # GH 42790 - Preserve name from an Index\n keyarr.name = key.name\n if keyarr.dtype.kind in [\"m\", \"M\"]:\n # DTI/TDI.take can infer a freq in some cases when we dont want one\n if isinstance(key, list) or (\n isinstance(key, type(self))\n # \"Index\" has no attribute \"freq\"\n and key.freq is None # type: ignore[attr-defined]\n ):\n keyarr = keyarr._with_freq(None)\n\n return keyarr, indexer\n\n def _raise_if_missing(self, key, indexer, axis_name: str_t):\n \"\"\"\n Check that indexer can be used to return a result.\n\n e.g. 
at least one element was found,\n unless the list of keys was actually empty.\n\n Parameters\n ----------\n key : list-like\n Targeted labels (only used to show correct error message).\n indexer: array-like of booleans\n Indices corresponding to the key,\n (with -1 indicating not found).\n axis_name : str\n\n Raises\n ------\n KeyError\n If at least one key was requested but none was found.\n \"\"\"\n if len(key) == 0:\n return\n\n # Count missing values\n missing_mask = indexer < 0\n nmissing = missing_mask.sum()\n\n if nmissing:\n\n # TODO: remove special-case; this is just to keep exception\n # message tests from raising while debugging\n use_interval_msg = is_interval_dtype(self.dtype) or (\n is_categorical_dtype(self.dtype)\n # \"Index\" has no attribute \"categories\" [attr-defined]\n and is_interval_dtype(\n self.categories.dtype # type: ignore[attr-defined]\n )\n )\n\n if nmissing == len(indexer):\n if use_interval_msg:\n key = list(key)\n raise KeyError(f\"None of [{key}] are in the [{axis_name}]\")\n\n not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())\n raise KeyError(f\"{not_found} not in index\")\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: Literal[True] = ...\n ) -> npt.NDArray[np.intp]:\n ...\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: Literal[False]\n ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n ...\n\n @overload\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: bool = True\n ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n ...\n\n @final\n def _get_indexer_non_comparable(\n self, target: Index, method, unique: bool = True\n ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n \"\"\"\n Called from get_indexer or get_indexer_non_unique when the target\n is of a non-comparable dtype.\n\n For get_indexer lookups with method=None, get_indexer is an _equality_\n check, so non-comparable dtypes mean we will always have no matches.\n\n For get_indexer lookups with a method, get_indexer is an _inequality_\n check, so non-comparable dtypes mean we will always raise TypeError.\n\n Parameters\n ----------\n target : Index\n method : str or None\n unique : bool, default True\n * True if called from get_indexer.\n * False if called from get_indexer_non_unique.\n\n Raises\n ------\n TypeError\n If doing an inequality check, i.e. 
method is not None.\n \"\"\"\n if method is not None:\n other = unpack_nested_dtype(target)\n raise TypeError(f\"Cannot compare dtypes {self.dtype} and {other.dtype}\")\n\n no_matches = -1 * np.ones(target.shape, dtype=np.intp)\n if unique:\n # This is for get_indexer\n return no_matches\n else:\n # This is for get_indexer_non_unique\n missing = np.arange(len(target), dtype=np.intp)\n return no_matches, missing\n\n @property\n def _index_as_unique(self) -> bool:\n \"\"\"\n Whether we should treat this as unique for the sake of\n get_indexer vs get_indexer_non_unique.\n\n For IntervalIndex compat.\n \"\"\"\n return self.is_unique\n\n _requires_unique_msg = \"Reindexing only valid with uniquely valued Index objects\"\n\n @final\n def _maybe_promote(self, other: Index) -> tuple[Index, Index]:\n \"\"\"\n When dealing with an object-dtype Index and a non-object Index, see\n if we can upcast the object-dtype one to improve performance.\n \"\"\"\n\n if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):\n if (\n self.tz is not None\n and other.tz is not None\n and not tz_compare(self.tz, other.tz)\n ):\n # standardize on UTC\n return self.tz_convert(\"UTC\"), other.tz_convert(\"UTC\")\n\n elif self.inferred_type == \"date\" and isinstance(other, ABCDatetimeIndex):\n try:\n return type(other)(self), other\n except OutOfBoundsDatetime:\n return self, other\n elif self.inferred_type == \"timedelta\" and isinstance(other, ABCTimedeltaIndex):\n # TODO: we dont have tests that get here\n return type(other)(self), other\n elif self.inferred_type == \"boolean\":\n if not is_object_dtype(self.dtype):\n return self.astype(\"object\"), other.astype(\"object\")\n\n elif self.dtype.kind == \"u\" and other.dtype.kind == \"i\":\n # GH#41873\n if other.min() >= 0:\n # lookup min as it may be cached\n # TODO: may need itemsize check if we have non-64-bit Indexes\n return self, other.astype(self.dtype)\n\n elif self._is_multi and not other._is_multi:\n try:\n # \"Type[Index]\" has no attribute \"from_tuples\"\n other = type(self).from_tuples(other) # type: ignore[attr-defined]\n except (TypeError, ValueError):\n # let's instead try with a straight Index\n self = Index(self._values)\n\n if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):\n # Reverse op so we dont need to re-implement on the subclasses\n other, self = other._maybe_promote(self)\n\n return self, other\n\n @final\n def _find_common_type_compat(self, target) -> DtypeObj:\n \"\"\"\n Implementation of find_common_type that adjusts for Index-specific\n special cases.\n \"\"\"\n if is_interval_dtype(self.dtype) and is_valid_na_for_dtype(target, self.dtype):\n # e.g. 
setting NA value into IntervalArray[int64]\n self = cast(\"IntervalIndex\", self)\n return IntervalDtype(np.float64, closed=self.closed)\n\n target_dtype, _ = infer_dtype_from(target, pandas_dtype=True)\n\n # special case: if one dtype is uint64 and the other a signed int, return object\n # See https://github.com/pandas-dev/pandas/issues/26778 for discussion\n # Now it's:\n # * float | [u]int -> float\n # * uint64 | signed int -> object\n # We may change union(float | [u]int) to go to object.\n if self.dtype == \"uint64\" or target_dtype == \"uint64\":\n if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(\n target_dtype\n ):\n return np.dtype(\"object\")\n\n dtype = find_common_type([self.dtype, target_dtype])\n\n if dtype.kind in [\"i\", \"u\"]:\n # TODO: what about reversed with self being categorical?\n if (\n isinstance(target, Index)\n and is_categorical_dtype(target.dtype)\n and target.hasnans\n ):\n # FIXME: find_common_type incorrect with Categorical GH#38240\n # FIXME: some cases where float64 cast can be lossy?\n dtype = np.dtype(np.float64)\n if dtype.kind == \"c\":\n dtype = np.dtype(object)\n return dtype\n\n @final\n def _should_compare(self, other: Index) -> bool:\n \"\"\"\n Check if `self == other` can ever have non-False entries.\n \"\"\"\n\n if (other.is_boolean() and self.is_numeric()) or (\n self.is_boolean() and other.is_numeric()\n ):\n # GH#16877 Treat boolean labels passed to a numeric index as not\n # found. Without this fix False and True would be treated as 0 and 1\n # respectively.\n return False\n\n other = unpack_nested_dtype(other)\n dtype = other.dtype\n return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n return True\n\n @final\n def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:\n \"\"\"\n Group the index labels by a given array of values.\n\n Parameters\n ----------\n values : array\n Values used to determine the groups.\n\n Returns\n -------\n dict\n {group name -> group labels}\n \"\"\"\n # TODO: if we are a MultiIndex, we can do better\n # that converting to tuples\n if isinstance(values, ABCMultiIndex):\n values = values._values\n values = Categorical(values)\n result = values._reverse_indexer()\n\n # map to the label\n result = {k: self.take(v) for k, v in result.items()}\n\n return PrettyDict(result)\n\n def map(self, mapper, na_action=None):\n \"\"\"\n Map values using input correspondence (a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n applied : Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n new_values = self._map_values(mapper, na_action=na_action)\n\n attributes = self._get_attributes_dict()\n\n # we can return a MultiIndex\n if new_values.size and isinstance(new_values[0], tuple):\n if isinstance(self, MultiIndex):\n names = self.names\n elif attributes.get(\"name\"):\n names = [attributes.get(\"name\")] * len(new_values[0])\n else:\n names = None\n return MultiIndex.from_tuples(new_values, names=names)\n\n attributes[\"copy\"] = False\n if not 
new_values.size:\n # empty\n attributes[\"dtype\"] = self.dtype\n\n if self._is_backward_compat_public_numeric_index and is_numeric_dtype(\n new_values.dtype\n ):\n return self._constructor(new_values, **attributes)\n\n return Index._with_infer(new_values, **attributes)\n\n # TODO: De-duplicate with map, xref GH#32349\n @final\n def _transform_index(self, func, *, level=None) -> Index:\n \"\"\"\n Apply function to all values found in index.\n\n This includes transforming multiindex entries separately.\n Only apply function to one level of the MultiIndex if level is specified.\n \"\"\"\n if isinstance(self, ABCMultiIndex):\n if level is not None:\n # Caller is responsible for ensuring level is positional.\n items = [\n tuple(func(y) if i == level else y for i, y in enumerate(x))\n for x in self\n ]\n else:\n items = [tuple(func(y) for y in x) for x in self]\n return type(self).from_tuples(items, names=self.names)\n else:\n items = [func(x) for x in self]\n return Index(items, name=self.name, tupleize_cols=False)\n\n def isin(self, values, level=None) -> np.ndarray:\n \"\"\"\n Return a boolean array where the index values are in `values`.\n\n Compute boolean array of whether each index value is found in the\n passed set of values. The length of the returned boolean array matches\n the length of the index.\n\n Parameters\n ----------\n values : set or list-like\n Sought values.\n level : str or int, optional\n Name or position of the index level to use (if the index is a\n `MultiIndex`).\n\n Returns\n -------\n np.ndarray[bool]\n NumPy array of boolean values.\n\n See Also\n --------\n Series.isin : Same for Series.\n DataFrame.isin : Same method for DataFrames.\n\n Notes\n -----\n In the case of `MultiIndex` you must either specify `values` as a\n list-like object containing tuples that are the same length as the\n number of levels, or specify `level`. Otherwise it will raise a\n ``ValueError``.\n\n If `level` is specified:\n\n - if it is the name of one *and only one* index level, use that level;\n - otherwise it should be a number indicating level position.\n\n Examples\n --------\n >>> idx = pd.Index([1,2,3])\n >>> idx\n Int64Index([1, 2, 3], dtype='int64')\n\n Check whether each index value in a list of values.\n\n >>> idx.isin([1, 4])\n array([ True, False, False])\n\n >>> midx = pd.MultiIndex.from_arrays([[1,2,3],\n ... ['red', 'blue', 'green']],\n ... 
names=('number', 'color'))\n >>> midx\n MultiIndex([(1, 'red'),\n (2, 'blue'),\n (3, 'green')],\n names=['number', 'color'])\n\n Check whether the strings in the 'color' level of the MultiIndex\n are in a list of colors.\n\n >>> midx.isin(['red', 'orange', 'yellow'], level='color')\n array([ True, False, False])\n\n To check across the levels of a MultiIndex, pass a list of tuples:\n\n >>> midx.isin([(1, 'red'), (3, 'red')])\n array([ True, False, False])\n\n For a DatetimeIndex, string values in `values` are converted to\n Timestamps.\n\n >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']\n >>> dti = pd.to_datetime(dates)\n >>> dti\n DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],\n dtype='datetime64[ns]', freq=None)\n\n >>> dti.isin(['2000-03-11'])\n array([ True, False, False])\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n return algos.isin(self._values, values)\n\n def _get_string_slice(self, key: str_t):\n # this is for partial string indexing,\n # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex\n raise NotImplementedError\n\n def slice_indexer(\n self,\n start: Hashable | None = None,\n end: Hashable | None = None,\n step: int | None = None,\n kind=no_default,\n ) -> slice:\n \"\"\"\n Compute the slice indexer for input labels and step.\n\n Index needs to be ordered and unique.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, default None\n kind : str, default None\n\n .. deprecated:: 1.4.0\n\n Returns\n -------\n indexer : slice\n\n Raises\n ------\n KeyError : If key does not exist, or key is not unique and index is\n not ordered.\n\n Notes\n -----\n This function assumes that the data is sorted, so use at your own peril\n\n Examples\n --------\n This is a method on all index types. For example you can do:\n\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_indexer(start='b', end='c')\n slice(1, 3, None)\n\n >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])\n >>> idx.slice_indexer(start='b', end=('c', 'g'))\n slice(1, 3, None)\n \"\"\"\n self._deprecated_arg(kind, \"kind\", \"slice_indexer\")\n\n start_slice, end_slice = self.slice_locs(start, end, step=step)\n\n # return a slice\n if not is_scalar(start_slice):\n raise AssertionError(\"Start slice bound is non-scalar\")\n if not is_scalar(end_slice):\n raise AssertionError(\"End slice bound is non-scalar\")\n\n return slice(start_slice, end_slice, step)\n\n def _maybe_cast_indexer(self, key):\n \"\"\"\n If we have a float key and are not a floating index, then try to cast\n to an int if equivalent.\n \"\"\"\n if not self.is_floating():\n return com.cast_scalar_indexer(key)\n return key\n\n def _maybe_cast_listlike_indexer(self, target) -> Index:\n \"\"\"\n Analogue to maybe_cast_indexer for get_indexer instead of get_loc.\n \"\"\"\n return ensure_index(target)\n\n @final\n def _validate_indexer(self, form: str_t, key, kind: str_t):\n \"\"\"\n If we are positional indexer, validate that we have appropriate\n typed bounds must be an integer.\n \"\"\"\n assert kind in [\"getitem\", \"iloc\"]\n\n if key is not None and not is_integer(key):\n raise self._invalid_indexer(form, key)\n\n def _maybe_cast_slice_bound(self, label, side: str_t, kind=no_default):\n \"\"\"\n This function should be overloaded in subclasses that allow non-trivial\n casting on label-slice bounds, e.g. 
datetime-like indices allowing\n strings containing formatted datetimes.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n .. deprecated:: 1.3.0\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, no_default]\n self._deprecated_arg(kind, \"kind\", \"_maybe_cast_slice_bound\")\n\n # We are a plain index here (sub-class override this method if they\n # wish to have special treatment for floats/ints, e.g. Float64Index and\n # datetimelike Indexes\n # reject them, if index does not contain label\n if (is_float(label) or is_integer(label)) and label not in self._values:\n raise self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _searchsorted_monotonic(self, label, side: str_t = \"left\"):\n if self.is_monotonic_increasing:\n return self.searchsorted(label, side=side)\n elif self.is_monotonic_decreasing:\n # np.searchsorted expects ascending sort order, have to reverse\n # everything for it to work (element ordering, search side and\n # resulting value).\n pos = self[::-1].searchsorted(\n label, side=\"right\" if side == \"left\" else \"left\"\n )\n return len(self) - pos\n\n raise ValueError(\"index must be monotonic increasing or decreasing\")\n\n def get_slice_bound(self, label, side: str_t, kind=no_default) -> int:\n \"\"\"\n Calculate slice bound that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n .. deprecated:: 1.4.0\n\n Returns\n -------\n int\n Index of label.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, no_default]\n self._deprecated_arg(kind, \"kind\", \"get_slice_bound\")\n\n if side not in (\"left\", \"right\"):\n raise ValueError(\n \"Invalid value for side kwarg, must be either \"\n f\"'left' or 'right': {side}\"\n )\n\n original_label = label\n\n # For datetime indices label may be a string that has to be converted\n # to datetime boundary according to its resolution.\n label = self._maybe_cast_slice_bound(label, side)\n\n # we need to look up the label\n try:\n slc = self.get_loc(label)\n except KeyError as err:\n try:\n return self._searchsorted_monotonic(label, side)\n except ValueError:\n # raise the original KeyError\n raise err\n\n if isinstance(slc, np.ndarray):\n # get_loc may return a boolean array or an array of indices, which\n # is OK as long as they are representable by a slice.\n if is_bool_dtype(slc):\n slc = lib.maybe_booleans_to_slice(slc.view(\"u1\"))\n else:\n slc = lib.maybe_indices_to_slice(\n slc.astype(np.intp, copy=False), len(self)\n )\n if isinstance(slc, np.ndarray):\n raise KeyError(\n f\"Cannot get {side} slice bound for non-unique \"\n f\"label: {repr(original_label)}\"\n )\n\n if isinstance(slc, slice):\n if side == \"left\":\n return slc.start\n else:\n return slc.stop\n else:\n if side == \"right\":\n return slc + 1\n else:\n return slc\n\n def slice_locs(self, start=None, end=None, step=None, kind=no_default):\n \"\"\"\n Compute slice locations for input labels.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, defaults None\n If None, defaults to 1.\n kind : {'loc', 'getitem'} or None\n\n .. 
deprecated:: 1.4.0\n\n Returns\n -------\n start, end : int\n\n See Also\n --------\n Index.get_loc : Get location for a single label.\n\n Notes\n -----\n This method only works if the index is monotonic or unique.\n\n Examples\n --------\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_locs(start='b', end='c')\n (1, 3)\n \"\"\"\n self._deprecated_arg(kind, \"kind\", \"slice_locs\")\n inc = step is None or step >= 0\n\n if not inc:\n # If it's a reverse slice, temporarily swap bounds.\n start, end = end, start\n\n # GH 16785: If start and end happen to be date strings with UTC offsets\n # attempt to parse and check that the offsets are the same\n if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):\n try:\n ts_start = Timestamp(start)\n ts_end = Timestamp(end)\n except (ValueError, TypeError):\n pass\n else:\n if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):\n raise ValueError(\"Both dates must have the same UTC offset\")\n\n start_slice = None\n if start is not None:\n start_slice = self.get_slice_bound(start, \"left\")\n if start_slice is None:\n start_slice = 0\n\n end_slice = None\n if end is not None:\n end_slice = self.get_slice_bound(end, \"right\")\n if end_slice is None:\n end_slice = len(self)\n\n if not inc:\n # Bounds at this moment are swapped, swap them back and shift by 1.\n #\n # slice_locs('B', 'A', step=-1): s='B', e='A'\n #\n # s='A' e='B'\n # AFTER SWAP: | |\n # v ------------------> V\n # -----------------------------------\n # | | |A|A|A|A| | | | | |B|B| | | | |\n # -----------------------------------\n # ^ <------------------ ^\n # SHOULD BE: | |\n # end=s-1 start=e-1\n #\n end_slice, start_slice = start_slice - 1, end_slice - 1\n\n # i == -1 triggers ``len(self) + i`` selection that points to the\n # last element, not before-the-first one, subtracting len(self)\n # compensates that.\n if end_slice == -1:\n end_slice -= len(self)\n if start_slice == -1:\n start_slice -= len(self)\n\n return start_slice, end_slice\n\n def delete(self: _IndexT, loc) -> _IndexT:\n \"\"\"\n Make new Index with passed location(-s) deleted.\n\n Parameters\n ----------\n loc : int or list of int\n Location of item(-s) which will be deleted.\n Use a list of locations to delete more than one value at the same time.\n\n Returns\n -------\n Index\n Will be same type as self, except for RangeIndex.\n\n See Also\n --------\n numpy.delete : Delete any rows and column from NumPy array (ndarray).\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx.delete(1)\n Index(['a', 'c'], dtype='object')\n\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx.delete([0, 2])\n Index(['b'], dtype='object')\n \"\"\"\n res_values = np.delete(self._data, loc)\n return type(self)._simple_new(res_values, name=self.name)\n\n def insert(self, loc: int, item) -> Index:\n \"\"\"\n Make new Index inserting new item at location.\n\n Follows Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n \"\"\"\n # Note: this method is overridden by all ExtensionIndex subclasses,\n # so self is never backed by an EA.\n item = lib.item_from_zerodim(item)\n if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:\n item = self._na_value\n\n try:\n item = self._validate_fill_value(item)\n except TypeError:\n dtype = self._find_common_type_compat(item)\n return self.astype(dtype).insert(loc, item)\n\n arr = np.asarray(self)\n\n # Use Index constructor to ensure we get tuples cast correctly.\n 
item = Index([item], dtype=self.dtype)._values\n idx = np.concatenate((arr[:loc], item, arr[loc:]))\n return Index._with_infer(idx, name=self.name)\n\n def drop(self, labels, errors: str_t = \"raise\") -> Index:\n \"\"\"\n Make new Index with passed list of labels deleted.\n\n Parameters\n ----------\n labels : array-like or scalar\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n Returns\n -------\n dropped : Index\n Will be same type as self, except for RangeIndex.\n\n Raises\n ------\n KeyError\n If not all of the labels are found in the selected axis\n \"\"\"\n arr_dtype = \"object\" if self.dtype == \"object\" else None\n labels = com.index_labels_to_array(labels, dtype=arr_dtype)\n indexer = self.get_indexer_for(labels)\n mask = indexer == -1\n if mask.any():\n if errors != \"ignore\":\n raise KeyError(f\"{labels[mask]} not found in axis\")\n indexer = indexer[~mask]\n return self.delete(indexer)\n\n # --------------------------------------------------------------------\n # Generated Arithmetic, Comparison, and Unary Methods\n\n def _cmp_method(self, other, op):\n \"\"\"\n Wrapper used to dispatch comparison operations.\n \"\"\"\n if self.is_(other):\n # fastpath\n if op in {operator.eq, operator.le, operator.ge}:\n arr = np.ones(len(self), dtype=bool)\n if self._can_hold_na and not isinstance(self, ABCMultiIndex):\n # TODO: should set MultiIndex._can_hold_na = False?\n arr[self.isna()] = False\n return arr\n elif op in {operator.ne, operator.lt, operator.gt}:\n return np.zeros(len(self), dtype=bool)\n\n if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(\n self\n ) != len(other):\n raise ValueError(\"Lengths must match to compare\")\n\n if not isinstance(other, ABCMultiIndex):\n other = extract_array(other, extract_numpy=True)\n else:\n other = np.asarray(other)\n\n if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):\n # e.g. 
PeriodArray, Categorical\n with np.errstate(all=\"ignore\"):\n result = op(self._values, other)\n\n elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):\n # don't pass MultiIndex\n with np.errstate(all=\"ignore\"):\n result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)\n\n else:\n with np.errstate(all=\"ignore\"):\n result = ops.comparison_op(self._values, other, op)\n\n return result\n\n def _arith_method(self, other, op):\n \"\"\"\n Wrapper used to dispatch arithmetic operations.\n \"\"\"\n\n from pandas import Series\n\n result = op(Series(self), other)\n if isinstance(result, tuple):\n return (Index._with_infer(result[0]), Index(result[1]))\n return Index._with_infer(result)\n\n @final\n def _unary_method(self, op):\n result = op(self._values)\n return Index(result, name=self.name)\n\n def __abs__(self):\n return self._unary_method(operator.abs)\n\n def __neg__(self):\n return self._unary_method(operator.neg)\n\n def __pos__(self):\n return self._unary_method(operator.pos)\n\n def __inv__(self):\n # TODO: why not operator.inv?\n # TODO: __inv__ vs __invert__?\n return self._unary_method(lambda x: -x)\n\n # --------------------------------------------------------------------\n # Reductions\n\n def any(self, *args, **kwargs):\n \"\"\"\n Return whether any element is Truthy.\n\n Parameters\n ----------\n *args\n Required for compatibility with numpy.\n **kwargs\n Required for compatibility with numpy.\n\n Returns\n -------\n any : bool or array-like (if axis is specified)\n A single element array-like may be converted to bool.\n\n See Also\n --------\n Index.all : Return whether all elements are True.\n Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\"\n nv.validate_any(args, kwargs)\n self._maybe_disable_logical_methods(\"any\")\n # error: Argument 1 to \"any\" has incompatible type \"ArrayLike\"; expected\n # \"Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,\n # float, complex, str, bytes, generic]], Sequence[Sequence[Any]],\n # _SupportsArray]\"\n return np.any(self.values) # type: ignore[arg-type]\n\n def all(self, *args, **kwargs):\n \"\"\"\n Return whether all elements are Truthy.\n\n Parameters\n ----------\n *args\n Required for compatibility with numpy.\n **kwargs\n Required for compatibility with numpy.\n\n Returns\n -------\n all : bool or array-like (if axis is specified)\n A single element array-like may be converted to bool.\n\n See Also\n --------\n Index.any : Return whether any element in an Index is True.\n Series.any : Return whether any element in a Series is True.\n Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\"\n nv.validate_all(args, kwargs)\n 
self._maybe_disable_logical_methods(\"all\")\n # error: Argument 1 to \"all\" has incompatible type \"ArrayLike\"; expected\n # \"Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int,\n # float, complex, str, bytes, generic]], Sequence[Sequence[Any]],\n # _SupportsArray]\"\n return np.all(self.values) # type: ignore[arg-type]\n\n @final\n def _maybe_disable_logical_methods(self, opname: str_t):\n \"\"\"\n raise if this Index subclass does not support any or all.\n \"\"\"\n if (\n isinstance(self, ABCMultiIndex)\n or needs_i8_conversion(self.dtype)\n or is_interval_dtype(self.dtype)\n or is_categorical_dtype(self.dtype)\n or is_float_dtype(self.dtype)\n ):\n # This call will raise\n make_invalid_op(opname)(self)\n\n @Appender(IndexOpsMixin.argmin.__doc__)\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_argmin(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return -1\n return super().argmin(skipna=skipna)\n\n @Appender(IndexOpsMixin.argmax.__doc__)\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_argmax(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return -1\n return super().argmax(skipna=skipna)\n\n @doc(IndexOpsMixin.min)\n def min(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n return self._na_value\n\n if len(self) and self.is_monotonic_increasing:\n # quick check\n first = self[0]\n if not isna(first):\n return first\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return self._na_value\n\n if not self._is_multi and not isinstance(self._values, np.ndarray):\n # \"ExtensionArray\" has no attribute \"min\"\n return self._values.min(skipna=skipna) # type: ignore[attr-defined]\n\n return super().min(skipna=skipna)\n\n @doc(IndexOpsMixin.max)\n def max(self, axis=None, skipna=True, *args, **kwargs):\n nv.validate_max(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n return self._na_value\n\n if len(self) and self.is_monotonic_increasing:\n # quick check\n last = self[-1]\n if not isna(last):\n return last\n\n if not self._is_multi and self.hasnans:\n # Take advantage of cache\n mask = self._isnan\n if not skipna or mask.all():\n return self._na_value\n\n if not self._is_multi and not isinstance(self._values, np.ndarray):\n # \"ExtensionArray\" has no attribute \"max\"\n return self._values.max(skipna=skipna) # type: ignore[attr-defined]\n\n return super().max(skipna=skipna)\n\n # --------------------------------------------------------------------\n\n @final\n @property\n def shape(self) -> Shape:\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n # See GH#27775, GH#27384 for history/reasoning in how this is defined.\n return (len(self),)\n\n @final\n def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None:\n \"\"\"\n Issue a FutureWarning if the arg/kwarg is not no_default.\n \"\"\"\n if value is not no_default:\n warnings.warn(\n f\"'{name}' argument in {methodname} is deprecated \"\n \"and will be removed in a future version. 
Do not pass it.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n\ndef ensure_index_from_sequences(sequences, names=None):\n \"\"\"\n Construct an index from sequences of data.\n\n A single sequence returns an Index. Many sequences returns a\n MultiIndex.\n\n Parameters\n ----------\n sequences : sequence of sequences\n names : sequence of str\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index_from_sequences([[1, 2, 3]], names=[\"name\"])\n Int64Index([1, 2, 3], dtype='int64', name='name')\n\n >>> ensure_index_from_sequences([[\"a\", \"a\"], [\"a\", \"b\"]], names=[\"L1\", \"L2\"])\n MultiIndex([('a', 'a'),\n ('a', 'b')],\n names=['L1', 'L2'])\n\n See Also\n --------\n ensure_index\n \"\"\"\n from pandas.core.indexes.multi import MultiIndex\n\n if len(sequences) == 1:\n if names is not None:\n names = names[0]\n return Index(sequences[0], name=names)\n else:\n return MultiIndex.from_arrays(sequences, names=names)\n\n\ndef ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Index:\n \"\"\"\n Ensure that we have an index from some index-like object.\n\n Parameters\n ----------\n index_like : sequence\n An Index or other sequence\n copy : bool, default False\n\n Returns\n -------\n index : Index or MultiIndex\n\n See Also\n --------\n ensure_index_from_sequences\n\n Examples\n --------\n >>> ensure_index(['a', 'b'])\n Index(['a', 'b'], dtype='object')\n\n >>> ensure_index([('a', 'a'), ('b', 'c')])\n Index([('a', 'a'), ('b', 'c')], dtype='object')\n\n >>> ensure_index([['a', 'a'], ['b', 'c']])\n MultiIndex([('a', 'b'),\n ('a', 'c')],\n )\n \"\"\"\n if isinstance(index_like, Index):\n if copy:\n index_like = index_like.copy()\n return index_like\n\n if isinstance(index_like, ABCSeries):\n name = index_like.name\n return Index._with_infer(index_like, name=name, copy=copy)\n\n if is_iterator(index_like):\n index_like = list(index_like)\n\n if isinstance(index_like, list):\n if type(index_like) is not list:\n # must check for exactly list here because of strict type\n # check in clean_index_list\n index_like = list(index_like)\n\n if len(index_like) and lib.is_all_arraylike(index_like):\n from pandas.core.indexes.multi import MultiIndex\n\n return MultiIndex.from_arrays(index_like)\n else:\n return Index._with_infer(index_like, copy=copy, tupleize_cols=False)\n else:\n return Index._with_infer(index_like, copy=copy)\n\n\ndef ensure_has_len(seq):\n \"\"\"\n If seq is an iterator, put its values into a list.\n \"\"\"\n try:\n len(seq)\n except TypeError:\n return list(seq)\n else:\n return seq\n\n\ndef trim_front(strings: list[str]) -> list[str]:\n \"\"\"\n Trims zeros and decimal points.\n\n Examples\n --------\n >>> trim_front([\" a\", \" b\"])\n ['a', 'b']\n\n >>> trim_front([\" a\", \" \"])\n ['a', '']\n \"\"\"\n if not strings:\n return strings\n while all(strings) and all(x[0] == \" \" for x in strings):\n strings = [x[1:] for x in strings]\n return strings\n\n\ndef _validate_join_method(method: str) -> None:\n if method not in [\"left\", \"right\", \"inner\", \"outer\"]:\n raise ValueError(f\"do not recognize join method {method}\")\n\n\ndef default_index(n: int) -> RangeIndex:\n from pandas.core.indexes.range import RangeIndex\n\n return RangeIndex(0, n, name=None)\n\n\ndef maybe_extract_name(name, obj, cls) -> Hashable:\n \"\"\"\n If no name is passed, then extract it from data, validating hashability.\n \"\"\"\n if name is None and isinstance(obj, (Index, ABCSeries)):\n # Note we don't just check for \"name\" 
attribute since that would\n # pick up e.g. dtype.name\n name = obj.name\n\n # GH#29069\n if not is_hashable(name):\n raise TypeError(f\"{cls.__name__}.name must be a hashable type\")\n\n return name\n\n\n_cast_depr_msg = (\n \"In a future version, passing an object-dtype arraylike to pd.Index will \"\n \"not infer numeric values to numeric dtype (matching the Series behavior). \"\n \"To retain the old behavior, explicitly pass the desired dtype or use the \"\n \"desired Index subclass\"\n)\n\n\ndef _maybe_cast_data_without_dtype(\n subarr: np.ndarray, cast_numeric_deprecated: bool = True\n) -> ArrayLike:\n \"\"\"\n If we have an arraylike input but no passed dtype, try to infer\n a supported dtype.\n\n Parameters\n ----------\n subarr : np.ndarray[object]\n cast_numeric_deprecated : bool, default True\n Whether to issue a FutureWarning when inferring numeric dtypes.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n\n result = lib.maybe_convert_objects(\n subarr,\n convert_datetime=True,\n convert_timedelta=True,\n convert_period=True,\n convert_interval=True,\n dtype_if_all_nat=np.dtype(\"datetime64[ns]\"),\n )\n if result.dtype.kind in [\"i\", \"u\", \"f\"]:\n if not cast_numeric_deprecated:\n # i.e. we started with a list, not an ndarray[object]\n return result\n\n warnings.warn(\n \"In a future version, the Index constructor will not infer numeric \"\n \"dtypes when passed object-dtype sequences (matching Series behavior)\",\n FutureWarning,\n stacklevel=3,\n )\n if result.dtype.kind in [\"b\", \"c\"]:\n return subarr\n result = ensure_wrapped_if_datetimelike(result)\n return result\n\n\ndef get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:\n \"\"\"\n Return common name if all indices agree, otherwise None (level-by-level).\n\n Parameters\n ----------\n indexes : list of Index objects\n\n Returns\n -------\n list\n A list representing the unanimous 'names' found.\n \"\"\"\n name_tups = [tuple(i.names) for i in indexes]\n name_sets = [{*ns} for ns in zip_longest(*name_tups)]\n names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets)\n return names\n\n\ndef unpack_nested_dtype(other: _IndexT) -> _IndexT:\n \"\"\"\n When checking if our dtype is comparable with another, we need\n to unpack CategoricalDtype to look at its categories.dtype.\n\n Parameters\n ----------\n other : Index\n\n Returns\n -------\n Index\n \"\"\"\n dtype = other.dtype\n if is_categorical_dtype(dtype):\n # If there is ever a SparseIndex, this could get dispatched\n # here too.\n return dtype.categories\n return other\n\n\ndef _maybe_try_sort(result, sort):\n if sort is None:\n try:\n result = algos.safe_sort(result)\n except TypeError as err:\n warnings.warn(\n f\"{err}, sort order is undefined for incomparable objects\",\n RuntimeWarning,\n stacklevel=4,\n )\n return result\n"
] |
[
[
"numpy.all",
"pandas.core.indexes.multi.MultiIndex",
"pandas.core.indexes.range.RangeIndex",
"pandas.util._decorators.deprecate_nonkeyword_arguments",
"pandas.core.dtypes.common.ensure_object",
"numpy.where",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.reshape.merge.restore_dropped_levels_multijoin",
"pandas.core.common.cast_scalar_indexer",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas._libs.join.outer_join_indexer",
"pandas.core.dtypes.common.is_list_like",
"numpy.delete",
"numpy.array",
"pandas.core.algorithms.take",
"pandas.core.ops.comparison_op",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.common.is_null_slice",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.dtypes.missing.isna",
"pandas.core.algorithms.union_with_duplicates",
"pandas.compat.numpy.function.validate_repeat",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.sorting.ensure_key_mapped",
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas.core.arrays.datetimes.tz_to_dtype",
"pandas._libs.join.inner_join_indexer",
"pandas.core.dtypes.dtypes.IntervalDtype",
"numpy.concatenate",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.compat.numpy.function.validate_take",
"pandas.core.common.asarray_tuplesafe",
"pandas.io.formats.printing.PrettyDict",
"pandas.io.formats.format.format_array",
"pandas.core.indexes.multi.MultiIndex.from_tuples",
"pandas.core.algorithms.take_nd",
"numpy.putmask",
"pandas.core.algorithms.safe_sort",
"pandas.core.dtypes.common.is_ea_or_datetimelike_dtype",
"pandas.core.dtypes.inference.is_dict_like",
"numpy.ndim",
"pandas.core.construction.ensure_wrapped_if_datetimelike",
"numpy.errstate",
"pandas.core.indexers.deprecate_ndim_indexing",
"pandas.core.dtypes.cast.infer_dtype_from",
"pandas._libs.algos.groupsort_indexer",
"pandas.core.dtypes.common.is_integer",
"pandas.core.algorithms.unique1d",
"numpy.ones",
"pandas.compat.numpy.function.validate_min",
"pandas._libs.lib.infer_dtype",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.indexes.frozen.FrozenList",
"pandas.compat.numpy.function.validate_any",
"pandas.core.array_algos.putmask.validate_putmask",
"pandas._libs.lib.is_all_arraylike",
"pandas.compat.numpy.function.validate_all",
"numpy.hstack",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.DuplicateLabelError",
"pandas.core.common.not_none",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.arrays.numpy_.PandasArray",
"pandas.core.ops.invalid.make_invalid_op",
"pandas._libs.join.left_join_indexer_unique",
"pandas.errors.InvalidIndexError",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.cast.validate_numeric_casting",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.indexes.period._new_PeriodIndex",
"pandas.compat.numpy.function.validate_argmin",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.ops.comp_method_OBJECT_ARRAY",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"pandas.core.sorting.nargsort",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.array_equivalent",
"pandas.core.accessor.CachedAccessor",
"pandas.core.dtypes.cast.can_hold_element",
"pandas.io.formats.printing.format_object_summary",
"pandas.compat.numpy.function.validate_argmax",
"numpy.dtype",
"numpy.any",
"pandas.core.reshape.merge.get_join_indexers",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"numpy.arange",
"pandas.core.ops.get_op_result_name",
"pandas.core.arrays.Categorical",
"pandas.core.dtypes.common.is_float",
"pandas._libs.tslibs.tz_compare",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.io.formats.printing.format_object_attrs",
"pandas.util._exceptions.find_stack_level",
"numpy.abs",
"numpy.intp",
"pandas.compat.numpy.function.validate_max",
"pandas._libs.join.left_join_indexer",
"numpy.sort",
"pandas.core.arrays.datetimes.validate_tz_from_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.construction.sanitize_array",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.common.index_labels_to_array",
"pandas.core.construction.extract_array"
]
] |
cerebis/pygraphistry
|
[
"53bd7a779b9efc216301fcee94b493de9184cbc2"
] |
[
"graphistry/tigeristry.py"
] |
[
"import requests\nimport pandas as pd\n\ndef merge_dicts(x, y):\n return dict(list(x.items()) + list(y.items()))\n\nclass Tigeristry(object):\n \"\"\"Tigergraph bindings class\n\n * Initialize with DB cfg\n * Register named stored procedures and graphistry bindings\n * Call stored procedures\n * Call interpreted queries\n\n \"\"\"\n \n # ----------------- Helpers ----------------------- \n\n def __log(self, v):\n if self.tiger_config['verbose']:\n print(v)\n \n # () -> 'http://site.com:9000' \n def __base_url(self, mode = 'api'):\n port = self.tiger_config['web_port'] if mode == 'web' else self.tiger_config['api_port']\n who = \\\n (self.tiger_config['user'] + ':' + self.tiger_config['pwd'] + '@') \\\n if (not (self.tiger_config['user'] is None) and not (self.tiger_config['pwd'] is None)) \\\n else ''\n return self.tiger_config['protocol'] + '://' + who + self.tiger_config['server'] + ':' + str(port)\n \n def __check_initialized(self, graphistry):\n if (graphistry is None) or (graphistry._tigergraph is None):\n raise Exception(\"First register a tigergraph db via .tigergraph() or .register(tigergraph=)\")\n\n \n # --------------------------------------------------\n\n \n def __init__(\n self,\n graphistry,\n protocol = 'http',\n server = 'localhost',\n web_port = 14240,\n api_port = 9000,\n db = None,\n user = 'tigergraph',\n pwd = 'tigergraph',\n verbose = False\n ):\n\n self.tiger_config = {\n 'protocol': protocol,\n 'server': server,\n 'web_port': web_port,\n 'api_port': api_port,\n 'db': db,\n 'user': user,\n 'pwd': pwd,\n 'verbose': verbose\n }\n\n self.__log('TG config: ' + str({k: v for k, v in self.tiger_config.items() if k not in ['pwd']}))\n \n\n # -------------------------------------------------- \n\n\n def __verify_and_unwrap_json_result(self, json):\n \n if json is None:\n raise Exception(\"No response!\")\n elif 'error' not in json:\n raise Exception(\"Unexpected response format, no validity indicator\", json)\n elif json['error']:\n raise Exception(\"Database returned error\", json['message'] if 'message' in json else 'No message')\n elif not ('results' in json):\n raise Exception(\"No field results in database response\")\n \n return json['results']\n\n \n # str * ?dict * ?str => json graph\n def __gsql_endpoint(self, method_name, args = {}, db = None, dry_run = False):\n\n db = self.tiger_config['db'] if db is None else db\n if db is None:\n raise Exception(\"Must specify db in Tigeristry constructor or .__call()\")\n\n base_url = self.__base_url('api')\n url = base_url + '/query/' + db + '/' + method_name\n if len(args.items()) > 0:\n url = url + '?' 
+ '&'.join( [str(k) + '=' + str(v) for k, v in args.items()] )\n self.__log(url)\n\n if dry_run: \n return url\n\n resp = requests.get(url)\n self.__log(resp)\n json = resp.json()\n\n return self.__verify_and_unwrap_json_result(json)\n\n\n def __json_to_graphistry(self, graphistry, json, bindings): \n edges_df = pd.DataFrame({'from_id': [], 'to_id': []})\n edge_key = bindings['edges']\n edges = [x for x in json if edge_key in x] \n if len(edges) > 0 and (edge_key in edges[0]):\n edges = edges[0][edge_key]\n edges_df = pd.DataFrame(edges)\n try:\n edges_df = edges_df.drop(columns=['attributes'])\n attrs = [x['attributes'] for x in edges]\n edges_df = pd.merge( edges_df, pd.DataFrame(attrs), left_index=True, right_index=True )\n except:\n self.__log('Failed to extract edge attrs')\n g = graphistry.bind(source='from_id', destination='to_id').edges(edges_df)\n \n nodes_df = pd.DataFrame({'type': [], 'node_id': []})\n node_key = bindings['nodes']\n nodes = [x for x in json if node_key in x]\n if len(nodes) > 0 and (node_key in nodes[0]):\n nodes = nodes[0][node_key]\n nodes_df = pd.DataFrame(nodes)\n try:\n nodes_df = nodes_df.drop(columns=['attributes'])\n attrs = [x['attributes'] for x in nodes]\n nodes_df = pd.merge( nodes_df, pd.DataFrame(attrs), left_index=True, right_index=True )\n except:\n self.__log('Failed to extract node attrs')\n else: \n nodes_df = pd.DataFrame({'node_id': edges_df['from_id'].append(edges_df['to_id'])}) \\\n .drop_duplicates().reset_index(drop=True) \n from_types = nodes_df.merge(edges_df[['from_id', 'from_type']].rename(columns={'from_id': 'node_id', 'from_type': 'type'}), on='node_id', how='left')\n to_types = nodes_df.merge(edges_df[['to_id', 'to_type']].rename(columns={'to_id': 'node_id', 'to_type': 'type'}), on='node_id', how='left')\n nodes_df = nodes_df.merge(\n pd.DataFrame(\n {'type': \n from_types.merge(to_types, left_index=True, right_index=True)\n .apply(\n lambda row: row['type_x'] if not pd.isna(row['type_x']) else row['type_y'],\n axis=1)}),\n left_index=True, right_index=True) \n g = g.bind(node='node_id').nodes(nodes_df)\n return g\n\n\n def __gsql(self, query, dry_run = False):\n base_url = self.__base_url('web')\n url = base_url + '/gsqlserver/interpreted_query'\n self.__log(url)\n if dry_run:\n return url\n response = requests.post(url, data=query)\n json = response.json()\n return self.__verify_and_unwrap_json_result(json)\n\n\n # --------------------------------------------------\n\n # Tigeristry * Plotter * string * ?dict * ?dict * ?string => Plotter\n def gsql_endpoint(self, graphistry, method_name, args = {}, bindings = {}, db = None, dry_run = False):\n \n self.__check_initialized(graphistry)\n\n json = self.__gsql_endpoint(method_name, args, db, dry_run)\n\n if dry_run:\n url = json\n return url\n\n bindings = merge_dicts(\n {\n 'edges': '@@edgeList',\n 'nodes': '@@nodeList'\n },\n bindings\n ) \n\n return self.__json_to_graphistry(graphistry, json, bindings)\n\n\n # Tigeristry * Plotter * string * ?dict => Plotter\n def gsql(self, graphistry, query, bindings = {}, dry_run = False):\n\n self.__check_initialized(graphistry)\n\n json = self.__gsql(query, dry_run)\n\n if dry_run:\n url = json\n return url\n\n bindings = merge_dicts(\n {\n 'edges': '@@edgeList',\n 'nodes': '@@nodeList'\n },\n bindings\n ) \n\n return self.__json_to_graphistry(graphistry, json, bindings)\n"
] |
[
[
"pandas.isna",
"pandas.DataFrame"
]
] |
aioz-ai/ECCVW20_MILQT
|
[
"5fa179cb880d8915af96e9f0cd8c742f75fdc8b7",
"5fa179cb880d8915af96e9f0cd8c742f75fdc8b7"
] |
[
"evaluate.py",
"tools/adaptive_detection_features_converter.py"
] |
[
"\"\"\"\nThis code is modified from Hengyuan Hu's repository.\nhttps://github.com/hengyuan-hu/bottom-up-attention-vqa\n\"\"\"\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport os\n\nfrom dataset_VQA import Dictionary, VQAFeatureDataset\nimport dataset_TDIUC\nimport base_model\nfrom train import evaluate\nimport utils\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # MODIFIABLE MILQT HYPER-PARAMETERS--------------------------------------------------------------------------------\n # Model loading/saving\n parser.add_argument('--split', type=str, default='val')\n parser.add_argument('--input', type=str, default='saved_models/MILQT',\n help='input file directory for loading a model')\n parser.add_argument('--output', type=str, default='results/MILQT',\n help='output file directory for saving VQA answer prediction file')\n # Utilities\n parser.add_argument('--epoch', type=int, default=12,\n help='the best epoch')\n\n # Gradient accumulation\n parser.add_argument('--batch_size', type=int, default=64,\n help='batch size')\n\n # Choices of models\n parser.add_argument('--model', type=str, default='MILQT', choices=['MILQT'],\n help='the model we use')\n parser.add_argument('--comp_attns', type=str, default='BAN_COUNTER,BAN,SAN',\n help='list of attention components. Note that, component attentions are seperated by commas, e.g. <BAN_COUNTER,BAN,SAN>.')\n\n # INTERACTION LEARNING COMPONENTS HYPER-PARAMETERS------------------------------------------------------------------\n # BAN\n parser.add_argument('--gamma', type=int, default=2,\n help='glimpse in Bilinear Attention Networks')\n parser.add_argument('--use_counter', action='store_true', default=False,\n help='use counter module')\n\n # Stacked Attention Networks\n parser.add_argument('--num_stacks', default=2, type=int,\n help='num of stacks in Stack Attention Networks')\n\n #CONSTANT HYPER-PARAMETERS (Advanced hyper-params for testing, experimenting or fine-tuning)------------------------\n # Utilities - gpu\n parser.add_argument('--gpu', type=int, default=0,\n help='specify index of GPU using for training, to use CPU: -1')\n\n #Bounding box set\n parser.add_argument('--max_boxes', default=50, type=int, metavar='N',\n help='number of maximum bounding boxes for K-adaptive')\n\n # Question embedding\n parser.add_argument('--op', type=str, default='c',\n help='concatenated 600-D word embedding')\n\n # Joint representation C dimension\n parser.add_argument('--num_hid', type=int, default=1024,\n help='dim of joint semantic features')\n\n # MILQT hyper-params\n parser.add_argument('--combination_operator', type=str, default='mul', choices=['add', 'mul'],\n help='multi-level multi-model operation')\n parser.add_argument('--question_type_mapping', type=str, default='question_type_mapping.txt',\n help='the path of question type mapping file')\n parser.add_argument('--counter_act', type=str, default='zhang', choices=['zhang'],\n help='the counter activation')\n parser.add_argument('--activation', type=str, default='swish', choices=['relu', 'swish'],\n help='the activation to use for final classifier')\n parser.add_argument('--dropout', default=0.45, type=float, metavar='dropout',\n help='dropout of rate of final classifier')\n\n # Use MoD features\n parser.add_argument('--use_MoD', action='store_true', default=False,\n help='Using MoD features')\n parser.add_argument('--MoD_dir', type=str,\n help='MoD features dir')\n\n # Train with TDIUC\n parser.add_argument('--use_TDIUC', 
action='store_true', default=False,\n help='Using TDIUC dataset to train')\n parser.add_argument('--TDIUC_dir', type=str,\n help='TDIUC dir')\n\n # Return args\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n print('Evaluate a given model optimized by training split using validation split.')\n args = parse_args()\n print(args)\n torch.backends.cudnn.benchmark = True\n args.device = torch.device(\"cuda:\" + str(args.gpu) if args.gpu >= 0 else \"cpu\")\n\n if args.use_TDIUC:\n dictionary = dataset_TDIUC.Dictionary.load_from_file(os.path.join(args.TDIUC_dir, 'dictionary.pkl'))\n eval_dset = dataset_TDIUC.VQAFeatureDataset(args.split, args, dictionary, adaptive=True)\n else:\n dictionary = Dictionary.load_from_file('data/dictionary.pkl')\n eval_dset = VQAFeatureDataset(args.split, args, dictionary, adaptive=True)\n\n n_device = torch.cuda.device_count()\n batch_size = args.batch_size * n_device\n\n constructor = 'build_%s' % args.model\n model = getattr(base_model, constructor)(eval_dset, args.num_hid, args.op, args.gamma)\n print(model)\n eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=1, collate_fn=utils.trim_collate)\n\n model_path = args.input + '/model_epoch%s.pth' % args.epoch\n print('loading %s' % model_path)\n model_data = torch.load(model_path)\n\n # Comment because do not use multi gpu\n # model = nn.DataParallel(model)\n model = model.to(args.device)\n model.load_state_dict(model_data.get('model_state', model_data))\n\n print(\"Evaluating...\")\n model.train(False)\n eval_score, bound, eval_question_type_score, eval_question_type_upper_bound = evaluate(model, eval_loader, args)\n print('\\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))\n print('\\tqt_eval score: %.2f (%.2f)' % (100 * eval_question_type_score, 100 * eval_question_type_upper_bound))",
"\"\"\"\nThis code is modified from Hengyuan Hu's repository.\nhttps://github.com/hengyuan-hu/bottom-up-attention-vqa\n\nReads in a tsv file with pre-trained bottom up attention features \nof the adaptive number of boxes and stores it in HDF5 format. \nAlso store {image_id: feature_idx} as a pickle file.\n\nHierarchy of HDF5 file:\n\n{ 'image_features': num_boxes x 2048\n 'image_bb': num_boxes x 4\n 'spatial_features': num_boxes x 6\n 'pos_boxes': num_images x 2 }\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport base64\nimport csv\nimport h5py\nimport _pickle as cPickle\nimport numpy as np\nimport utils\nimport os\n\ncsv.field_size_limit(sys.maxsize)\n\ndef extract(split, infiles, outfiles):\n FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']\n data_file = {\n 'train': os.path.join(outfiles, 'train.hdf5'),\n 'val': os.path.join(outfiles, 'val.hdf5'),\n 'test': os.path.join(outfiles, 'test2015.hdf5')}\n indices_file = {\n 'train': 'data/train_imgid2idx.pkl',\n 'val': 'data/val_imgid2idx.pkl',\n 'test': 'data/test2015_imgid2idx.pkl'}\n ids_file = {\n 'train': 'data/train_ids.pkl',\n 'val': 'data/val_ids.pkl',\n 'test': 'data/test2015_ids.pkl'}\n path_imgs = {\n 'train': 'data/train2014',\n 'val': 'data/val2014',\n 'test': 'data/test2015'\n }\n known_num_boxes = {\n 'train': 2643089,\n 'val': 1281164,\n 'test': 2566887,\n }\n feature_length = 3072\n min_fixed_boxes = 10\n max_fixed_boxes = 100\n\n if os.path.exists(ids_file[split]):\n imgids = cPickle.load(open(ids_file[split], 'rb'))\n else:\n imgids = utils.load_imageid(path_imgs[split])\n cPickle.dump(imgids, open(ids_file[split], 'wb'))\n\n h = h5py.File(data_file[split], 'w')\n\n if known_num_boxes[split] is None:\n num_boxes = 0\n for infile in infiles:\n print(\"reading tsv...%s\" % infile)\n with open(infile, \"r+\") as tsv_in_file:\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames=FIELDNAMES)\n for item in reader:\n item['num_boxes'] = int(item['num_boxes'])\n image_id = int(item['image_id'])\n if image_id in imgids:\n num_boxes += item['num_boxes']\n else:\n num_boxes = known_num_boxes[split]\n\n print('num_boxes=%d' % num_boxes)\n\n img_features = h.create_dataset(\n 'image_features', (num_boxes, feature_length), 'f')\n img_bb = h.create_dataset(\n 'image_bb', (num_boxes, 4), 'f')\n spatial_img_features = h.create_dataset(\n 'spatial_features', (num_boxes, 6), 'f')\n pos_boxes = h.create_dataset(\n 'pos_boxes', (len(imgids), 2), dtype='int32')\n\n counter = 0\n num_boxes = 0\n indices = {}\n\n for infile in infiles:\n unknown_ids = []\n print(\"reading tsv...%s\" % infile)\n with open(infile, \"r+\") as tsv_in_file:\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames=FIELDNAMES)\n for item in reader:\n item['num_boxes'] = int(item['num_boxes'])\n item['boxes'] = bytes(item['boxes'], 'utf')\n item['features'] = bytes(item['features'], 'utf')\n image_id = int(item['image_id'])\n image_w = float(item['image_w'])\n image_h = float(item['image_h'])\n bboxes = np.frombuffer(\n base64.b64decode(item['boxes']),\n dtype=np.float64).reshape((item['num_boxes'], -1))\n\n box_width = bboxes[:, 2] - bboxes[:, 0]\n box_height = bboxes[:, 3] - bboxes[:, 1]\n scaled_width = box_width / image_w\n scaled_height = box_height / image_h\n scaled_x = bboxes[:, 0] / image_w\n scaled_y = bboxes[:, 1] / image_h\n\n box_width = box_width[..., np.newaxis]\n box_height = box_height[..., 
np.newaxis]\n scaled_width = scaled_width[..., np.newaxis]\n scaled_height = scaled_height[..., np.newaxis]\n scaled_x = scaled_x[..., np.newaxis]\n scaled_y = scaled_y[..., np.newaxis]\n\n spatial_features = np.concatenate(\n (scaled_x,\n scaled_y,\n scaled_x + scaled_width,\n scaled_y + scaled_height,\n scaled_width,\n scaled_height),\n axis=1)\n\n if image_id in imgids:\n imgids.remove(image_id)\n indices[image_id] = counter\n pos_boxes[counter,:] = np.array([num_boxes, num_boxes + item['num_boxes']])\n img_bb[num_boxes:num_boxes+item['num_boxes'], :] = bboxes\n img_features[num_boxes:num_boxes+item['num_boxes'], :] = np.frombuffer(\n base64.b64decode(item['features']),\n dtype=np.float).reshape((item['num_boxes'], -1))\n spatial_img_features[num_boxes:num_boxes+item['num_boxes'], :] = spatial_features\n counter += 1\n num_boxes += item['num_boxes']\n else:\n unknown_ids.append(image_id)\n\n print('%d unknown_ids...' % len(unknown_ids))\n print('%d image_ids left...' % len(imgids))\n\n if len(imgids) != 0:\n print('Warning: %s_image_ids is not empty' % split)\n\n cPickle.dump(indices, open(indices_file[split], 'wb'))\n h.close()\n print(\"done!\")\n\nif __name__ == '__main__':\n outfiles = 'data/faster_rcnn_fpn_101_1x/tsvfile'\n\n # infile = ['data/trainval/X/test2014_fpn.tsv',\n # 'data/trainval/X/train2014_fpn.tsv.0',\n # 'data/trainval/X/train2014_fpn.tsv.1',\n # 'data/trainval/X/val2014_fpn.tsv']\n\n infile = [os.path.join(outfiles, 'test2014_extend.tsv'),\n os.path.join(outfiles, 'train2014_extend.tsv.0'),\n os.path.join(outfiles, 'train2014_extend.tsv.1'),\n os.path.join(outfiles, 'val2014_extend.tsv')]\n #\n extract('train', infile, outfiles)\n extract('val', infile, outfiles)\n infile = [os.path.join(outfiles, 'test2015_extend.tsv')]\n extract('test', infile, outfiles)\n\n\n "
] |
[
[
"torch.cuda.device_count",
"torch.utils.data.DataLoader",
"torch.load"
],
[
"numpy.concatenate",
"numpy.array"
]
] |
opendilab/DI-smartcross
|
[
"362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2"
] |
[
"entry/sumo_config/sumo_wj3_rainbow_default_config.py"
] |
[
"from easydict import EasyDict\nfrom torch import nn\n\nnstep = 3\nsumo_rainbow_default_config = dict(\n exp_name='sumo_wj3_md_rainbow_dqn',\n env=dict(\n manager=dict(\n # Whether to use shared memory. Only effective if manager type is 'subprocess'\n shared_memory=False,\n context='spawn',\n retry_type='renew',\n max_retry=2,\n ),\n # Episode number for evaluation.\n n_evaluator_episode=1,\n # Once evaluation reward reaches \"stop_value\", which means the policy converges, the training can end.\n stop_value=0,\n collector_env_num=15,\n evaluator_env_num=1,\n ),\n policy=dict(\n # Whether to use cuda for network.\n cuda=True,\n # Whether use priority\n priority=True,\n priority_IS_weight=True,\n # Reward's future discount facotr, aka. gamma.\n discount_factor=0.99,\n # How many steps in td error.\n nstep=nstep,\n # Model config used for model creating. Remember to change \"obs_shape\" and \"action_shape\" according to env.\n model=dict(\n obs_shape=442,\n action_shape=[4, 4, 4],\n v_max=10,\n v_min=-10,\n n_atom=51,\n activation=nn.Tanh(),\n ),\n # learn_mode config\n learn=dict(\n # How many steps to train after one collection. Bigger \"update_per_collect\" means bigger off-policy.\n # collect data -> train fixed steps -> collect data -> ...\n update_per_collect=200,\n batch_size=64,\n learning_rate=1e-4,\n target_update_freq=100,\n learner=dict(\n hook=dict(\n save_ckpt_after_iter=1000,\n log_show_after_iter=1000,\n ),\n ),\n ),\n # collect_mode config\n collect=dict(\n # Cut trajectories into pieces with length \"unrol_len\".\n unroll_len=1,\n # You can use either \"n_sample\" or \"n_episode\" in collector.collect.\n # Get \"n_sample\" samples per collect.\n n_sample=600,\n # Get \"n_episode\" complete episodic trajectories per collect.\n # n_episode=8,\n collector=dict(\n # Get \"n_episode\" complete episodic trajectories per collect.\n # n_episode=8,\n transform_obs=True,\n collect_print_freq=1000,\n ),\n ),\n eval=dict(\n evaluator=dict(\n # Evaluate every \"eval_freq\" training steps.\n eval_freq=1000,\n )\n ),\n # command_mode config\n other=dict(\n # Epsilon greedy with decay.\n eps=dict(\n # Decay type. Support ['exp', 'linear'].\n type='exp',\n start=0.95,\n end=0.1,\n decay=50000,\n ),\n replay_buffer=dict(\n replay_buffer_size=400000,\n max_use=10000,\n monitor=dict(\n sampled_data_attr=dict(print_freq=300, ),\n periodic_thruput=dict(seconds=300, ),\n ),\n ),\n )\n ),\n)\n\ncreate_config = dict(\n env_manager=dict(type='subprocess', ),\n env=dict(\n # Must use the absolute path. All the following \"import_names\" should obey this too.\n import_names=['smartcross.envs.sumo_env'],\n type='sumo_env',\n ),\n # RL policy register name (refer to function \"register_policy\").\n policy=dict(\n import_names=['dizoo.common.policy.md_rainbow_dqn'],\n type='md_rainbow_dqn',\n ),\n)\n\ncreate_config = EasyDict(create_config)\nsumo_rainbow_default_config = EasyDict(sumo_rainbow_default_config)\nmain_config = sumo_rainbow_default_config\n"
] |
[
[
"torch.nn.Tanh"
]
] |
bnoi/scikit-tracker
|
[
"8771006a3a682ab299e4446c800c05f36a027df7"
] |
[
"sktracker/utils/tests/test_progress.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport io\n\nimport pandas as pd\nimport numpy as np\n\nfrom sktracker.utils import progress_apply\nfrom sktracker.utils import print_progress\n\n\ndef test_print_progress():\n out = io.StringIO()\n print_progress(15, out=out)\n output = out.getvalue().strip()\n bar = \"15% [======> ]\"\n assert bar == output\n\n\ndef test_print_progress_remove_bar():\n out = io.StringIO()\n print_progress(-1, out=out)\n output = out.getvalue().strip()\n bar = \"\"\n assert bar == output\n\n\ndef test_progress_apply():\n df = pd.DataFrame(np.random.choice(range(100), (1000000, 4)), columns=['A', 'B', 'C', 'D'])\n gp = df.groupby('A')\n out = io.StringIO()\n progress_apply(gp, lambda x: np.sqrt((x**2) / 1e99), out=out)\n progress = out.getvalue().strip()\n assert '50% [========================> ]' in progress\n"
] |
[
[
"numpy.sqrt"
]
] |
jaspereb/DM-Count
|
[
"48c7636223e5244b35ddb222f2075368c03fbb66"
] |
[
"losses/ot_loss.py"
] |
[
"import torch\nfrom torch.nn import Module\nfrom .bregman_pytorch import sinkhorn\n\nclass OT_Loss(Module):\n def __init__(self, c_size, stride, norm_cood, device, num_of_iter_in_ot=100, reg=10.0):\n super(OT_Loss, self).__init__()\n assert c_size % stride == 0\n\n self.c_size = c_size\n self.device = device\n self.norm_cood = norm_cood\n self.num_of_iter_in_ot = num_of_iter_in_ot\n self.reg = reg\n\n # coordinate is same to image space, set to constant since crop size is same\n self.cood = torch.arange(0, c_size, step=stride,\n dtype=torch.float32, device=device) + stride / 2\n self.density_size = self.cood.size(0)\n self.cood.unsqueeze_(0) # [1, #cood]\n if self.norm_cood:\n self.cood = self.cood / c_size * 2 - 1 # map to [-1, 1]\n self.output_size = self.cood.size(1)\n\n\n def forward(self, normed_density, unnormed_density, points):\n batch_size = normed_density.size(0)\n assert len(points) == batch_size\n assert self.output_size == normed_density.size(2)\n loss = torch.zeros([1]).to(self.device)\n ot_obj_values = torch.zeros([1]).to(self.device)\n wd = 0 # wasserstain distance\n for idx, im_points in enumerate(points):\n if len(im_points) > 0:\n # compute l2 square distance, it should be source target distance. [#gt, #cood * #cood]\n if self.norm_cood:\n im_points = im_points / self.c_size * 2 - 1 # map to [-1, 1]\n x = im_points[:, 0].unsqueeze_(1) # [#gt, 1]\n y = im_points[:, 1].unsqueeze_(1)\n x_dis = -2 * torch.matmul(x, self.cood) + x * x + self.cood * self.cood # [#gt, #cood]\n y_dis = -2 * torch.matmul(y, self.cood) + y * y + self.cood * self.cood\n y_dis.unsqueeze_(2)\n x_dis.unsqueeze_(1)\n dis = y_dis + x_dis\n dis = dis.view((dis.size(0), -1)) # size of [#gt, #cood * #cood]\n\n source_prob = normed_density[idx][0].view([-1]).detach()\n target_prob = (torch.ones([len(im_points)]) / len(im_points)).to(self.device)\n # use sinkhorn to solve OT, compute optimal beta.\n P, log = sinkhorn(target_prob, source_prob, dis, self.reg, maxIter=self.num_of_iter_in_ot, log=True)\n beta = log['beta'] # size is the same as source_prob: [#cood * #cood]\n ot_obj_values += torch.sum(normed_density[idx] * beta.view([1, self.output_size, self.output_size]))\n # compute the gradient of OT loss to predicted density (unnormed_density).\n # im_grad = beta / source_count - < beta, source_density> / (source_count)^2\n source_density = unnormed_density[idx][0].view([-1]).detach()\n source_count = source_density.sum()\n im_grad_1 = (source_count) / (source_count * source_count+1e-8) * beta # size of [#cood * #cood]\n im_grad_2 = (source_density * beta).sum() / (source_count * source_count + 1e-8) # size of 1\n im_grad = im_grad_1 - im_grad_2\n im_grad = im_grad.detach().view([1, self.output_size, self.output_size])\n # Define loss = <im_grad, predicted density>. The gradient of loss w.r.t prediced density is im_grad.\n loss += torch.sum(unnormed_density[idx] * im_grad)\n wd += torch.sum(dis * P).item()\n\n return loss, wd, ot_obj_values\n\n\n"
] |
[
[
"torch.sum",
"torch.zeros",
"torch.matmul",
"torch.arange"
]
] |
AndreaCeolin/Functionalism_Contrast_Change
|
[
"1557a4c76c253c7db292e503d6bd5cff5cea2d93"
] |
[
"Chapter2/Fig2.12-contr-biginventory/sound_change-contr-biginventory.py"
] |
[
"#!/usr/bin/env python3\r\n\r\nimport random\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\n'''\r\n1. ALPHABET and LEXICON\r\n\r\nThis part defines the alphabet of the language, and maps symbols to indexes.\r\n\r\nIn this version, the vowel dictionary maps 30 different symbols onto different indexes and\r\nthe consonant dictionary maps 52 different symbols onto different indexes.\r\n'''\r\n\r\nvowels = {'i': 0, 'e': 1, 'a': 2, 'o': 3, 'u': 4, 'ou':5, 'ei':6, 'ea':7, 'ee':8, 'oo':9, 'ai':10, 'oa':11,\r\n 'oi':12, 'io':13, 'ie':14, '1':15, '2':16, '3':17, '4':18, '5':19, '6':20, '7':21, '8':22, '9':23,\r\n '10':24, '11':25, '12':26, '13':27, '14':28, '15':29}\r\nconsonants = {'m': 0, 'p':1, 'b':2, 'f': 3, 'v': 4, 'd': 5, 't': 6, 'l': 7, 'n': 8, 'r': 9, 's': 10, 'k': 11,\r\n 'y': 12, 'g': 13, 'j':14, 'h': 15, 'c':16, ' ':17, 'th':18, 'sh':19, 'wh':20, 'ch':21, 'tw':22,\r\n 'x':23, 'w':24, 'z':25, '1':26, '2':27, '3':28, '4':29, '5':30, '6':31, '7':32, '8':33, '9':34,\r\n '10':35, '11':36, '12':37, '13':38, '14':39, '15':40, '16':41, '17':42, '18':43, '19':44, '20':45,\r\n '21':46, '22':47, '23':48, '24':49, '25':50, '26':51}\r\n\r\n'''\r\nWith this dictionary, we can represent each word as an integer tuple. Words are initially read from a text file\r\nin the form of 3-dimensional tuples of the type (onset, nucleus, coda). For instance the word \"dog\" will be \r\nprocessed in the form of a 3-dimensional tuple ('d', 'o', 'g'). Then, we can use the two dictionaries to transform \r\neach word into a integer vector, through a helper function, for computation purposes. The word \"dog\" will be \r\ntransformed in the tuple (5, 3, 13).\r\n'''\r\n\r\ndog = ('d', 'o', 'g')\r\n\r\ndef vectorize(word):\r\n onset, nucleus, coda = word\r\n return consonants[onset], vowels[nucleus], consonants[coda]\r\n\r\nprint(vectorize(dog))\r\n\r\n\r\n'''\r\nThe lexicon is a list of words (tuples), and is stored as a global variable, since we need to modify it as sound change\r\noccurs. A wordlist containing the words \"dog\", \"cat\", and \"pig\" will be represented by the following variable:\r\n'''\r\n\r\nwordlist = [('d', 'o', 'g'), ('c', 'a', 't'), ('p', 'i', 'g')]\r\n\r\n'''\r\nSince we want to keep track of the number of symbols and the possible environments, we also store three sets that\r\ncontain the onsets, nuclei and codas available in the lexicon in its current state. 
These three sets are all \r\nglobal variables.\r\n'''\r\n\r\ndef get_onset(wordlist):\r\n return {word[0] for word in wordlist}\r\n\r\ndef get_nucleus(wordlist):\r\n return {word[1] for word in wordlist}\r\n\r\ndef get_coda(wordlist):\r\n return {word[2] for word in wordlist}\r\n\r\n\r\nonset, nucleus, coda = get_onset(wordlist), get_nucleus(wordlist), get_coda(wordlist)\r\n\r\nprint(onset)\r\nprint(nucleus)\r\nprint(coda)\r\n\r\n\r\n'''\r\nFor illustratory purposes, we need a reverse-dictionary, which can be used to retrieve the symbols given their index,\r\nand a helper function to retrieve the word (in string format) given its integer vector representation:\r\n'''\r\n\r\nrev_vowels = {code: letter for letter, code in vowels.items()}\r\nrev_consonants = {code: letter for letter, code in consonants.items()}\r\n\r\n\r\ndog = (5, 3, 13)\r\n\r\ndef vectorize_inverse(word):\r\n onset, nucleus, coda = word\r\n return rev_consonants[onset] + rev_vowels[nucleus] + rev_consonants[coda]\r\n\r\nprint(vectorize_inverse(dog))\r\n\r\n\r\n'''\r\nAnother helper function that we need is a function that returns the average Levenshtein distance within a wordlist:\r\n'''\r\n\r\ndef average(wordlist):\r\n av_length = []\r\n for index, word in enumerate(wordlist):\r\n for word2 in wordlist[index+1:]:\r\n lev = 0\r\n for i, letter in enumerate(word):\r\n if word2[i] != letter:\r\n lev += 1\r\n av_length.append(lev)\r\n return sum(av_length)/len(av_length)\r\n\r\n\r\nprint(average(wordlist))\r\n\r\n\r\n'''\r\n2. SOUND CHANGE FUNCTIONS\r\n\r\nThis part defines the sound change functions. These functions modify the lexicon by applying sound changes.\r\nThe first function represents a sound change that targets the onset of the word.\r\n'''\r\n\r\ndef change_onset():\r\n #call the lexicon list and the onset set\r\n global lexicon, onset\r\n #prepare a new empty list, that will be filled with the form of the words after the sound change applies\r\n new_lexicon = []\r\n #pick an onset at random and name it target. This is the target of the sound change\r\n target = random.choice(list(onset))\r\n #pick an onset at random and name it outcome. This is the outcome of the sound change\r\n outcome = random.choice(list(rev_consonants))\r\n #select a random subset of nuclei as the conditioning environment\r\n environment = random.sample(nucleus, random.randint(0, len(nucleus) - 1))\r\n #apply the change to the lexicon\r\n for word in lexicon:\r\n #check words where target is the onset\r\n if word[0] == target:\r\n #determine whether the nucleus is in the conditioning environment\r\n if word[1] in environment:\r\n #if the nucleus is in the conditioning environment, then change target into outcome\r\n new_lexicon.append((outcome, word[1], word[2]))\r\n else:\r\n #if not, the change does not apply\r\n new_lexicon.append(word)\r\n else:\r\n #if the word does not start with target, the change does not apply\r\n new_lexicon.append(word)\r\n #this prints a line describing the change that happened\r\n print('/' + rev_consonants[target] + '/ becomes /' + rev_consonants[outcome] + '/ in onset before ['\r\n + ' '.join([rev_vowels[index] for index in environment]) + ']')\r\n #Update lexicon and onsets\r\n lexicon = new_lexicon\r\n onset = get_onset(lexicon)\r\n\r\n\r\n'''\r\nThe following two functions will apply a change to the nucleus. 
The only difference between the two is whether\r\nthe conditioning environment is the onset or the coda.\r\n'''\r\n\r\ndef change_nucleus():\r\n #call the lexicon list and the nucleus set\r\n global lexicon, nucleus\r\n #prepare a new empty list, that will be filled with the form of the words after the sound change applies\r\n new_lexicon = []\r\n #pick a nucleus at random and name it target. This is the target of the sound change\r\n target = random.choice(list(nucleus))\r\n #pick a nucleus at random and name it outcome. This is the outcome of the sound change\r\n outcome = random.choice(list(rev_vowels))\r\n #select a random subset of onsets as the conditioning environment\r\n environment = random.sample(onset, random.randint(0, len(onset) - 1))\r\n #apply the change to the lexicon\r\n for word in lexicon:\r\n #check words where target is the nucleus\r\n if word[1] == target:\r\n #determine whether the onset is in the conditioning environment\r\n if word[0] in environment:\r\n #if the onset is in the conditioning environment, then change target into outcome\r\n new_lexicon.append((word[0], outcome, word[2]))\r\n else:\r\n #if not, the change does not apply\r\n new_lexicon.append(word)\r\n else:\r\n #if the word does not have target, the change does not apply\r\n new_lexicon.append(word)\r\n #this prints a line describing the change that happened\r\n print('/' + rev_vowels[target] + '/ becomes /' + rev_vowels[outcome] + '/ after ['\r\n + ' '.join([rev_consonants[index] for index in environment]) + ']')\r\n #Update lexicon and nuclei\r\n lexicon = new_lexicon\r\n nucleus = get_nucleus(lexicon)\r\n\r\ndef change_nucleus2():\r\n #call the lexicon list and the nucleus set\r\n global lexicon, nucleus\r\n #prepare a new empty list, that will be filled with the form of the words after the sound change applies\r\n new_lexicon = []\r\n #pick a nucleus at random and name it target. This is the target of the sound change\r\n target = random.choice(list(nucleus))\r\n #pick a nucleus at random and name it outcome. This is the outcome of the sound change\r\n outcome = random.choice(list(rev_vowels))\r\n #select a random subset of codas as the conditioning environment\r\n environment = random.sample(coda, random.randint(0, len(coda) - 1))\r\n #apply the change to the lexicon\r\n for word in lexicon:\r\n #check words where target is the nucleus\r\n if word[1] == target:\r\n #determine whether the coda is in the conditioning environment\r\n if word[2] in environment:\r\n #if the coda is in the conditioning environment, then change target into outcome\r\n new_lexicon.append((word[0], outcome, word[2]))\r\n else:\r\n #if not, the change does not apply\r\n new_lexicon.append(word)\r\n else:\r\n #if the word does not have target, the change does not apply\r\n new_lexicon.append(word)\r\n print('/' + rev_vowels[target] + '/ becomes /' + rev_vowels[outcome] + '/ before ['\r\n + ' '.join([rev_consonants[index] for index in environment]) + ']')\r\n #Update lexicon and nuclei\r\n lexicon = new_lexicon\r\n nucleus = get_nucleus(lexicon)\r\n\r\n\r\n'''\r\nFinally, this function changes the coda consonant.\r\n'''\r\n\r\n\r\ndef change_coda():\r\n #call the lexicon list and the coda set\r\n global lexicon, coda\r\n #prepare a new empty list, that will be filled with the form of the words after the sound change applies\r\n new_lexicon = []\r\n #pick a coda at random and name it target. This is the target of the sound change\r\n target = random.choice(list(coda))\r\n #pick a coda at random and name it outcome. 
This is the outcome of the sound change\r\n outcome = random.choice(list(rev_consonants))\r\n #select a random subset of nuclei as the conditioning environment\r\n environment = random.sample(nucleus, random.randint(0, len(nucleus) - 1))\r\n #apply the change to the lexicon\r\n for word in lexicon:\r\n #check words where target is the coda\r\n if word[2] == target:\r\n #determine whether the nucleus is in the conditioning environment\r\n if word[1] in environment:\r\n #if the nucleus is in the conditioning environment, then change target into outcome\r\n new_lexicon.append((word[0], word[1], outcome))\r\n else:\r\n #if not, the change does not apply\r\n new_lexicon.append(word)\r\n else:\r\n #if the word does not end with target, the change does not apply\r\n new_lexicon.append(word)\r\n print('/' + rev_consonants[target] + '/ becomes /' + rev_consonants[outcome] + '/ in coda after ['\r\n + ' '.join([rev_vowels[index] for index in environment]) + ']')\r\n #Update lexicon and onsets\r\n lexicon = new_lexicon\r\n coda = get_coda(lexicon)\r\n\r\n'''\r\nNow, we add the two contraction functions:'''\r\n\r\ndef contraction_onset():\r\n #call the lexicon list and the onset and nucleus sets\r\n global lexicon, onset, nucleus\r\n new_lexicon = []\r\n #we select a CV sequence as the target of the contraction\r\n target_C, target_V = random.choice(list(onset)), random.choice(list(nucleus))\r\n #this selects an outcome among those which are not available in the language\r\n possible_outcome = [key for key in rev_consonants if key not in onset]\r\n if possible_outcome:\r\n outcome = random.choice(possible_outcome)\r\n #if all the possible onsets are already represented, pick one at random\r\n else:\r\n outcome = random.choice(list(onset))\r\n #this is the vowel added after the new onset\r\n filler = random.choice(list(nucleus))\r\n for word in lexicon:\r\n if (word[0], word[1]) == (target_C, target_V):\r\n new_lexicon.append((outcome, filler, word[2]))\r\n else:\r\n new_lexicon.append(word)\r\n #this prints a line describing the change that happened\r\n print('Contraction of /' + rev_consonants[target_C] + rev_vowels[target_V] + '/ in /' + rev_consonants[outcome] + rev_vowels[filler] + '/ in onsets')\r\n #Update lexicon and onsets\r\n lexicon = new_lexicon\r\n onset, nucleus = get_onset(lexicon), get_nucleus(lexicon)\r\n\r\n\r\ndef contraction_coda():\r\n #call the lexicon list and the onset and nucleus sets\r\n global lexicon, nucleus, coda\r\n new_lexicon = []\r\n #we select a VC sequence as the target of the contraction\r\n target_V, target_C = random.choice(list(nucleus)), random.choice(list(coda))\r\n #this selects an outcome among those which are not available in the language\r\n possible_outcome = [key for key in rev_consonants if key not in coda]\r\n if possible_outcome:\r\n outcome = random.choice(possible_outcome)\r\n #if all the possible onsets are already represented, pick one at random\r\n else:\r\n outcome = random.choice(list(coda))\r\n #this is the vowel added before the new coda\r\n filler = random.choice(list(nucleus))\r\n for word in lexicon:\r\n if (word[1], word[2]) == (target_V, target_C):\r\n new_lexicon.append((word[0], filler, outcome))\r\n else:\r\n new_lexicon.append(word)\r\n #this prints a line describing the change that happened\r\n print('Contraction of /' + rev_vowels[target_V]+rev_consonants[target_C] + '/ in /' + rev_vowels[filler] + rev_consonants[outcome] + '/ in codas')\r\n #Update lexicon and onsets\r\n lexicon = new_lexicon\r\n nucleus, coda = 
get_nucleus(lexicon), get_coda(lexicon)\r\n\r\n\r\n\r\n\r\n'''\r\n3. THE SOUND CHANGE SIMULATIONS\r\n\r\nThe following function initiates the sound change simulations and prints the graphs presented in the chapter.\r\nThe functions takes three arguments: the name of the file containing the wordlist, the number of changes, and the\r\nnumber of simulations.\r\n'''\r\n\r\n\r\ndef main(file, n_changes, iterations):\r\n for i in range(int(iterations)):\r\n print('#######Language Change is happening!')\r\n global onset, nucleus, coda, lexicon\r\n #the initial lexicon is read from a text file. Onsets, nuclei and codas are separated by a '-'\r\n initial_lexicon = [element.strip('\\n').split('-') for element in open(file)]\r\n #this line loads the lexicon in the format described above: a list of integer tuples\r\n lexicon = [(consonants[word[0]], vowels[word[1]], consonants[word[2]]) for word in initial_lexicon]\r\n #this line gets the onset, nucleus, and coda sets\r\n onset, nucleus, coda = get_onset(lexicon), get_nucleus(lexicon), get_coda(lexicon)\r\n #this line will be used to define the sound change functions used in the simulation and their weight\r\n #with this setting, each function is equally weighted\r\n functions = [change_onset, change_nucleus, change_nucleus2, change_coda, contraction_onset, contraction_coda]\r\n #we initialize lists that will keep track of the number of the iteration, the number of the phonemes,\r\n #and the average distance\r\n x_axis = [0]\r\n phonemes = [len(onset.union(coda)) + len(nucleus)]\r\n av_length = [average(lexicon)]\r\n for n in range(int(n_changes)):\r\n #this line selects a sound change function at random and applies it\r\n random.choice(functions)()\r\n #This is needed to make the plot lighter. For the toy example in Figure 2.2, '500' has been reduced to '1'\r\n if n % 500 == 0:\r\n #we update the lists that keep track of the number of the iteration, the number of the phonemes, and the\r\n #average distance\r\n x_axis.append(n+1)\r\n phonemes.append(len(onset.union(coda)) + len(nucleus))\r\n av_length.append(average(lexicon))\r\n #this loop prints the shape of the lexicon at the beginning of the simulation and after the\r\n #sound changes applied\r\n for index, word in enumerate(lexicon):\r\n print(''.join(initial_lexicon[index]) + '->' + ''.join(rev_consonants[word[0]] + rev_vowels[word[1]] + rev_consonants[word[2]]))\r\n print('#######Language Change is finished!')\r\n print('###################################!')\r\n #After the simulation has ended, we plot the change in the number of phonemes and in the average distance\r\n #during the simulation\r\n #plot phoneme size\r\n plt.subplot(1, 2, 1)\r\n plt.plot(x_axis, phonemes)\r\n #plt.xticks(np.arange(1, 4, step=1)) #This is for the toy example in Figure 2.2\r\n #plt.yticks(np.arange(36, 39, step=1)) #This is for the toy example in Figure 2.2\r\n plt.title('Number of Phonemes')\r\n plt.xlabel('Iterations')\r\n plt.ylabel('Counts')\r\n #plot av_length\r\n plt.subplot(1, 2, 2)\r\n plt.plot(x_axis, av_length)\r\n #plt.xticks(np.arange(1, 4, step=1)) #This is for the toy example in Figure 2.2\r\n plt.title('Average Levenshtein Distance')\r\n plt.xlabel('Iterations')\r\n plt.ylabel('Counts')\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print('###################################!')\r\n main(sys.argv[1], sys.argv[2], sys.argv[3])\r\n\r\n\r\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
jbueltemeier/pystiche
|
[
"0d0707121e63c4355303446e62a4894e86a7b763",
"0d0707121e63c4355303446e62a4894e86a7b763"
] |
[
"pystiche/misc/misc.py",
"tests/test_enc.py"
] |
[
"import contextlib\nimport hashlib\nimport itertools\nimport random\nimport shutil\nimport tempfile\nimport warnings\nfrom collections import OrderedDict\nfrom functools import reduce as _reduce\nfrom operator import mul\nfrom os import path\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\nimport requests\n\nimport torch\nfrom torch import nn\nfrom torch.hub import _get_torch_home\n\n__all__ = [\n \"prod\",\n \"to_1d_arg\",\n \"to_2d_arg\",\n \"to_3d_arg\",\n \"zip_equal\",\n \"to_eng\",\n \"to_engstr\",\n \"to_tuplestr\",\n \"to_engtuplestr\",\n \"build_fmtstr\",\n \"format_dict\",\n \"verify_str_arg\",\n \"build_obj_str\",\n \"build_complex_obj_repr\",\n \"is_almost\",\n \"make_reproducible\",\n \"get_input_image\",\n \"get_tmp_dir\",\n \"get_sha256_hash\",\n \"save_state_dict\",\n \"build_deprecation_message\",\n \"warn_deprecation\",\n \"get_device\",\n \"download_file\",\n \"reduce\",\n]\n\n\ndef prod(iterable: Iterable) -> Any:\n return _reduce(mul, iterable)\n\n\nT = TypeVar(\"T\")\n\n\ndef _to_nd_arg(dims: int) -> Callable[[Union[T, Sequence[T]]], Tuple[T, ...]]:\n def to_nd_arg(x: Union[T, Sequence[T]]) -> Tuple[T, ...]:\n if x is None:\n msg = build_deprecation_message( # type: ignore[unreachable]\n \"Passing None as argument\",\n \"0.4.0\",\n info=\"If you need this behavior, please implement it in the caller.\",\n )\n warnings.warn(msg)\n return None\n\n if isinstance(x, Sequence):\n if len(x) != dims:\n raise RuntimeError\n return tuple(x)\n else:\n return tuple(itertools.repeat(x, dims))\n\n return to_nd_arg\n\n\nto_1d_arg = _to_nd_arg(1)\nto_2d_arg = _to_nd_arg(2)\nto_3d_arg = _to_nd_arg(3)\n\n\ndef zip_equal(*sequences: Sequence) -> Iterable:\n numel = len(sequences[0])\n if not all([len(sequence) == numel for sequence in sequences[1:]]):\n raise RuntimeError(\"All sequences should have the same length\")\n return zip(*sequences)\n\n\ndef to_eng(num: float, eps: float = 1e-8) -> Tuple[float, int]:\n if np.abs(num) < eps:\n return 0.0, 0\n\n exp = np.floor(np.log10(np.abs(num))).astype(np.int)\n exp -= np.mod(exp, 3)\n sig = num * 10.0 ** -exp\n\n return sig, exp\n\n\ndef to_engstr(\n num: float, digits: int = 4, exp_sep: str = \"e\", eps: float = 1e-8\n) -> str:\n sig, exp = to_eng(num, eps=eps)\n mag = np.abs(sig)\n\n if mag < 1.0 - eps:\n return \"0\"\n\n fmt_str = \"{:.\" + str(digits) + \"g}\"\n\n if exp == -3 and mag > 1.0 + eps:\n return fmt_str.format(num)\n\n sigstr = fmt_str.format(sig)\n expstr = (exp_sep + str(exp)) if exp != 0 else \"\"\n return sigstr + expstr\n\n\n# TODO: has this function any purpose?\ndef to_tuplestr(sequence: Sequence) -> str:\n sequence = [str(item) for item in sequence]\n if len(sequence) == 0:\n values = \"\"\n elif len(sequence) == 1:\n values = sequence[0] + \",\"\n else:\n values = \", \".join(sequence)\n return f\"({values})\"\n\n\ndef to_engtuplestr(sequence: Sequence, **kwargs: Any) -> str:\n return to_tuplestr([to_engstr(item, **kwargs) for item in sequence])\n\n\n# FIXME: add padding\n# FIXME: add sign\n# see https://pyformat.info/#param_align\ndef build_fmtstr(\n id: Optional[Union[int, str]] = None,\n align: Optional[str] = None,\n field_len: Optional[Union[int, str]] = None,\n precision: Optional[Union[int, str]] = None,\n type: Optional[str] = None,\n) -> str:\n fmtstr = r\"{\"\n if id is not None:\n fmtstr += str(id)\n fmtstr += \":\"\n if align is not None:\n fmtstr += align\n if field_len is not None:\n fmtstr += 
str(field_len)\n if precision is not None:\n fmtstr += \".\" + str(precision)\n if type is not None:\n fmtstr += type\n fmtstr += r\"}\"\n return fmtstr\n\n\n# FIXME: this should be able to handle multi line values\ndef format_dict(\n dct: Dict[str, Any], sep: str = \": \", key_align: str = \"<\", value_align: str = \"<\"\n) -> str:\n key_field_len, val_field_len = [\n max(lens)\n for lens in zip(*[(len(key), len(str(val))) for key, val in dct.items()])\n ]\n\n fmtstr = build_fmtstr(id=0, align=key_align, field_len=key_field_len, type=\"s\")\n fmtstr += sep\n fmtstr += build_fmtstr(id=1, align=value_align, field_len=val_field_len, type=\"s\")\n\n lines = [fmtstr.format(key, str(val)) for key, val in dct.items()]\n return \"\\n\".join(lines)\n\n\ndef verify_str_arg(\n arg: Any, param: Optional[str] = None, valid_args: Optional[Sequence[str]] = None\n) -> str:\n if not isinstance(arg, str):\n if param is None:\n msg1 = \"Expected type str\"\n else:\n msg1 = f\"Expected type str for parameter {param}\"\n msg2 = f\", but got type {type(arg)}.\"\n raise ValueError(msg1 + msg2)\n\n if valid_args is None:\n return arg\n\n if arg not in valid_args:\n if param is None:\n msg1 = f\"Unknown argument '{arg}'. \"\n else:\n msg1 = f\"Unknown argument '{arg}' for parameter {param}. \"\n msg2 = \"Valid arguments are {{{}}}.\"\n msg2 = msg2.format(\"'\" + \"', '\".join(valid_args) + \"'\")\n raise ValueError(msg1 + msg2)\n\n return arg\n\n\ndef build_complex_obj_repr(\n name: str,\n properties: Optional[Dict[str, Any]] = None,\n named_children: Sequence[Tuple[str, Any]] = (),\n line_length: int = 80,\n num_indent: int = 2,\n) -> str:\n def format_properties(properties: Dict[str, Any], sep: str) -> str:\n return sep.join([f\"{key}={value}\" for key, value in properties.items()])\n\n def indent(line: str) -> str:\n return \" \" * num_indent + line\n\n if properties is None:\n properties = {}\n\n prefix = f\"{name}(\"\n postfix = \")\"\n\n body = format_properties(properties, \", \")\n\n body_too_long = (\n len(body) + (num_indent if named_children else len(prefix) + len(postfix))\n > line_length\n )\n multiline_body = len(str(body).splitlines()) > 1\n\n if body_too_long or multiline_body:\n body = format_properties(properties, \",\\n\")\n elif not named_children:\n return prefix + body + postfix\n\n body = [indent(line) for line in body.splitlines()]\n\n for name, module in named_children:\n lines = str(module).splitlines()\n body.append(indent(f\"({name}): {lines[0]}\"))\n for line in lines[1:]:\n body.append(indent(line))\n\n return \"\\n\".join([prefix] + body + [postfix])\n\n\ndef build_obj_str(\n name: str,\n properties: Optional[Dict[str, Any]] = None,\n properties_threshold: Optional[int] = None,\n **kwargs: Any,\n) -> str:\n msg = build_deprecation_message(\n \"The function build_obj_str\",\n \"0.4.0\",\n info=\"It was renamed to build_complex_obj_repr.\",\n )\n warnings.warn(msg)\n\n if properties is not None and properties_threshold is not None:\n msg = build_deprecation_message(\n \"The parameter properties_threshold\",\n \"0.4.0\",\n info=\"The line breaks are now controlled by the line_length parameter.\",\n )\n warnings.warn(msg)\n line_length = 0 if len(properties) > properties_threshold else 10_000\n else:\n line_length = 80\n\n return build_complex_obj_repr(name, line_length=line_length, **kwargs)\n\n\ndef is_almost(actual: float, desired: float, eps: float = 1e-6) -> bool:\n return abs(actual - desired) < eps\n\n\ndef make_reproducible(seed: int = 0) -> None:\n random.seed(seed)\n 
np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.backends.cudnn.is_available():\n # Both attributes are dynamically assigned to the module. See\n # https://github.com/pytorch/pytorch/blob/a1eaaea288cf51abcd69eb9b0993b1aa9c0ce41f/torch/backends/cudnn/__init__.py#L115-L129\n # The type errors are ignored, since this is still the recommended practice.\n # https://pytorch.org/docs/stable/notes/randomness.html#cudnn\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = False # type: ignore\n\n\ndef get_input_image(\n starting_point: Union[str, torch.Tensor] = \"content\",\n content_image: Optional[torch.Tensor] = None,\n style_image: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Generates an input image for NST from the given ``starting_point``.\n\n Args:\n starting_point: If :class:`~torch.Tensor` returns a copy. If ``\"content\"`` or\n ``\"style\"`` returns a copy of ``content_image`` or ``style_image``,\n respectively. If ``\"random\"`` returns a white noise image with the dimensions\n of ``content_image`` or ``style_image``, respectively. Defaults to\n ``\"content\"``.\n content_image: Content image. Only required if ``starting_point`` is\n ``\"content\"`` or ``\"random\"``.\n style_image: Style image. Only required if ``starting_point`` is\n ``\"style\"`` or ``\"random\"``.\n \"\"\"\n if isinstance(starting_point, torch.Tensor):\n return starting_point.clone()\n\n starting_point = verify_str_arg(\n starting_point, \"starting_point\", (\"content\", \"style\", \"random\")\n )\n\n if starting_point == \"content\":\n if content_image is not None:\n return content_image.clone()\n raise RuntimeError(\"starting_point is 'content', but no content image is given\")\n elif starting_point == \"style\":\n if style_image is not None:\n return style_image.clone()\n raise RuntimeError(\"starting_point is 'style', but no style image is given\")\n elif starting_point == \"random\":\n if content_image is not None:\n return torch.rand_like(content_image)\n elif style_image is not None:\n return torch.rand_like(style_image)\n raise RuntimeError(\"starting_point is 'random', but no image is given\")\n\n raise RuntimeError\n\n\n@contextlib.contextmanager\ndef get_tmp_dir(**mkdtemp_kwargs: Any) -> Iterator[str]:\n tmp_dir = tempfile.mkdtemp(**mkdtemp_kwargs)\n try:\n yield tmp_dir\n finally:\n shutil.rmtree(tmp_dir)\n\n\ndef get_sha256_hash(file: str, chunk_size: int = 4096) -> str:\n hasher = hashlib.sha256()\n with open(file, \"rb\") as fh:\n for chunk in iter(lambda: fh.read(chunk_size), b\"\"):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef save_state_dict(\n input: Union[Dict[str, torch.Tensor], nn.Module],\n name: str,\n root: Optional[str] = None,\n ext: str = \".pth\",\n to_cpu: bool = True,\n hash_len: int = 8,\n) -> str:\n if isinstance(input, nn.Module):\n state_dict = input.state_dict()\n else:\n state_dict = OrderedDict(input)\n\n if to_cpu:\n state_dict = OrderedDict(\n [(key, tensor.detach().cpu()) for key, tensor in state_dict.items()]\n )\n\n if root is None:\n root = _get_torch_home()\n\n with get_tmp_dir() as tmp_dir:\n tmp_file = path.join(tmp_dir, \"tmp\")\n torch.save(state_dict, tmp_file)\n sha256 = get_sha256_hash(tmp_file)\n\n file = path.join(root, f\"{name}-{sha256[:hash_len]}{ext}\")\n shutil.move(tmp_file, file)\n\n return file\n\n\ndef build_deprecation_message(\n description: str,\n version: str,\n info: Optional[str] = None,\n url: Optional[str] = None,\n) -> str:\n msg = (\n f\"{description.strip()} is 
deprecated since pystiche=={version} and will be \"\n \"removed in a future release.\"\n )\n if info is not None:\n msg += f\" {info.strip()}\"\n if url is not None:\n msg += f\" See {url} for further details.\"\n return msg\n\n\ndef warn_deprecation(\n msg_or_description: str,\n version: Optional[str] = None,\n info: Optional[str] = None,\n url: Optional[str] = None,\n) -> None:\n msg = build_deprecation_message(\n \"META: The function warn_deprecation\",\n \"0.4.0\",\n url=\"https://github.com/pmeier/pystiche/pull/189\",\n )\n warnings.warn(msg, DeprecationWarning)\n\n if version is not None:\n description = msg_or_description\n msg = build_deprecation_message(description, version, info=info, url=url)\n else:\n msg = msg_or_description\n warnings.warn(msg)\n\n\ndef get_device(device: Optional[str] = None) -> torch.device:\n \"\"\"Selects a device to perform an NST on.\n\n Args:\n device: If ``str``, returns the corresponding :class:`~torch.device`. If\n ``None`` selects CUDA if available and otherwise CPU. Defaults to ``None``.\n \"\"\"\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n return torch.device(device)\n\n\ndef download_file(\n url: str, file: Optional[str] = None, user_agent: str = \"pystiche\"\n) -> str:\n if file is None:\n file = path.basename(url)\n headers = {\"User-Agent\": user_agent}\n with open(file, \"wb\") as fh:\n fh.write(requests.get(url, headers=headers).content)\n return file\n\n\ndef reduce(x: torch.Tensor, reduction: str) -> torch.Tensor:\n \"\"\"Reduces a :class:`~torch.Tensor` as specified.\n\n Args:\n x: Input tensor.\n reduction: Reduction method to be applied to ``x``. If ``\"none\"``, no reduction\n will be applied. If ``\"sum\"`` or ``\"mean\"``, the :func:`~torch.sum` or\n :func:`~torch.mean` will be applied across all dimensions of ``x``.\n \"\"\"\n verify_str_arg(reduction, \"reduction\", (\"mean\", \"sum\", \"none\"))\n if reduction == \"mean\":\n return torch.mean(x)\n elif reduction == \"sum\":\n return torch.sum(x)\n else: # reduction == \"none\":\n return x\n",
"from os import path\n\nimport pytest\n\nimport torch\nfrom torch import nn\n\nimport pystiche\nfrom pystiche import enc\nfrom pystiche.image.transforms import CaffePreprocessing, TorchPreprocessing\n\nfrom .utils import ForwardPassCounter, PysticheTestCase\n\n\nclass TestEncoder(PysticheTestCase):\n def test_SequentialEncoder_call(self):\n torch.manual_seed(0)\n modules = (nn.Conv2d(3, 3, 3), nn.ReLU())\n input = torch.rand(1, 3, 256, 256)\n\n pystiche_encoder = enc.SequentialEncoder(modules)\n torch_encoder = nn.Sequential(*modules)\n\n actual = pystiche_encoder(input)\n desired = torch_encoder(input)\n self.assertTensorAlmostEqual(actual, desired)\n\n\nclass TestModels(PysticheTestCase):\n @pytest.mark.large_download\n @pytest.mark.slow\n @pytest.mark.flaky\n def test_AlexNetMultiLayerEncoder(self):\n asset = self.load_asset(path.join(\"enc\", \"alexnet\"))\n\n multi_layer_encoder = enc.alexnet_multi_layer_encoder(\n weights=\"torch\", preprocessing=False, allow_inplace=False\n )\n layers = tuple(multi_layer_encoder.children_names())\n with torch.no_grad():\n encs = multi_layer_encoder(asset.input.image, layers)\n\n actual = dict(\n zip(\n layers,\n [pystiche.TensorKey(x, precision=asset.params.precision) for x in encs],\n )\n )\n desired = asset.output.enc_keys\n self.assertDictEqual(actual, desired)\n\n @pytest.mark.large_download\n @pytest.mark.slow\n def test_alexnet_multi_layer_encoder_smoke(self):\n multi_layer_encoder = enc.alexnet_multi_layer_encoder()\n self.assertIsInstance(multi_layer_encoder, enc.alexnet.AlexNetMultiLayerEncoder)\n\n @pytest.mark.large_download\n @pytest.mark.slow\n @pytest.mark.flaky\n def test_VGGMultiLayerEncoder(self):\n archs = (\"vgg11\", \"vgg13\", \"vgg16\", \"vgg19\")\n archs = (*archs, *[f\"{arch}_bn\" for arch in archs])\n\n for arch in archs:\n with self.subTest(arch=arch):\n asset = self.load_asset(path.join(\"enc\", arch))\n\n get_vgg_multi_layer_encoder = enc.__dict__[\n f\"{arch}_multi_layer_encoder\"\n ]\n multi_layer_encoder = get_vgg_multi_layer_encoder(\n weights=\"torch\", preprocessing=False, allow_inplace=False\n )\n layers = tuple(multi_layer_encoder.children_names())\n with torch.no_grad():\n encs = multi_layer_encoder(asset.input.image, layers)\n\n actual = dict(\n zip(\n layers,\n [\n pystiche.TensorKey(x, precision=asset.params.precision)\n for x in encs\n ],\n )\n )\n desired = asset.output.enc_keys\n self.assertDictEqual(actual, desired)\n\n @pytest.mark.large_download\n @pytest.mark.slow\n def test_vgg_multi_layer_encoder_smoke(self):\n fns = (\n enc.vgg11_multi_layer_encoder,\n enc.vgg11_bn_multi_layer_encoder,\n enc.vgg13_multi_layer_encoder,\n enc.vgg13_bn_multi_layer_encoder,\n enc.vgg16_multi_layer_encoder,\n enc.vgg16_bn_multi_layer_encoder,\n enc.vgg19_multi_layer_encoder,\n enc.vgg19_bn_multi_layer_encoder,\n )\n for fn in fns:\n with self.subTest(fn=fn.__name__):\n multi_layer_encoder = fn()\n self.assertIsInstance(multi_layer_encoder, enc.vgg.VGGMultiLayerEncoder)\n\n\nclass TestMultiLayerEncoder(PysticheTestCase):\n def test_MultiLayerEncoder(self):\n modules = [(str(idx), nn.Module()) for idx in range(3)]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n for name, module in modules:\n actual = getattr(multi_layer_encoder, name)\n desired = module\n self.assertIs(actual, desired)\n\n def test_MultiLayerEncoder_named_children(self):\n modules = [(str(idx), nn.Module()) for idx in range(3)]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n actual = tuple(multi_layer_encoder.children_names())\n desired 
= tuple(zip(*modules))[0]\n self.assertTupleEqual(actual, desired)\n\n def test_MultiLayerEncoder_contains(self):\n idcs = (0, 2)\n modules = [(str(idx), nn.Module()) for idx in idcs]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n for idx in idcs:\n self.assertTrue(str(idx) in multi_layer_encoder)\n\n for idx in set(range(max(idcs))) - set(idcs):\n self.assertFalse(str(idx) in multi_layer_encoder)\n\n def test_MultiLayerEncoder_extract_deepest_layer(self):\n layers = [str(idx) for idx in range(3)]\n modules = [(layer, nn.Module()) for layer in layers]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n actual = multi_layer_encoder.extract_deepest_layer(layers)\n desired = layers[-1]\n self.assertEqual(actual, desired)\n\n actual = multi_layer_encoder.extract_deepest_layer(sorted(layers, reverse=True))\n desired = layers[-1]\n self.assertEqual(actual, desired)\n\n del multi_layer_encoder._modules[layers[-1]]\n\n with self.assertRaises(ValueError):\n multi_layer_encoder.extract_deepest_layer(layers)\n\n layers = layers[:-1]\n\n actual = multi_layer_encoder.extract_deepest_layer(layers)\n desired = layers[-1]\n self.assertEqual(actual, desired)\n\n def test_MultiLayerEncoder_named_children_to(self):\n layers = [str(idx) for idx in range(3)]\n modules = [(layer, nn.Module()) for layer in layers]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n actuals = multi_layer_encoder.named_children_to(layers[-2])\n desireds = modules[:-2]\n self.assertNamedChildrenEqual(actuals, desireds)\n\n actuals = multi_layer_encoder.named_children_to(layers[-2], include_last=True)\n desireds = modules[:-1]\n self.assertNamedChildrenEqual(actuals, desireds)\n\n def test_MultiLayerEncoder_named_children_from(self):\n layers = [str(idx) for idx in range(3)]\n modules = [(layer, nn.Module()) for layer in layers]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n actuals = multi_layer_encoder.named_children_from(layers[-2])\n desireds = modules[1:]\n self.assertNamedChildrenEqual(actuals, desireds)\n\n actuals = multi_layer_encoder.named_children_from(\n layers[-2], include_first=False\n )\n desireds = modules[2:]\n self.assertNamedChildrenEqual(actuals, desireds)\n\n def test_MultiLayerEncoder_call(self):\n torch.manual_seed(0)\n conv = nn.Conv2d(3, 1, 1)\n relu = nn.ReLU(inplace=False)\n pool = nn.MaxPool2d(2)\n input = torch.rand(1, 3, 128, 128)\n\n modules = ((\"conv\", conv), (\"relu\", relu), (\"pool\", pool))\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n layers = (\"conv\", \"pool\")\n encs = multi_layer_encoder(input, layers)\n\n actual = encs[0]\n desired = conv(input)\n self.assertTensorAlmostEqual(actual, desired)\n\n actual = encs[1]\n desired = pool(relu(conv(input)))\n self.assertTensorAlmostEqual(actual, desired)\n\n def test_MultiLayerEncoder_call_store(self):\n torch.manual_seed(0)\n count = ForwardPassCounter()\n\n modules = ((\"count\", count),)\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n layers = (\"count\",)\n input = torch.rand(1, 3, 128, 128)\n multi_layer_encoder(input, layers, store=True)\n multi_layer_encoder(input, layers)\n\n actual = count.count\n desired = 1\n self.assertEqual(actual, desired)\n\n new_input = torch.rand(1, 3, 128, 128)\n multi_layer_encoder(new_input, layers)\n\n actual = count.count\n desired = 2\n self.assertEqual(actual, desired)\n\n def test_MultiLayerEncoder_extract_encoder(self):\n conv = nn.Conv2d(3, 1, 1)\n relu = nn.ReLU(inplace=False)\n\n modules = ((\"conv\", conv), (\"relu\", relu))\n 
multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n layer = \"relu\"\n single_layer_encoder = multi_layer_encoder.extract_encoder(layer)\n\n self.assertIsInstance(single_layer_encoder, enc.SingleLayerEncoder)\n self.assertIs(single_layer_encoder.multi_layer_encoder, multi_layer_encoder)\n self.assertEqual(single_layer_encoder.layer, layer)\n\n self.assertTrue(layer in multi_layer_encoder.registered_layers)\n\n def test_MultiLayerEncoder_encode(self):\n torch.manual_seed(0)\n count = ForwardPassCounter()\n conv = nn.Conv2d(3, 1, 1)\n relu = nn.ReLU(inplace=False)\n input = torch.rand(1, 3, 128, 128)\n\n modules = ((\"count\", count), (\"conv\", conv), (\"relu\", relu))\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n layers = (\"conv\", \"relu\")\n multi_layer_encoder.registered_layers.update(layers)\n multi_layer_encoder.encode(input)\n encs = multi_layer_encoder(input, layers)\n\n actual = encs[0]\n desired = conv(input)\n self.assertTensorAlmostEqual(actual, desired)\n\n actual = encs[1]\n desired = relu(conv(input))\n self.assertTensorAlmostEqual(actual, desired)\n\n actual = count.count\n desired = 1\n self.assertEqual(actual, desired)\n\n def test_MultiLayerEncoder_empty_storage(self):\n torch.manual_seed(0)\n count = ForwardPassCounter()\n input = torch.rand(1, 3, 128, 128)\n\n modules = ((\"count\", count),)\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n layers = (\"count\",)\n multi_layer_encoder(input, layers, store=True)\n multi_layer_encoder.empty_storage()\n multi_layer_encoder(input, layers)\n\n actual = count.count\n desired = 2\n self.assertEqual(actual, desired)\n\n def test_MultiLayerEncoder_trim(self):\n layers = [str(idx) for idx in range(3)]\n modules = [(layer, nn.Module()) for layer in layers]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n for name, module in modules:\n actual = getattr(multi_layer_encoder, name)\n desired = module\n self.assertIs(actual, desired)\n\n idx = 1\n multi_layer_encoder.trim((str(idx),))\n\n for name, module in modules[: idx + 1]:\n actual = getattr(multi_layer_encoder, name)\n desired = module\n self.assertIs(actual, desired)\n\n for name in tuple(zip(*modules))[0][idx + 1 :]:\n with self.assertRaises(AttributeError):\n getattr(multi_layer_encoder, name)\n\n def test_MultiLayerEncoder_trim_layers(self):\n layers = [str(idx) for idx in range(3)]\n modules = [(layer, nn.Module()) for layer in layers]\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n for name, module in modules:\n actual = getattr(multi_layer_encoder, name)\n desired = module\n self.assertIs(actual, desired)\n\n idx = 1\n multi_layer_encoder.registered_layers.update(\n [str(idx) for idx in range(idx + 1)]\n )\n multi_layer_encoder.trim()\n\n for name, module in modules[: idx + 1]:\n actual = getattr(multi_layer_encoder, name)\n desired = module\n self.assertIs(actual, desired)\n\n for name in tuple(zip(*modules))[0][idx + 1 :]:\n with self.assertRaises(AttributeError):\n getattr(multi_layer_encoder, name)\n\n def test_SingleLayerEncoder_call(self):\n torch.manual_seed(0)\n conv = nn.Conv2d(3, 1, 1)\n relu = nn.ReLU(inplace=False)\n input = torch.rand(1, 3, 128, 128)\n\n modules = ((\"conv\", conv), (\"relu\", relu))\n multi_layer_encoder = enc.MultiLayerEncoder(modules)\n\n single_layer_encoder = enc.SingleLayerEncoder(multi_layer_encoder, \"conv\")\n\n actual = single_layer_encoder(input)\n desired = conv(input)\n self.assertTensorAlmostEqual(actual, desired)\n\n\nclass TestProcessing(PysticheTestCase):\n def 
test_get_preprocessor(self):\n get_preprocessor = enc.preprocessing.get_preprocessor\n self.assertIsInstance(get_preprocessor(\"torch\"), TorchPreprocessing)\n self.assertIsInstance(get_preprocessor(\"caffe\"), CaffePreprocessing)\n"
] |
[
[
"torch.mean",
"numpy.abs",
"numpy.random.seed",
"torch.rand_like",
"torch.manual_seed",
"torch.hub._get_torch_home",
"torch.backends.cudnn.is_available",
"torch.sum",
"torch.cuda.is_available",
"numpy.mod",
"torch.device",
"torch.save"
],
[
"torch.nn.Sequential",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.Module",
"torch.nn.MaxPool2d",
"torch.no_grad",
"torch.rand",
"torch.nn.ReLU"
]
] |