repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
rafaelcgon/PC
|
[
"ce5f25c13741c02a19560a57838aec735aac2d88",
"ce5f25c13741c02a19560a57838aec735aac2d88"
] |
[
"example_2D.py",
"basisFunctions.py"
] |
[
"import numpy as np\nimport PC \nfrom matplotlib import pylab as pl \nimport matplotlib as mpl \nimport matplotlib.pyplot as plt \nimport seaborn as sns #\nimport scipy.io as sio\n\ndef model(X,lx=3, A=1.5): \n\t'''\n\t\tGenerates a N-D Gaussian surface.\n\n\t\tParameter\n\t\t----------\n\t\tX : ndarray\n\t\t\tM by N array, where M is the number of sampling points, and N is the number of input parameters.\n\t\tlx : ndarray\n\t\t\tN size array with correlation scales for each dimension of X\n\t\tA = amplitude of the Gaussian function\n\n\t\tReturns\n\t\t-------\n\t\toutput : scalar model output\n\t'''\n\tif np.shape(lx) == ():\n\t\tlx = np.array([lx])\n\tif lx.size!= X.shape[0]:\n\t\tlx = np.repeat(lx[0],X.shape[0])\n\tB = np.array([((X[n,:]-X[n,:].mean())/lx[n])**2 for n in range(X.shape[0])])\n\treturn A*np.exp(B.sum(axis=0)/(-2))\n\n\n# \npolOrder = 6 # polynomial expansion order\nfigName = 'example_2D.png' \n#\nx1 = np.arange(20,50.1,1) # input parameters\nx2 = np.arange(-10,40.1,1) # input parameters\nX1,X2= np.meshgrid(x1,x2)\nX = np.concatenate([X1.reshape([1,-1]),X2.reshape([1,-1])],axis=0)\nlx = np.array([10,20])\nY = model(X,lx) # model outputs at X points\nY = Y.reshape([x2.size,x1.size])\nV = np.linspace(0,Y.max(),30)\n\n # initialize PC components\ninLim = np.array([[x1.min(),x1.max()],[x2.min(),x2.max()]])\nuq = PC.PCE(inputLim=inLim,polOrder=polOrder)\n\n# sample model at quadrature points\nXqp1,Xqp2 = np.meshgrid(uq.input_qp[0,:],uq.input_qp[1,:])\nXqp = np.concatenate([Xqp1.reshape([1,-1]),Xqp2.reshape([1,-1])],axis=0)\nY_qp = model(Xqp,lx) \n\n# compute expansion coefficients\nuq.computePCE(Y_qp) \n\n#sample surrogate at x points\nsurrogate = PC.surrogate(uq,X,1) \nsurrogate = surrogate.reshape([x2.size,x1.size])\n\n# absolute error of the surrogate\nabsError = np.abs(surrogate - Y) \nV2 = np.linspace(0,absError.max(),30)\n\n\n# plot results\nFS = 14\nfigW = 16\nfigH = 8\nleft = [0.05,0.37,0.69]\nwidth = 0.27 \nbottom = 0.14\nheight = 0.8 #0.27 \n\nfig = plt.figure(figsize=(figW,figH))\nplot1=fig.add_axes([left[0],bottom,width,height],aspect = 'equal')\nplot1.contourf(x1,x2,Y,V,cmap=plt.get_cmap('jet')) \nplot1.plot(Xqp[0,:],Xqp[1,:],'ok')\nplot1.tick_params(axis='both',which='major',labelsize=FS)\nplot1.set_ylabel('Input 2',fontsize=FS)\nplot1.set_xlabel('Input 1',fontsize=FS)\nplot1.set_title('Model output',fontsize=FS)\nplt.legend([r'Quadrature points'],loc=4)\n\nplot2=fig.add_axes([left[1],bottom,width,height],aspect = 'equal')\nplot2.contourf(x1,x2,surrogate,V,cmap=plt.get_cmap('jet')) \nplot2.plot(Xqp[0,:],Xqp[1,:],'ok')\nplot2.tick_params(axis='both',which='major',labelsize=FS)\n#plot2.set_ylabel('Input 2',fontsize=FS)\nplot2.set_xlabel('Input 1',fontsize=FS)\nplot2.set_title('PC surrogate',fontsize=FS)\n\nplot3=fig.add_axes([left[2],bottom,width,height],aspect = 'equal')\nplot3.contourf(x1,x2,absError,V,cmap=plt.get_cmap('jet')) \nplot3.plot(Xqp[0,:],Xqp[1,:],'ok')\nplot3.tick_params(axis='both',which='major',labelsize=FS)\n#plot3.set_ylabel('Input 2',fontsize=FS)\nplot3.set_xlabel('Input 1',fontsize=FS)\nplot3.set_title('Absolute error',fontsize=FS)\n\n## INDEPENDENT COLORBAR\naxbar = fig.add_axes([0.055, 0.045, 0.9, 0.02])\ncb = mpl.colorbar.ColorbarBase(axbar, orientation = 'horizontal', boundaries = V,cmap='jet')\n# cb.set_ticks(Vt) #[-4, -3, -2, -1])\n## cb.set_ticklabels(VtL1)\ncb.ax.tick_params(labelsize = FS)\n\n\npl.savefig(figName)",
"import numpy as np\nimport scipy.special as sp\nfrom math import factorial\n\ndef gaussQuad(basis,polOrder):\n\t'''\n\treturn sample points and weigth for the Gauss quadrature.\n\t'''\n\tif (basis == 'legendre')|(basis == 'Legendre'):\n\t\treturn np.polynomial.legendre.leggauss(polOrder+1)\n\telif (basis == 'hermite')|(basis == 'Hermite'):\n\t\tqp,w= np.polynomial.hermite_e.hermegauss(polOrder+1)\n\t\treturn qp , w/np.sqrt(2*np.pi)\n\telse:\n\t\treturn np.polynomial.laguerre.laggauss(polOrder+1)\n\n#####################################################################################\ndef getBasis(x,N,basis='Legendre'):\n\t'''\n\treturn polynomial basis function.\n x : standardized inputs\n N : truncation order of the polynomial series\n basis: orthogonal polynomial basis\n\t'''\n\tif (basis == 'legendre')|(basis == 'Legendre'):\n\t\tP = legendrePol(x,N)\n\t\tP2 = (2./(2*np.array(range(N+1))+1.))\n\telif (basis == 'hermite')|(basis == 'Hermite'):\n\t\tP = spPol(x,N,basis)\n\t\tP2 = np.array([np.sqrt(np.pi)*(2**n)*factorial(n) for n in range(N+1)])\n\telif (basis == 'laguerre')|(basis == 'Laguerre'):\n\t\tP = laguerrePol(x,N,basis)\n\t\tP2 = np.array([sp.gamma(n+1)/factorial(n) for n in range(N+1)])\n\treturn P,P2\n#####################################################################################\ndef legendrePol(x,N):\n\t'''\n\t\tL = legendrePol(x,N)\n\n\t\tcomputes the Legendre polynomial of degree 0:N at points specified by x\n\t\tusing the recurrence relationship.\n\t\tUsage is L = legendrepol(x,N)\n '''\n\tL = np.array(np.zeros((np.size(x),N+1)))\n\tL[:,0] = 1.0 # Legendre polynomial of degree zero\n\tif (N>0):\n\t\tL[:,1] = x\n\t\tfor n in range(2,N+1):\n\t\t\ta = float((2*n-1))/float(n) \n\t\t\tb = float((n-1))/float(n)\n\t\t\tL[:,n] = a*x*L[:,n-1] - b*L[:,n-2]\n\treturn L\n#####################################################################################\ndef laguerrePol(x,N):\n\t'''\n\t\tL = laguerrePol(x,N)\n\n\t\tcomputes the Laguerre polynomial of degree 0:N at points specified by x\n\t\tusing the recurrence relationship.\n '''\n\tL = np.array(np.zeros((np.size(x),N+1)))\n\tL[:,0] = 1.0 # Legendre polynomial of degree zero\n\tif (N>0):\n\t\tL[:,1] = 1 - x\n\n\t\tfor n in np.arange(2,N+1):\n\t\t\ta = (2.*n-1.-x)/n \n\t\t\tb = (n-1.)/n\n\t\t\tL[:,n] = a*L[:,n-1] - b*L[:,n-2]\n\treturn L\n#####################################################################################\ndef spPol(x,N,basis):\n\t'''\n\tGet polynomial from scipy.special\n \tP = spPol(x,N,basis)\n\tx = array with input values\n\tN = max. polynomial order\n\tbasis = type of polynomial (Hermite or Laguerre)\n\t'''\n\t\n\tP = np.array(np.zeros((np.size(x),N+1)))\n\tif (basis == 'hermite')|(basis == 'Hermite'):\n\t\tcommand = 'P[:,n]=sp.hermitenorm(n)(x)'\n\telif (basis == 'laguerre')|(basis == 'Laguerre'):\n\t\tcommand = 'P[:,n]=sp.laguerre(n)(x)'\n\telse:\n\t\tcommand = 'P[:,n]=sp.legendre(n)(x)'\n\n\tfor n in range(N+1):\n\t\texec(command)\n\treturn P"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.abs",
"numpy.meshgrid",
"numpy.arange",
"numpy.repeat",
"matplotlib.pyplot.get_cmap",
"matplotlib.colorbar.ColorbarBase",
"numpy.shape",
"matplotlib.pylab.savefig",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"numpy.polynomial.legendre.leggauss",
"scipy.special.gamma",
"numpy.sqrt",
"numpy.arange",
"numpy.polynomial.laguerre.laggauss",
"numpy.size",
"numpy.polynomial.hermite_e.hermegauss"
]
] |
nailbiter/pyassistantbot2
|
[
"00a4171e8359ad4d61fea0bf7e84241ba79708d6"
] |
[
"_common/requests_cache.py"
] |
[
"\"\"\"===============================================================================\n\n FILE: _common/requests_cache.py\n\n USAGE: (not intended to be directly executed)\n\n DESCRIPTION: \n\n OPTIONS: ---\nREQUIREMENTS: ---\n BUGS: ---\n NOTES: ---\n AUTHOR: Alex Leontiev (alozz1991@gmail.com)\nORGANIZATION: \n VERSION: ---\n CREATED: 2022-04-18T21:47:10.758307\n REVISION: ---\n\n===============================================================================\"\"\"\n\nimport pandas as pd\nimport requests\nimport sqlite3\nfrom datetime import datetime, timedelta\nimport json\nimport logging\n\n\n_REQUEST_GET_TABLE_NAME = \"requests_get\"\n\n\nclass RequestGet:\n def __init__(self, cache_lifetime_min, cache_db, requests_kwargs={}):\n \"\"\"\n @param cache_lifetime_min 0, -1 or >0 (0 means no cache, -1 means endless)\n \"\"\"\n self._logger = logging.getLogger(self.__class__.__name__)\n assert cache_lifetime_min >= 0 or cache_lifetime_min == -1\n self._cache_lifetime_min = cache_lifetime_min\n self._cache_db = cache_db\n self._requests_kwargs = requests_kwargs\n\n def _get_conn(self,):\n conn = sqlite3.connect(self._cache_db)\n return conn\n\n def _get_url(self, url):\n return requests.get(url, **self._requests_kwargs)\n\n def _r_to_json(self, r):\n status_code = r.status_code\n text = r.text\n r = {\"status_code\": status_code, \"text\": text}\n return r\n\n def __call__(self, url):\n \"\"\"\n return (return_code, text)\n \"\"\"\n if self._cache_lifetime_min == 0:\n return self._get_url(url)\n conn = self._get_conn()\n try:\n df = pd.read_sql_query(\n f'SELECT reply_json, datetime FROM {_REQUEST_GET_TABLE_NAME} where url=\"{url}\" order by datetime desc', conn)\n except pd.io.sql.DatabaseError:\n df = pd.DataFrame([], columns=[\"reply_json\", \"datetime\"])\n df.datetime = df.datetime.apply(datetime.fromisoformat)\n df.reply_json = df.reply_json.apply(json.loads)\n now = datetime.now()\n if len(df) == 0 or ((now-df.datetime.iloc[0]).total_seconds()/60 >= self._cache_lifetime_min > 0):\n r = self._get_url(url)\n r = self._r_to_json(r)\n pd.DataFrame([\n {\"datetime\": now.isoformat(), \"reply_json\": json.dumps(r), \"url\": url}\n ]).to_sql(_REQUEST_GET_TABLE_NAME, conn, if_exists='append', index=None)\n else:\n active_for = str(\n now-df.datetime.iloc[0]) if self._cache_lifetime_min > 0 else \"∞\"\n self._logger.warning(\n f\"use cache (active for {active_for})\")\n r = df.reply_json.iloc[0]\n conn.close()\n return r[\"status_code\"], r[\"text\"]\n"
] |
[
[
"pandas.read_sql_query",
"pandas.DataFrame"
]
] |
abdel/imdb-sentiment-analysis
|
[
"f240c6b28cda01c9496fd5e0b02eedd8fbd22b44"
] |
[
"notebooks/utils.py"
] |
[
"import os\nimport re\nimport bcolz\nimport keras\nimport itertools\nimport numpy as np\nimport _pickle as pickle\n\nfrom itertools import chain\nfrom matplotlib import pyplot as plt\nfrom keras.utils.data_utils import get_file\nfrom numpy.random import normal\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n (This function is copied from the scikit docs.)\n \"\"\"\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \ndef load_array(fname):\n return bcolz.open(fname)[:]\n\ndef get_glove_dataset(dataset):\n \"\"\"Download the requested glove dataset from files.fast.ai\n and return a location that can be passed to load_vectors.\n \"\"\"\n md5sums = {'6B.50d': '8e1557d1228decbda7db6dfd81cd9909',\n '6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',\n '6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',\n '6B.300d': '30290210376887dcc6d0a5a6374d8255'}\n glove_path = os.path.abspath('data/glove/results')\n return get_file(dataset,\n 'http://files.fast.ai/models/glove/' + dataset + '.tgz',\n cache_subdir=glove_path,\n md5_hash=md5sums.get(dataset, None),\n untar=True)\n\ndef load_vectors(loc):\n return (load_array(loc+'.dat'),\n pickle.load(open(loc+'_words.pkl','rb'), encoding='latin1'),\n pickle.load(open(loc+'_idx.pkl','rb'), encoding='latin1'))\n\ndef create_embeddings(max_features, vecs, wordidx, idx2word):\n n_fact = vecs.shape[1]\n emb = np.zeros((max_features, n_fact))\n\n for i in range(1,len(emb)):\n word = idx2word[i]\n if word and re.match(r\"^[a-zA-Z0-9\\-]*$\", word):\n src_idx = wordidx[word]\n emb[i] = vecs[src_idx]\n else:\n # If we can't find the word in glove, randomly initialize\n emb[i] = normal(scale=0.6, size=(n_fact,))\n\n # This is our \"rare word\" id - we want to randomly initialize\n emb[-1] = normal(scale=0.6, size=(n_fact,))\n emb/=3\n return emb"
] |
[
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.random.normal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
hawflau/aws-sam-cli-app-templates
|
[
"47e7100659c1cdf8c1ae1d8eba36de5dbd01623c"
] |
[
"python3.9-image/cookiecutter-ml-apigw-pytorch/{{cookiecutter.project_name}}/app/app.py"
] |
[
"import torch\nimport torchvision\nimport base64\nimport json\nimport numpy as np\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom PIL import Image\nfrom io import BytesIO\n\nimage_transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.1307,), (0.3081,))])\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, kernel_size=5)\n self.conv2 = nn.Conv2d(20, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n\n self.fc1 = nn.Linear(320, 100)\n self.bn1 = nn.BatchNorm1d(100)\n\n self.fc2 = nn.Linear(100, 100)\n self.bn2 = nn.BatchNorm1d(100)\n\n self.smax = nn.Linear(100, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n\n x = x.view(-1, 320)\n x = self.bn1(F.relu(self.fc1(x)))\n x = F.dropout(x, training=self.training)\n\n x = self.bn2(F.relu(self.fc2(x)))\n x = F.dropout(x, training=self.training)\n\n return F.softmax(self.smax(x), dim=-1)\n\n\nmodel_file = '/opt/ml/model'\nmodel = Net()\nmodel.load_state_dict(torch.load(model_file))\nmodel.eval()\n\n\ndef lambda_handler(event, context):\n image_bytes = event['body'].encode('utf-8')\n image = Image.open(BytesIO(base64.b64decode(image_bytes))).convert(mode='L')\n image = image.resize((28, 28))\n\n probabilities = model.forward(image_transforms(np.array(image)).reshape(-1, 1, 28, 28))\n label = torch.argmax(probabilities).item()\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(\n {\n \"predicted_label\": label,\n }\n )\n }\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout2d",
"torch.load",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Linear",
"numpy.array",
"torch.argmax"
]
] |
dekuenstle/cnn-review
|
[
"47600418c0d256d0dfaf2f6feb018f793c1205bc"
] |
[
"vectorize.py"
] |
[
"#!/usr/bin/env python3\n\nfrom gensim.models import KeyedVectors\nfrom gensim.utils import tokenize\nimport numpy as np\n\nfrom config import random_seed, word2vec_file, word2vec_dim, max_word_num\n\ndef load_word2vec():\n print(\"Load Word2Vec from {} ...\".format(word2vec_file))\n return KeyedVectors.load_word2vec_format(word2vec_file,\n binary=True)\n\ndef word2vec_unknown():\n np.random.seed(random_seed)\n return np.random.uniform(-1, 1, (word2vec_dim,))\n\ndef word2vec_sentence(sentence, wv, arr=None, zero_padding=True, unknown_vec=None):\n tokens = list(tokenize(sentence, lowercase=False, deacc=False))\n if arr is None:\n n_used_token = len(tokens)\n arr = np.empty((n_used_token, word2vec_dim))\n else:\n n_used_token = min(arr.shape[0], len(tokens))\n if unknown_vec is None:\n unknown_vec = word2vec_unknown()\n\n skipped_ind = []\n for i, token in enumerate(tokens[:n_used_token]):\n if token in wv:\n arr[i,:] = wv[token]\n else:\n skipped_ind.append(i)\n arr[i,:] = unknown_vec\n if zero_padding and n_used_token < arr.shape[0]:\n arr[n_used_token:,:] = 0\n\n return arr, tokens, skipped_ind\n\n\ndef word2vec_sentences(sentences, wv=None, print_stat=True):\n if wv is None:\n wv = load_word2vec()\n data = np.empty((len(sentences), max_word_num, word2vec_dim))\n n = len(sentences)\n unknown_count = dict()\n token_set = set()\n n_unknown = np.empty((n,))\n n_token = np.empty((n,))\n offset = np.empty((n,))\n\n print(\"Transform words to vectors ...\", end='')\n for i in range(n):\n if (i + 1) % 5000 == 0:\n print(\".\", end=\"\")\n sentence_arr, tokens, unknown_ind = word2vec_sentence(sentences[i], wv, data[i])\n n_unknown[i] = len(unknown_ind)\n n_token[i] = len(tokens)\n token_set.update(tokens)\n for ind in unknown_ind:\n unknown_tok = tokens[ind]\n if unknown_tok in unknown_count:\n unknown_count[unknown_tok] += 1\n else:\n unknown_count[unknown_tok] = 1\n print(\".\")\n\n if print_stat:\n def print_stat_func(arr, sum_n_token, desc):\n print(\" {} of {} tokens are {} ({:.1f}%), min {}, max {}, mean {:.2f}, median {}\"\n .format(int(arr.sum()), int(sum_n_token), desc, 100*(arr.sum()/sum_n_token), int(arr.min()),\n int(arr.max()), arr.mean(), int(np.percentile(arr, 50))))\n\n print(\"Print statistics ...\")\n n_padded = (max_word_num - n_token).clip(0)\n n_clipped = (n_token - max_word_num).clip(0)\n sum_n_token = n_token.sum()\n print(\" Dataset contains {} sentences, fixed sentence length is {}, number of unique tokens is {}\"\n .format(n, max_word_num, len(token_set)))\n print_stat_func(n_token, sum_n_token, \"in dataset sentences\")\n print_stat_func(n_clipped, sum_n_token, \"clipped\")\n print_stat_func(n_padded, max_word_num * n, \"padded\")\n print_stat_func(n_unknown, sum_n_token, \"unknown\")\n common_unknowns = sorted(unknown_count.items(), key=lambda x: x[1])[::-1][:10]\n print(\" Most common unknowns: {}\"\n .format(\", \".join([\"{} ({})\".format(t, c) for t, c in common_unknowns])))\n return data\n\n\ndef main():\n wv = load_word2vec()\n np.random.seed(random_seed)\n sentence = 'The police in Hintertupfingen is slow today!'\n mat, tok, skipped = word2vec_sentence(sentence, wv)\n print(\"Full sentence: \", sentence)\n print(\"All tokens: \", tok)\n print(\"Skipped (zero vec): \", [tok[i] for i in skipped])\n print(\"Matrix (only 5D):\\n\", mat[:, :5])\n\n print()\n mat = word2vec_sentences([sentence], wv)\n print(\"Matrix (only 10x5D):\\n\", mat[0, :10, :5])\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.random.uniform",
"numpy.empty",
"numpy.percentile",
"numpy.random.seed"
]
] |
davidkartchner/rubrix
|
[
"33faa006d7498a806a9fd594036d4a42c7d70da2"
] |
[
"src/rubrix/labeling/text_classification/label_errors.py"
] |
[
"# coding=utf-8\n# Copyright 2021-present, the Recognai S.L. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom rubrix.client.datasets import DatasetForTextClassification\nfrom rubrix.client.models import TextClassificationRecord\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SortBy(Enum):\n \"\"\"A sort by strategy\"\"\"\n\n LIKELIHOOD = \"likelihood\"\n PREDICTION = \"prediction\"\n NONE = \"none\"\n\n @classmethod\n def _missing_(cls, value):\n raise ValueError(\n f\"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}\"\n )\n\n\ndef find_label_errors(\n records: Union[List[TextClassificationRecord], DatasetForTextClassification],\n sort_by: Union[str, SortBy] = \"likelihood\",\n metadata_key: str = \"label_error_candidate\",\n n_jobs: Optional[int] = 1,\n **kwargs,\n) -> List[TextClassificationRecord]:\n \"\"\"Finds potential annotation/label errors in your records using [cleanlab](https://github.com/cleanlab/cleanlab).\n\n We will consider all records for which a prediction AND annotation is available. Make sure the predictions were made\n in a holdout manner, that is you should only include records that were not used in the training of the predictor.\n\n Args:\n records: A list of text classification records\n sort_by: One of the three options\n - \"likelihood\": sort the returned records by likelihood of containing a label error (most likely first)\n - \"prediction\": sort the returned records by the probability of the prediction (highest probability first)\n - \"none\": do not sort the returned records\n metadata_key: The key added to the record's metadata that holds the order, if ``sort_by`` is not \"none\".\n n_jobs : Number of processing threads used by multiprocessing. If None, uses the number of threads\n on your CPU. Defaults to 1, which removes parallel processing.\n **kwargs: Passed on to `cleanlab.pruning.get_noise_indices`\n\n Returns:\n A list of records containing potential annotation/label errors\n\n Raises:\n NoRecordsError: If none of the records has a prediction AND annotation.\n MissingPredictionError: If a prediction is missing for one of the labels.\n ValueError: If not supported kwargs are passed on, e.g. 'sorted_index_method'.\n\n Examples:\n >>> import rubrix as rb\n >>> records = rb.load(\"my_dataset\", as_pandas=False)\n >>> records_with_label_errors = find_label_errors(records)\n \"\"\"\n try:\n import cleanlab\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"'cleanlab' must be installed to use the `find_label_errors` method! 
\"\n \"You can install 'cleanlab' with the command: `pip install cleanlab`\"\n )\n else:\n from cleanlab.pruning import get_noise_indices\n\n if isinstance(sort_by, str):\n sort_by = SortBy(sort_by)\n\n # select only records with prediction and annotation\n records = [rec for rec in records if rec.prediction and rec.annotation]\n if not records:\n raise NoRecordsError(\n \"It seems that none of your records have a prediction AND annotation!\"\n )\n\n # check and update kwargs for get_noise_indices\n _check_and_update_kwargs(records[0], sort_by, kwargs)\n\n # construct \"noisy\" label vector and probability matrix of the predictions\n s, psx = _construct_s_and_psx(records)\n\n indices = get_noise_indices(s, psx, n_jobs=n_jobs, **kwargs)\n\n records_with_label_errors = np.array(records)[indices].tolist()\n\n # add metadata\n if sort_by is not SortBy.NONE:\n for i, rec in enumerate(records_with_label_errors):\n rec.metadata[metadata_key] = i\n\n return records_with_label_errors\n\n\ndef _check_and_update_kwargs(\n record: TextClassificationRecord, sort_by: SortBy, kwargs: Dict\n):\n \"\"\"Helper function to check and update the kwargs passed on to cleanlab's `get_noise_indices`.\n\n Args:\n record: One of the records passed in the `find_label_error` function.\n sort_by: The sorting policy.\n kwargs: The passed on kwargs.\n\n Raises:\n ValueError: If not supported kwargs ('sorted_index_method') are passed on.\n \"\"\"\n if \"sorted_index_method\" in kwargs:\n raise ValueError(\n \"The 'sorted_index_method' kwarg is not supported, please use 'sort_by' instead.\"\n )\n kwargs[\"sorted_index_method\"] = \"normalized_margin\"\n if sort_by is SortBy.PREDICTION:\n kwargs[\"sorted_index_method\"] = \"prob_given_label\"\n elif sort_by is SortBy.NONE:\n kwargs[\"sorted_index_method\"] = None\n\n if \"multi_label\" in kwargs:\n _LOGGER.warning(\n \"You provided the kwarg 'multi_label', but it is determined automatically. 
\"\n f\"We will set it to '{record.multi_label}'.\"\n )\n kwargs[\"multi_label\"] = record.multi_label\n\n\ndef _construct_s_and_psx(\n records: List[TextClassificationRecord],\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Helper function to construct the s array and psx matrix.\n\n Args:\n records: List of records.\n\n Returns:\n A tuple containing the s array and the psx matrix.\n\n Raises:\n MissingPredictionError: If predictions are missing for certain labels.\n \"\"\"\n predictions = []\n labels = set() # use a dict to preserve the order\n for rec in records:\n predictions.append({pred[0]: pred[1] for pred in rec.prediction})\n labels.update(predictions[-1].keys())\n labels_mapping = {label: i for i, label in enumerate(sorted(labels))}\n\n s = (\n np.empty(len(records), dtype=object)\n if records[0].multi_label\n else np.zeros(len(records), dtype=np.short)\n )\n psx = np.zeros((len(records), len(labels)), dtype=np.float)\n\n for i, rec, pred in zip(range(len(records)), records, predictions):\n try:\n psx[i] = [pred[label] for label in labels_mapping]\n except KeyError as error:\n raise MissingPredictionError(\n f\"It seems a prediction for {error} is missing in the following record: {rec}\"\n )\n\n try:\n s[i] = (\n [labels_mapping[label] for label in rec.annotation]\n if rec.multi_label\n else labels_mapping[rec.annotation]\n )\n except KeyError as error:\n raise MissingPredictionError(\n f\"It seems predictions are missing for the label {error}!\"\n )\n\n return s, psx\n\n\nclass LabelErrorsException(Exception):\n pass\n\n\nclass NoRecordsError(LabelErrorsException):\n pass\n\n\nclass MissingPredictionError(LabelErrorsException):\n pass\n"
] |
[
[
"numpy.array"
]
] |
feras-oughali/fastai
|
[
"4052f7adb441ab8a00eaa807b444a4e583b6bcc7"
] |
[
"fastai/vision/models/presnet.py"
] |
[
"from pdb import set_trace\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport math\nimport torch.utils.model_zoo as model_zoo\n\n__all__ = ['PResNet', 'presnet18', 'presnet34', 'presnet50', 'presnet101', 'presnet152']\n\nact_fn = nn.ReLU\n\ndef init_cnn(m):\n if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0)\n if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01)\n for l in m.children(): init_cnn(l)\n\ndef conv(ni, nf, ks=3, stride=1, bias=False):\n return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=ks//2, bias=bias)\n\ndef conv_layer(conv_1st, ni, nf, ks=3, stride=1, zero_bn=False, bias=False):\n bn = nn.BatchNorm2d(nf if conv_1st else ni)\n nn.init.constant_(bn.weight, 0. if zero_bn else 1.)\n res = [act_fn(), bn]\n cn = conv(ni, nf, ks, stride=stride, bias=bias)\n res.insert(0 if conv_1st else 2, cn)\n return nn.Sequential(*res)\n\ndef conv_act(*args, **kwargs): return conv_layer(True , *args, **kwargs)\ndef act_conv(*args, **kwargs): return conv_layer(False, *args, **kwargs)\n\nclass BasicBlock(Module):\n expansion = 1\n\n def __init__(self, ni, nf, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = act_conv(ni, nf, stride=stride)\n self.conv2 = act_conv(nf, nf, zero_bn=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x if self.downsample is None else self.downsample(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x += identity\n return x\n\nclass Bottleneck(Module):\n expansion = 4\n\n def __init__(self, ni, nf, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = act_conv(ni, nf, 1)\n self.conv2 = act_conv(nf, nf, stride=stride)\n self.conv3 = act_conv(nf, nf*self.expansion, 1)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x if self.downsample is None else self.downsample(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x += identity\n return x\n\nclass PResNet(Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.ni = 64\n super().__init__()\n self.conv1 = conv_act(3, 16, stride=2)\n self.conv2 = conv_act(16, 32)\n self.conv3 = conv_act(32, 64)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n ni = 512*block.expansion\n self.avgpool = nn.Sequential(\n act_fn(), nn.BatchNorm2d(ni), nn.AdaptiveAvgPool2d(1))\n self.fc = nn.Linear(ni, num_classes)\n\n init_cnn(self)\n\n def _make_layer(self, block, nf, blocks, stride=1):\n downsample = None\n if stride != 1 or self.ni != nf*block.expansion:\n layers = [act_fn(), nn.BatchNorm2d(self.ni),\n nn.AvgPool2d(kernel_size=2)] if stride==2 else []\n layers.append(conv(self.ni, nf*block.expansion))\n downsample = nn.Sequential(*layers)\n\n layers = [block(self.ni, nf, stride, downsample)]\n self.ni = nf*block.expansion\n for i in range(1, blocks): layers.append(block(self.ni, nf))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = 
self.fc(x)\n\n return x\n\nmodel_urls = dict(presnet34='presnet34', presnet50='presnet50')\n\ndef presnet(block, n_layers, name, pre=False, **kwargs):\n model = PResNet(block, n_layers, **kwargs)\n #if pre: model.load_state_dict(model_zoo.load_url(model_urls[name]))\n if pre: model.load_state_dict(torch.load(model_urls[name]))\n return model\n\ndef presnet18(pretrained=False, **kwargs):\n return presnet(BasicBlock, [2, 2, 2, 2], 'presnet18', pre=pretrained, **kwargs)\n\ndef presnet34(pretrained=False, **kwargs):\n return presnet(BasicBlock, [3, 4, 6, 3], 'presnet34', pre=pretrained, **kwargs)\n\ndef presnet50(pretrained=False, **kwargs):\n return presnet(Bottleneck, [3, 4, 6, 3], 'presnet50', pre=pretrained, **kwargs)\n\ndef presnet101(pretrained=False, **kwargs):\n return presnet(Bottleneck, [3, 4, 23, 3], 'presnet101', pre=pretrained, **kwargs)\n\ndef presnet152(pretrained=False, **kwargs):\n return presnet(Bottleneck, [3, 8, 36, 3], 'presnet152', pre=pretrained, **kwargs)\n\n"
] |
[
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
]
] |
batmanlab/skl-groups
|
[
"2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b"
] |
[
"skl_groups/tests/test_utils.py"
] |
[
"from __future__ import division\n\nimport os\nimport sys\n\nimport numpy as np\nfrom sklearn.externals.six import iteritems\nfrom nose.tools import assert_raises\n\nif __name__ == '__main__':\n # make this copy of skl_groups importable\n _this_dir = os.path.dirname(os.path.abspath(__file__))\n sys.path.insert(0, os.path.dirname(os.path.dirname(_this_dir)))\n\nfrom skl_groups import utils\n\n\n################################################################################\n\ndef test_type_utils():\n tests = {\n 'bool': (np.array([False, True]), False, True),\n 'int32': (np.arange(10, dtype=np.int32), True, True),\n 'int64': (np.arange(10, dtype=np.int64), True, True),\n 'float32': (np.arange(10, dtype=np.float32), False, False),\n 'float64': (np.arange(10, dtype=np.float64), False, False),\n }\n\n for name, (a, is_int, is_cat) in iteritems(tests):\n assert utils.is_integer_type(a) == is_int, name\n assert utils.is_categorical_type(a) == is_cat, name\n\n assert utils.is_integer(a[0]) == is_int, name\n assert utils.is_categorical(a[0]) == is_cat, name\n\n assert utils.is_integer_type(utils.as_integer_type(tests['float32'][0]))\n assert utils.is_integer_type(utils.as_integer_type(tests['float64'][0]))\n assert_raises(\n ValueError, lambda: utils.as_integer_type(tests['float32'][0] + .2))\n\n assert utils.is_integer(5)\n assert utils.is_categorical(False)\n assert utils.is_categorical(True)\n\n################################################################################\n\nif __name__ == '__main__':\n import nose\n nose.main()"
] |
[
[
"numpy.arange",
"sklearn.externals.six.iteritems",
"numpy.array"
]
] |
moaraccounts/pandas
|
[
"ebb727e5cd8865a7f5d6cfb4b22d3278b6bf5e6b"
] |
[
"pandas/core/arrays/sparse/array.py"
] |
[
"\"\"\"\nSparseArray data structure\n\"\"\"\nfrom collections import abc\nimport numbers\nimport operator\nfrom typing import Any, Callable, Union\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nimport pandas._libs.sparse as splib\nfrom pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex\nfrom pandas._libs.tslibs import NaT\nimport pandas.compat as compat\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe,\n construct_1d_arraylike_from_scalar,\n find_common_type,\n infer_dtype_from_scalar,\n)\nfrom pandas.core.dtypes.common import (\n is_array_like,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_integer,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna, na_value_for_dtype, notna\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import ExtensionArray, ExtensionOpsMixin\nfrom pandas.core.arrays.sparse.dtype import SparseDtype\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array, sanitize_array\nfrom pandas.core.indexers import check_array_indexer\nfrom pandas.core.missing import interpolate_2d\nimport pandas.core.ops as ops\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\n\nimport pandas.io.formats.printing as printing\n\n# ----------------------------------------------------------------------------\n# Array\n\n\n_sparray_doc_kwargs = dict(klass=\"SparseArray\")\n\n\ndef _get_fill(arr: \"SparseArray\") -> np.ndarray:\n \"\"\"\n Create a 0-dim ndarray containing the fill value\n\n Parameters\n ----------\n arr : SparseArray\n\n Returns\n -------\n fill_value : ndarray\n 0-dim ndarray with just the fill value.\n\n Notes\n -----\n coerce fill_value to arr dtype if possible\n int64 SparseArray can have NaN as fill_value if there is no missing\n \"\"\"\n try:\n return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)\n except ValueError:\n return np.asarray(arr.fill_value)\n\n\ndef _sparse_array_op(\n left: \"SparseArray\", right: \"SparseArray\", op: Callable, name: str\n) -> Any:\n \"\"\"\n Perform a binary operation between two arrays.\n\n Parameters\n ----------\n left : Union[SparseArray, ndarray]\n right : Union[SparseArray, ndarray]\n op : Callable\n The binary operation to perform\n name str\n Name of the callable.\n\n Returns\n -------\n SparseArray\n \"\"\"\n if name.startswith(\"__\"):\n # For lookups in _libs.sparse we need non-dunder op name\n name = name[2:-2]\n\n # dtype used to find corresponding sparse method\n ltype = left.dtype.subtype\n rtype = right.dtype.subtype\n\n if not is_dtype_equal(ltype, rtype):\n subtype = find_common_type([ltype, rtype])\n ltype = SparseDtype(subtype, left.fill_value)\n rtype = SparseDtype(subtype, right.fill_value)\n\n # TODO(GH-23092): pass copy=False. 
Need to fix astype_nansafe\n left = left.astype(ltype)\n right = right.astype(rtype)\n dtype = ltype.subtype\n else:\n dtype = ltype\n\n # dtype the result must have\n result_dtype = None\n\n if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:\n with np.errstate(all=\"ignore\"):\n result = op(left.to_dense(), right.to_dense())\n fill = op(_get_fill(left), _get_fill(right))\n\n if left.sp_index.ngaps == 0:\n index = left.sp_index\n else:\n index = right.sp_index\n elif left.sp_index.equals(right.sp_index):\n with np.errstate(all=\"ignore\"):\n result = op(left.sp_values, right.sp_values)\n fill = op(_get_fill(left), _get_fill(right))\n index = left.sp_index\n else:\n if name[0] == \"r\":\n left, right = right, left\n name = name[1:]\n\n if name in (\"and\", \"or\", \"xor\") and dtype == \"bool\":\n opname = f\"sparse_{name}_uint8\"\n # to make template simple, cast here\n left_sp_values = left.sp_values.view(np.uint8)\n right_sp_values = right.sp_values.view(np.uint8)\n result_dtype = np.bool\n else:\n opname = f\"sparse_{name}_{dtype}\"\n left_sp_values = left.sp_values\n right_sp_values = right.sp_values\n\n sparse_op = getattr(splib, opname)\n\n with np.errstate(all=\"ignore\"):\n result, index, fill = sparse_op(\n left_sp_values,\n left.sp_index,\n left.fill_value,\n right_sp_values,\n right.sp_index,\n right.fill_value,\n )\n\n if result_dtype is None:\n result_dtype = result.dtype\n\n return _wrap_result(name, result, index, fill, dtype=result_dtype)\n\n\ndef _wrap_result(name, data, sparse_index, fill_value, dtype=None):\n \"\"\"\n wrap op result to have correct dtype\n \"\"\"\n if name.startswith(\"__\"):\n # e.g. __eq__ --> eq\n name = name[2:-2]\n\n if name in (\"eq\", \"ne\", \"lt\", \"gt\", \"le\", \"ge\"):\n dtype = np.bool\n\n fill_value = lib.item_from_zerodim(fill_value)\n\n if is_bool_dtype(dtype):\n # fill_value may be np.bool_\n fill_value = bool(fill_value)\n return SparseArray(\n data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype\n )\n\n\nclass SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):\n \"\"\"\n An ExtensionArray for storing sparse data.\n\n .. versionchanged:: 0.24.0\n\n Implements the ExtensionArray interface.\n\n Parameters\n ----------\n data : array-like\n A dense array of values to store in the SparseArray. This may contain\n `fill_value`.\n sparse_index : SparseIndex, optional\n index : Index\n fill_value : scalar, optional\n Elements in `data` that are `fill_value` are not stored in the\n SparseArray. For memory savings, this should be the most common value\n in `data`. By default, `fill_value` depends on the dtype of `data`:\n\n =========== ==========\n data.dtype na_value\n =========== ==========\n float ``np.nan``\n int ``0``\n bool False\n datetime64 ``pd.NaT``\n timedelta64 ``pd.NaT``\n =========== ==========\n\n The fill value is potentially specified in three ways. In order of\n precedence, these are\n\n 1. The `fill_value` argument\n 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is\n a ``SparseDtype``\n 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`\n is not a ``SparseDtype`` and `data` is a ``SparseArray``.\n\n kind : {'int', 'block'}, default 'int'\n The type of storage for sparse locations.\n\n * 'block': Stores a `block` and `block_length` for each\n contiguous *span* of sparse values. 
This is best when\n sparse data tends to be clumped together, with large\n regions of ``fill-value`` values between sparse values.\n * 'integer': uses an integer to store the location of\n each sparse value.\n\n dtype : np.dtype or SparseDtype, optional\n The dtype to use for the SparseArray. For numpy dtypes, this\n determines the dtype of ``self.sp_values``. For SparseDtype,\n this determines ``self.sp_values`` and ``self.fill_value``.\n copy : bool, default False\n Whether to explicitly copy the incoming `data` array.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Examples\n --------\n >>> from pandas.arrays import SparseArray\n >>> arr = SparseArray([0, 0, 1, 2])\n >>> arr\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n \"\"\"\n\n _subtyp = \"sparse_array\" # register ABCSparseArray\n _deprecations = PandasObject._deprecations | frozenset([\"get_values\"])\n _sparse_index: SparseIndex\n\n def __init__(\n self,\n data,\n sparse_index=None,\n index=None,\n fill_value=None,\n kind=\"integer\",\n dtype=None,\n copy=False,\n ):\n\n if fill_value is None and isinstance(dtype, SparseDtype):\n fill_value = dtype.fill_value\n\n if isinstance(data, type(self)):\n # disable normal inference on dtype, sparse_index, & fill_value\n if sparse_index is None:\n sparse_index = data.sp_index\n if fill_value is None:\n fill_value = data.fill_value\n if dtype is None:\n dtype = data.dtype\n # TODO: make kind=None, and use data.kind?\n data = data.sp_values\n\n # Handle use-provided dtype\n if isinstance(dtype, str):\n # Two options: dtype='int', regular numpy dtype\n # or dtype='Sparse[int]', a sparse dtype\n try:\n dtype = SparseDtype.construct_from_string(dtype)\n except TypeError:\n dtype = pandas_dtype(dtype)\n\n if isinstance(dtype, SparseDtype):\n if fill_value is None:\n fill_value = dtype.fill_value\n dtype = dtype.subtype\n\n if index is not None and not is_scalar(data):\n raise Exception(\"must only pass scalars with an index\")\n\n if is_scalar(data):\n if index is not None:\n if data is None:\n data = np.nan\n\n if index is not None:\n npoints = len(index)\n elif sparse_index is None:\n npoints = 1\n else:\n npoints = sparse_index.length\n\n dtype = infer_dtype_from_scalar(data)[0]\n data = construct_1d_arraylike_from_scalar(data, npoints, dtype)\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # TODO: disentangle the fill_value dtype inference from\n # dtype inference\n if data is None:\n # TODO: What should the empty dtype be? 
Object or float?\n data = np.array([], dtype=dtype)\n\n if not is_array_like(data):\n try:\n # probably shared code in sanitize_series\n\n data = sanitize_array(data, index=None)\n except ValueError:\n # NumPy may raise a ValueError on data like [1, []]\n # we retry with object dtype here.\n if dtype is None:\n dtype = object\n data = np.atleast_1d(np.asarray(data, dtype=dtype))\n else:\n raise\n\n if copy:\n # TODO: avoid double copy when dtype forces cast.\n data = data.copy()\n\n if fill_value is None:\n fill_value_dtype = data.dtype if dtype is None else dtype\n if fill_value_dtype is None:\n fill_value = np.nan\n else:\n fill_value = na_value_for_dtype(fill_value_dtype)\n\n if isinstance(data, type(self)) and sparse_index is None:\n sparse_index = data._sparse_index\n sparse_values = np.asarray(data.sp_values, dtype=dtype)\n elif sparse_index is None:\n data = extract_array(data, extract_numpy=True)\n if not isinstance(data, np.ndarray):\n # EA\n if is_datetime64tz_dtype(data.dtype):\n warnings.warn(\n f\"Creating SparseArray from {data.dtype} data \"\n \"loses timezone information. Cast to object before \"\n \"sparse to retain timezone information.\",\n UserWarning,\n stacklevel=2,\n )\n data = np.asarray(data, dtype=\"datetime64[ns]\")\n data = np.asarray(data)\n sparse_values, sparse_index, fill_value = make_sparse(\n data, kind=kind, fill_value=fill_value, dtype=dtype\n )\n else:\n sparse_values = np.asarray(data, dtype=dtype)\n if len(sparse_values) != sparse_index.npoints:\n raise AssertionError(\n f\"Non array-like type {type(sparse_values)} must \"\n \"have the same length as the index\"\n )\n self._sparse_index = sparse_index\n self._sparse_values = sparse_values\n self._dtype = SparseDtype(sparse_values.dtype, fill_value)\n\n @classmethod\n def _simple_new(\n cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype\n ) -> \"SparseArray\":\n new = object.__new__(cls)\n new._sparse_index = sparse_index\n new._sparse_values = sparse_array\n new._dtype = dtype\n return new\n\n @classmethod\n def from_spmatrix(cls, data):\n \"\"\"\n Create a SparseArray from a scipy.sparse matrix.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n data : scipy.sparse.sp_matrix\n This should be a SciPy sparse matrix where the size\n of the second dimension is 1. In other words, a\n sparse matrix with a single column.\n\n Returns\n -------\n SparseArray\n\n Examples\n --------\n >>> import scipy.sparse\n >>> mat = scipy.sparse.coo_matrix((4, 1))\n >>> pd.arrays.SparseArray.from_spmatrix(mat)\n [0.0, 0.0, 0.0, 0.0]\n Fill: 0.0\n IntIndex\n Indices: array([], dtype=int32)\n \"\"\"\n length, ncol = data.shape\n\n if ncol != 1:\n raise ValueError(f\"'data' must have a single column, not '{ncol}'\")\n\n # our sparse index classes require that the positions be strictly\n # increasing. So we need to sort loc, and arr accordingly.\n data = data.tocsc()\n data.sort_indices()\n arr = data.data\n idx = data.indices\n\n zero = np.array(0, dtype=arr.dtype).item()\n dtype = SparseDtype(arr.dtype, zero)\n index = IntIndex(length, idx)\n\n return cls._simple_new(arr, index, dtype)\n\n def __array__(self, dtype=None, copy=True) -> np.ndarray:\n fill_value = self.fill_value\n\n if self.sp_index.ngaps == 0:\n # Compat for na dtype and int values.\n return self.sp_values\n if dtype is None:\n # Can NumPy represent this type?\n # If not, `np.result_type` will raise. 
We catch that\n # and return object.\n if is_datetime64_any_dtype(self.sp_values.dtype):\n # However, we *do* special-case the common case of\n # a datetime64 with pandas NaT.\n if fill_value is NaT:\n # Can't put pd.NaT in a datetime64[ns]\n fill_value = np.datetime64(\"NaT\")\n try:\n dtype = np.result_type(self.sp_values.dtype, type(fill_value))\n except TypeError:\n dtype = object\n\n out = np.full(self.shape, fill_value, dtype=dtype)\n out[self.sp_index.to_int_index().indices] = self.sp_values\n return out\n\n def __setitem__(self, key, value):\n # I suppose we could allow setting of non-fill_value elements.\n # TODO(SparseArray.__setitem__): remove special cases in\n # ExtensionBlock.where\n msg = \"SparseArray does not support item assignment via setitem\"\n raise TypeError(msg)\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n return cls(scalars, dtype=dtype)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values, dtype=original.dtype)\n\n # ------------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------------\n @property\n def sp_index(self):\n \"\"\"\n The SparseIndex containing the location of non- ``fill_value`` points.\n \"\"\"\n return self._sparse_index\n\n @property\n def sp_values(self) -> np.ndarray:\n \"\"\"\n An ndarray containing the non- ``fill_value`` values.\n\n Examples\n --------\n >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)\n >>> s.sp_values\n array([1, 2])\n \"\"\"\n return self._sparse_values\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def fill_value(self):\n \"\"\"\n Elements in `data` that are `fill_value` are not stored.\n\n For memory savings, this should be the most common value in the array.\n \"\"\"\n return self.dtype.fill_value\n\n @fill_value.setter\n def fill_value(self, value):\n self._dtype = SparseDtype(self.dtype.subtype, value)\n\n @property\n def kind(self) -> str:\n \"\"\"\n The kind of sparse index for this array. 
One of {'integer', 'block'}.\n \"\"\"\n if isinstance(self.sp_index, IntIndex):\n return \"integer\"\n else:\n return \"block\"\n\n @property\n def _valid_sp_values(self):\n sp_vals = self.sp_values\n mask = notna(sp_vals)\n return sp_vals[mask]\n\n def __len__(self) -> int:\n return self.sp_index.length\n\n @property\n def _null_fill_value(self):\n return self._dtype._is_na_fill_value\n\n def _fill_value_matches(self, fill_value):\n if self._null_fill_value:\n return isna(fill_value)\n else:\n return self.fill_value == fill_value\n\n @property\n def nbytes(self) -> int:\n return self.sp_values.nbytes + self.sp_index.nbytes\n\n @property\n def density(self):\n \"\"\"\n The percent of non- ``fill_value`` points, as decimal.\n\n Examples\n --------\n >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)\n >>> s.density\n 0.6\n \"\"\"\n r = float(self.sp_index.npoints) / float(self.sp_index.length)\n return r\n\n @property\n def npoints(self) -> int:\n \"\"\"\n The number of non- ``fill_value`` points.\n\n Examples\n --------\n >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)\n >>> s.npoints\n 3\n \"\"\"\n return self.sp_index.npoints\n\n def isna(self):\n # If null fill value, we want SparseDtype[bool, true]\n # to preserve the same memory usage.\n dtype = SparseDtype(bool, self._null_fill_value)\n return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)\n\n def fillna(self, value=None, method=None, limit=None):\n \"\"\"\n Fill missing values with `value`.\n\n Parameters\n ----------\n value : scalar, optional\n method : str, optional\n\n .. warning::\n\n Using 'method' will result in high memory use,\n as all `fill_value` methods will be converted to\n an in-memory ndarray\n\n limit : int, optional\n\n Returns\n -------\n SparseArray\n\n Notes\n -----\n When `value` is specified, the result's ``fill_value`` depends on\n ``self.fill_value``. The goal is to maintain low-memory use.\n\n If ``self.fill_value`` is NA, the result dtype will be\n ``SparseDtype(self.dtype, fill_value=value)``. This will preserve\n amount of memory used before and after filling.\n\n When ``self.fill_value`` is not NA, the result dtype will be\n ``self.dtype``. 
Again, this preserves the amount of memory used.\n \"\"\"\n if (method is None and value is None) or (\n method is not None and value is not None\n ):\n raise ValueError(\"Must specify one of 'method' or 'value'.\")\n\n elif method is not None:\n msg = \"fillna with 'method' requires high memory usage.\"\n warnings.warn(msg, PerformanceWarning)\n filled = interpolate_2d(np.asarray(self), method=method, limit=limit)\n return type(self)(filled, fill_value=self.fill_value)\n\n else:\n new_values = np.where(isna(self.sp_values), value, self.sp_values)\n\n if self._null_fill_value:\n # This is essentially just updating the dtype.\n new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)\n else:\n new_dtype = self.dtype\n\n return self._simple_new(new_values, self._sparse_index, new_dtype)\n\n def shift(self, periods=1, fill_value=None):\n\n if not len(self) or periods == 0:\n return self.copy()\n\n if isna(fill_value):\n fill_value = self.dtype.na_value\n\n subtype = np.result_type(fill_value, self.dtype.subtype)\n\n if subtype != self.dtype.subtype:\n # just coerce up front\n arr = self.astype(SparseDtype(subtype, self.fill_value))\n else:\n arr = self\n\n empty = self._from_sequence(\n [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype\n )\n\n if periods > 0:\n a = empty\n b = arr[:-periods]\n else:\n a = arr[abs(periods) :]\n b = empty\n return arr._concat_same_type([a, b])\n\n def _first_fill_value_loc(self):\n \"\"\"\n Get the location of the first missing value.\n\n Returns\n -------\n int\n \"\"\"\n if len(self) == 0 or self.sp_index.npoints == len(self):\n return -1\n\n indices = self.sp_index.to_int_index().indices\n if not len(indices) or indices[0] > 0:\n return 0\n\n diff = indices[1:] - indices[:-1]\n return np.searchsorted(diff, 2) + 1\n\n def unique(self):\n uniques = list(algos.unique(self.sp_values))\n fill_loc = self._first_fill_value_loc()\n if fill_loc >= 0:\n uniques.insert(fill_loc, self.fill_value)\n return type(self)._from_sequence(uniques, dtype=self.dtype)\n\n def _values_for_factorize(self):\n # Still override this for hash_pandas_object\n return np.asarray(self), self.fill_value\n\n def factorize(self, na_sentinel=-1):\n # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]\n # The sparsity on this is backwards from what Sparse would want. 
Want\n # ExtensionArray.factorize -> Tuple[EA, EA]\n # Given that we have to return a dense array of codes, why bother\n # implementing an efficient factorize?\n codes, uniques = algos.factorize(np.asarray(self), na_sentinel=na_sentinel)\n uniques = SparseArray(uniques, dtype=self.dtype)\n return codes, uniques\n\n def value_counts(self, dropna=True):\n \"\"\"\n Returns a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN, even if NaN is in sp_values.\n\n Returns\n -------\n counts : Series\n \"\"\"\n from pandas import Index, Series\n\n keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna)\n fcounts = self.sp_index.ngaps\n if fcounts > 0:\n if self._null_fill_value and dropna:\n pass\n else:\n if self._null_fill_value:\n mask = isna(keys)\n else:\n mask = keys == self.fill_value\n\n if mask.any():\n counts[mask] += fcounts\n else:\n keys = np.insert(keys, 0, self.fill_value)\n counts = np.insert(counts, 0, fcounts)\n\n if not isinstance(keys, ABCIndexClass):\n keys = Index(keys)\n result = Series(counts, index=keys)\n return result\n\n # --------\n # Indexing\n # --------\n\n def __getitem__(self, key):\n # avoid mypy issues when importing at the top-level\n from pandas.core.indexing import check_bool_indexer\n\n if isinstance(key, tuple):\n if len(key) > 1:\n raise IndexError(\"too many indices for array.\")\n key = key[0]\n\n if is_integer(key):\n return self._get_val_at(key)\n elif isinstance(key, tuple):\n data_slice = self.to_dense()[key]\n elif isinstance(key, slice):\n # special case to preserve dtypes\n if key == slice(None):\n return self.copy()\n # TODO: this logic is surely elsewhere\n # TODO: this could be more efficient\n indices = np.arange(len(self), dtype=np.int32)[key]\n return self.take(indices)\n else:\n # TODO: I think we can avoid densifying when masking a\n # boolean SparseArray with another. 
Need to look at the\n # key's fill_value for True / False, and then do an intersection\n # on the indicies of the sp_values.\n if isinstance(key, SparseArray):\n if is_bool_dtype(key):\n key = key.to_dense()\n else:\n key = np.asarray(key)\n\n key = check_array_indexer(self, key)\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self, key)\n\n return self.take(np.arange(len(key), dtype=np.int32)[key])\n elif hasattr(key, \"__len__\"):\n return self.take(key)\n else:\n raise ValueError(f\"Cannot slice with '{key}'\")\n\n return type(self)(data_slice, kind=self.kind)\n\n def _get_val_at(self, loc):\n n = len(self)\n if loc < 0:\n loc += n\n\n if loc >= n or loc < 0:\n raise IndexError(\"Out of bounds access\")\n\n sp_loc = self.sp_index.lookup(loc)\n if sp_loc == -1:\n return self.fill_value\n else:\n val = self.sp_values[sp_loc]\n val = com.maybe_box_datetimelike(val, self.sp_values.dtype)\n return val\n\n def take(self, indices, allow_fill=False, fill_value=None) -> \"SparseArray\":\n if is_scalar(indices):\n raise ValueError(f\"'indices' must be an array, not a scalar '{indices}'.\")\n indices = np.asarray(indices, dtype=np.int32)\n\n if indices.size == 0:\n result = np.array([], dtype=\"object\")\n kwargs = {\"dtype\": self.dtype}\n elif allow_fill:\n result = self._take_with_fill(indices, fill_value=fill_value)\n kwargs = {}\n else:\n result = self._take_without_fill(indices)\n kwargs = {\"dtype\": self.dtype}\n\n return type(self)(result, fill_value=self.fill_value, kind=self.kind, **kwargs)\n\n def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:\n if fill_value is None:\n fill_value = self.dtype.na_value\n\n if indices.min() < -1:\n raise ValueError(\n \"Invalid value in 'indices'. Must be between -1 \"\n \"and the length of the array.\"\n )\n\n if indices.max() >= len(self):\n raise IndexError(\"out of bounds value in 'indices'.\")\n\n if len(self) == 0:\n # Empty... Allow taking only if all empty\n if (indices == -1).all():\n dtype = np.result_type(self.sp_values, type(fill_value))\n taken = np.empty_like(indices, dtype=dtype)\n taken.fill(fill_value)\n return taken\n else:\n raise IndexError(\"cannot do a non-empty take from an empty axes.\")\n\n sp_indexer = self.sp_index.lookup_array(indices)\n\n if self.sp_index.npoints == 0:\n # Avoid taking from the empty self.sp_values\n taken = np.full(\n sp_indexer.shape,\n fill_value=fill_value,\n dtype=np.result_type(type(fill_value)),\n )\n else:\n taken = self.sp_values.take(sp_indexer)\n\n # sp_indexer may be -1 for two reasons\n # 1.) we took for an index of -1 (new)\n # 2.) 
we took a value that was self.fill_value (old)\n new_fill_indices = indices == -1\n old_fill_indices = (sp_indexer == -1) & ~new_fill_indices\n\n # Fill in two steps.\n # Old fill values\n # New fill values\n # potentially coercing to a new dtype at each stage.\n\n m0 = sp_indexer[old_fill_indices] < 0\n m1 = sp_indexer[new_fill_indices] < 0\n\n result_type = taken.dtype\n\n if m0.any():\n result_type = np.result_type(result_type, type(self.fill_value))\n taken = taken.astype(result_type)\n taken[old_fill_indices] = self.fill_value\n\n if m1.any():\n result_type = np.result_type(result_type, type(fill_value))\n taken = taken.astype(result_type)\n taken[new_fill_indices] = fill_value\n\n return taken\n\n def _take_without_fill(self, indices) -> Union[np.ndarray, \"SparseArray\"]:\n to_shift = indices < 0\n indices = indices.copy()\n\n n = len(self)\n\n if (indices.max() >= n) or (indices.min() < -n):\n if n == 0:\n raise IndexError(\"cannot do a non-empty take from an empty axes.\")\n else:\n raise IndexError(\"out of bounds value in 'indices'.\")\n\n if to_shift.any():\n indices[to_shift] += n\n\n if self.sp_index.npoints == 0:\n # edge case in take...\n # I think just return\n out = np.full(\n indices.shape,\n self.fill_value,\n dtype=np.result_type(type(self.fill_value)),\n )\n arr, sp_index, fill_value = make_sparse(out, fill_value=self.fill_value)\n return type(self)(arr, sparse_index=sp_index, fill_value=fill_value)\n\n sp_indexer = self.sp_index.lookup_array(indices)\n taken = self.sp_values.take(sp_indexer)\n fillable = sp_indexer < 0\n\n if fillable.any():\n # TODO: may need to coerce array to fill value\n result_type = np.result_type(taken, type(self.fill_value))\n taken = taken.astype(result_type)\n taken[fillable] = self.fill_value\n\n return taken\n\n def searchsorted(self, v, side=\"left\", sorter=None):\n msg = \"searchsorted requires high memory usage.\"\n warnings.warn(msg, PerformanceWarning, stacklevel=2)\n if not is_scalar(v):\n v = np.asarray(v)\n v = np.asarray(v)\n return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)\n\n def copy(self):\n values = self.sp_values.copy()\n return self._simple_new(values, self.sp_index, self.dtype)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n fill_value = to_concat[0].fill_value\n\n values = []\n length = 0\n\n if to_concat:\n sp_kind = to_concat[0].kind\n else:\n sp_kind = \"integer\"\n\n if sp_kind == \"integer\":\n indices = []\n\n for arr in to_concat:\n idx = arr.sp_index.to_int_index().indices.copy()\n idx += length # TODO: wraparound\n length += arr.sp_index.length\n\n values.append(arr.sp_values)\n indices.append(idx)\n\n data = np.concatenate(values)\n indices = np.concatenate(indices)\n sp_index = IntIndex(length, indices)\n\n else:\n # when concatenating block indices, we don't claim that you'll\n # get an identical index as concating the values and then\n # creating a new index. 
We don't want to spend the time trying\n # to merge blocks across arrays in `to_concat`, so the resulting\n # BlockIndex may have more blocs.\n blengths = []\n blocs = []\n\n for arr in to_concat:\n idx = arr.sp_index.to_block_index()\n\n values.append(arr.sp_values)\n blocs.append(idx.blocs.copy() + length)\n blengths.append(idx.blengths)\n length += arr.sp_index.length\n\n data = np.concatenate(values)\n blocs = np.concatenate(blocs)\n blengths = np.concatenate(blengths)\n\n sp_index = BlockIndex(length, blocs, blengths)\n\n return cls(data, sparse_index=sp_index, fill_value=fill_value)\n\n def astype(self, dtype=None, copy=True):\n \"\"\"\n Change the dtype of a SparseArray.\n\n The output will always be a SparseArray. To convert to a dense\n ndarray with a certain dtype, use :meth:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n For SparseDtype, this changes the dtype of\n ``self.sp_values`` and the ``self.fill_value``.\n\n For other dtypes, this only changes the dtype of\n ``self.sp_values``.\n\n copy : bool, default True\n Whether to ensure a copy is made, even if not necessary.\n\n Returns\n -------\n SparseArray\n\n Examples\n --------\n >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])\n >>> arr\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n >>> arr.astype(np.dtype('int32'))\n [0, 0, 1, 2]\n Fill: 0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n Using a NumPy dtype with a different kind (e.g. float) will coerce\n just ``self.sp_values``.\n\n >>> arr.astype(np.dtype('float64'))\n ... # doctest: +NORMALIZE_WHITESPACE\n [0.0, 0.0, 1.0, 2.0]\n Fill: 0.0\n IntIndex\n Indices: array([2, 3], dtype=int32)\n\n Use a SparseDtype if you wish to be change the fill value as well.\n\n >>> arr.astype(SparseDtype(\"float64\", fill_value=np.nan))\n ... # doctest: +NORMALIZE_WHITESPACE\n [nan, nan, 1.0, 2.0]\n Fill: nan\n IntIndex\n Indices: array([2, 3], dtype=int32)\n \"\"\"\n dtype = self.dtype.update_dtype(dtype)\n subtype = dtype._subtype_with_str\n sp_values = astype_nansafe(self.sp_values, subtype, copy=copy)\n if sp_values is self.sp_values and copy:\n sp_values = sp_values.copy()\n\n return self._simple_new(sp_values, self.sp_index, dtype)\n\n def map(self, mapper):\n \"\"\"\n Map categories using input correspondence (dict, Series, or function).\n\n Parameters\n ----------\n mapper : dict, Series, callable\n The correspondence from old values to new.\n\n Returns\n -------\n SparseArray\n The output array will have the same density as the input.\n The output fill value will be the result of applying the\n mapping to ``self.fill_value``\n\n Examples\n --------\n >>> arr = pd.arrays.SparseArray([0, 1, 2])\n >>> arr.map(lambda x: x + 10)\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n\n >>> arr.map({0: 10, 1: 11, 2: 12})\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n\n >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))\n [10, 11, 12]\n Fill: 10\n IntIndex\n Indices: array([1, 2], dtype=int32)\n \"\"\"\n # this is used in apply.\n # We get hit since we're an \"is_extension_type\" but regular extension\n # types are not hit. 
This may be worth adding to the interface.\n if isinstance(mapper, ABCSeries):\n mapper = mapper.to_dict()\n\n if isinstance(mapper, abc.Mapping):\n fill_value = mapper.get(self.fill_value, self.fill_value)\n sp_values = [mapper.get(x, None) for x in self.sp_values]\n else:\n fill_value = mapper(self.fill_value)\n sp_values = [mapper(x) for x in self.sp_values]\n\n return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value)\n\n def to_dense(self):\n \"\"\"\n Convert SparseArray to a NumPy array.\n\n Returns\n -------\n arr : NumPy array\n \"\"\"\n return np.asarray(self, dtype=self.sp_values.dtype)\n\n _internal_get_values = to_dense\n\n # ------------------------------------------------------------------------\n # IO\n # ------------------------------------------------------------------------\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n if isinstance(state, tuple):\n # Compat for pandas < 0.24.0\n nd_state, (fill_value, sp_index) = state\n sparse_values = np.array([])\n sparse_values.__setstate__(nd_state)\n\n self._sparse_values = sparse_values\n self._sparse_index = sp_index\n self._dtype = SparseDtype(sparse_values.dtype, fill_value)\n else:\n self.__dict__.update(state)\n\n def nonzero(self):\n if self.fill_value == 0:\n return (self.sp_index.to_int_index().indices,)\n else:\n return (self.sp_index.to_int_index().indices[self.sp_values != 0],)\n\n # ------------------------------------------------------------------------\n # Reductions\n # ------------------------------------------------------------------------\n\n def _reduce(self, name, skipna=True, **kwargs):\n method = getattr(self, name, None)\n\n if method is None:\n raise TypeError(f\"cannot perform {name} with type {self.dtype}\")\n\n if skipna:\n arr = self\n else:\n arr = self.dropna()\n\n # we don't support these kwargs.\n # They should only be present when called via pandas, so do it here.\n # instead of in `any` / `all` (which will raise if they're present,\n # thanks to nv.validate\n kwargs.pop(\"filter_type\", None)\n kwargs.pop(\"numeric_only\", None)\n kwargs.pop(\"op\", None)\n return getattr(arr, name)(**kwargs)\n\n def all(self, axis=None, *args, **kwargs):\n \"\"\"\n Tests whether all elements evaluate True\n\n Returns\n -------\n all : bool\n\n See Also\n --------\n numpy.all\n \"\"\"\n nv.validate_all(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and not np.all(self.fill_value):\n return False\n\n return values.all()\n\n def any(self, axis=0, *args, **kwargs):\n \"\"\"\n Tests whether at least one of elements evaluate True\n\n Returns\n -------\n any : bool\n\n See Also\n --------\n numpy.any\n \"\"\"\n nv.validate_any(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and np.any(self.fill_value):\n return True\n\n return values.any().item()\n\n def sum(self, axis=0, *args, **kwargs):\n \"\"\"\n Sum of non-NA/null values\n\n Returns\n -------\n sum : float\n \"\"\"\n nv.validate_sum(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n if self._null_fill_value:\n return sp_sum\n else:\n nsparse = self.sp_index.ngaps\n return sp_sum + self.fill_value * nsparse\n\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"\n Cumulative sum of non-NA/null values.\n\n When performing the cumulative summation, any non-NA/null values will\n be skipped. 
The resulting SparseArray will preserve the locations of\n NaN values, but the fill value will be `np.nan` regardless.\n\n Parameters\n ----------\n axis : int or None\n Axis over which to perform the cumulative summation. If None,\n perform cumulative summation over flattened array.\n\n Returns\n -------\n cumsum : SparseArray\n \"\"\"\n nv.validate_cumsum(args, kwargs)\n\n if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.\n raise ValueError(f\"axis(={axis}) out of bounds\")\n\n if not self._null_fill_value:\n return SparseArray(self.to_dense()).cumsum()\n\n return SparseArray(\n self.sp_values.cumsum(),\n sparse_index=self.sp_index,\n fill_value=self.fill_value,\n )\n\n def mean(self, axis=0, *args, **kwargs):\n \"\"\"\n Mean of non-NA/null values\n\n Returns\n -------\n mean : float\n \"\"\"\n nv.validate_mean(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n ct = len(valid_vals)\n\n if self._null_fill_value:\n return sp_sum / ct\n else:\n nsparse = self.sp_index.ngaps\n return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)\n\n def transpose(self, *axes) -> \"SparseArray\":\n \"\"\"\n Returns the SparseArray.\n \"\"\"\n return self\n\n @property\n def T(self) -> \"SparseArray\":\n \"\"\"\n Returns the SparseArray.\n \"\"\"\n return self\n\n # ------------------------------------------------------------------------\n # Ufuncs\n # ------------------------------------------------------------------------\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):\n return NotImplemented\n\n # for binary ops, use our custom dunder methods\n result = ops.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n if result is not NotImplemented:\n return result\n\n if len(inputs) == 1:\n # No alignment necessary.\n sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)\n fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)\n\n if isinstance(sp_values, tuple):\n # multiple outputs. e.g. modf\n arrays = tuple(\n self._simple_new(\n sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)\n )\n for sp_value, fv in zip(sp_values, fill_value)\n )\n return arrays\n elif is_scalar(sp_values):\n # e.g. 
reductions\n return sp_values\n\n return self._simple_new(\n sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)\n )\n\n result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs)\n if out:\n if len(out) == 1:\n out = out[0]\n return out\n\n if type(result) is tuple:\n return tuple(type(self)(x) for x in result)\n elif method == \"at\":\n # no return value\n return None\n else:\n return type(self)(result)\n\n def __abs__(self):\n return np.abs(self)\n\n # ------------------------------------------------------------------------\n # Ops\n # ------------------------------------------------------------------------\n\n @classmethod\n def _create_unary_method(cls, op) -> Callable[[\"SparseArray\"], \"SparseArray\"]:\n def sparse_unary_method(self) -> \"SparseArray\":\n fill_value = op(np.array(self.fill_value)).item()\n values = op(self.sp_values)\n dtype = SparseDtype(values.dtype, fill_value)\n return cls._simple_new(values, self.sp_index, dtype)\n\n name = f\"__{op.__name__}__\"\n return compat.set_function_name(sparse_unary_method, name, cls)\n\n @classmethod\n def _create_arithmetic_method(cls, op):\n op_name = op.__name__\n\n @unpack_zerodim_and_defer(op_name)\n def sparse_arithmetic_method(self, other):\n\n if isinstance(other, SparseArray):\n return _sparse_array_op(self, other, op, op_name)\n\n elif is_scalar(other):\n with np.errstate(all=\"ignore\"):\n fill = op(_get_fill(self), np.asarray(other))\n result = op(self.sp_values, other)\n\n if op_name == \"divmod\":\n left, right = result\n lfill, rfill = fill\n return (\n _wrap_result(op_name, left, self.sp_index, lfill),\n _wrap_result(op_name, right, self.sp_index, rfill),\n )\n\n return _wrap_result(op_name, result, self.sp_index, fill)\n\n else:\n other = np.asarray(other)\n with np.errstate(all=\"ignore\"):\n # TODO: look into _wrap_result\n if len(self) != len(other):\n raise AssertionError(\n (f\"length mismatch: {len(self)} vs. {len(other)}\")\n )\n if not isinstance(other, SparseArray):\n dtype = getattr(other, \"dtype\", None)\n other = SparseArray(\n other, fill_value=self.fill_value, dtype=dtype\n )\n return _sparse_array_op(self, other, op, op_name)\n\n name = f\"__{op.__name__}__\"\n return compat.set_function_name(sparse_arithmetic_method, name, cls)\n\n @classmethod\n def _create_comparison_method(cls, op):\n op_name = op.__name__\n if op_name in {\"and_\", \"or_\"}:\n op_name = op_name[:-1]\n\n @unpack_zerodim_and_defer(op_name)\n def cmp_method(self, other):\n\n if not is_scalar(other) and not isinstance(other, type(self)):\n # convert list-like to ndarray\n other = np.asarray(other)\n\n if isinstance(other, np.ndarray):\n # TODO: make this more flexible than just ndarray...\n if len(self) != len(other):\n raise AssertionError(\n f\"length mismatch: {len(self)} vs. 
{len(other)}\"\n )\n other = SparseArray(other, fill_value=self.fill_value)\n\n if isinstance(other, SparseArray):\n return _sparse_array_op(self, other, op, op_name)\n else:\n with np.errstate(all=\"ignore\"):\n fill_value = op(self.fill_value, other)\n result = op(self.sp_values, other)\n\n return type(self)(\n result,\n sparse_index=self.sp_index,\n fill_value=fill_value,\n dtype=np.bool_,\n )\n\n name = f\"__{op.__name__}__\"\n return compat.set_function_name(cmp_method, name, cls)\n\n @classmethod\n def _add_unary_ops(cls):\n cls.__pos__ = cls._create_unary_method(operator.pos)\n cls.__neg__ = cls._create_unary_method(operator.neg)\n cls.__invert__ = cls._create_unary_method(operator.invert)\n\n @classmethod\n def _add_comparison_ops(cls):\n cls.__and__ = cls._create_comparison_method(operator.and_)\n cls.__or__ = cls._create_comparison_method(operator.or_)\n cls.__xor__ = cls._create_arithmetic_method(operator.xor)\n super()._add_comparison_ops()\n\n # ----------\n # Formatting\n # -----------\n def __repr__(self) -> str:\n pp_str = printing.pprint_thing(self)\n pp_fill = printing.pprint_thing(self.fill_value)\n pp_index = printing.pprint_thing(self.sp_index)\n return f\"{pp_str}\\nFill: {pp_fill}\\n{pp_index}\"\n\n def _formatter(self, boxed=False):\n # Defer to the formatter from the GenericArrayFormatter calling us.\n # This will infer the correct formatter from the dtype of the values.\n return None\n\n\nSparseArray._add_arithmetic_ops()\nSparseArray._add_comparison_ops()\nSparseArray._add_unary_ops()\n\n\ndef make_sparse(arr: np.ndarray, kind=\"block\", fill_value=None, dtype=None, copy=False):\n \"\"\"\n Convert ndarray to sparse format\n\n Parameters\n ----------\n arr : ndarray\n kind : {'block', 'integer'}\n fill_value : NaN or another value\n dtype : np.dtype, optional\n copy : bool, default False\n\n Returns\n -------\n (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)\n \"\"\"\n assert isinstance(arr, np.ndarray)\n\n if arr.ndim > 1:\n raise TypeError(\"expected dimension <= 1 data\")\n\n if fill_value is None:\n fill_value = na_value_for_dtype(arr.dtype)\n\n if isna(fill_value):\n mask = notna(arr)\n else:\n # cast to object comparison to be safe\n if is_string_dtype(arr):\n arr = arr.astype(object)\n\n if is_object_dtype(arr.dtype):\n # element-wise equality check method in numpy doesn't treat\n # each element type, eg. 0, 0.0, and False are treated as\n # same. So we have to check the both of its type and value.\n mask = splib.make_mask_object_ndarray(arr, fill_value)\n else:\n mask = arr != fill_value\n\n length = len(arr)\n if length != len(mask):\n # the arr is a SparseArray\n indices = mask.sp_index.indices\n else:\n indices = mask.nonzero()[0].astype(np.int32)\n\n index = _make_index(length, indices, kind)\n sparsified_values = arr[mask]\n if dtype is not None:\n sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)\n # TODO: copy\n return sparsified_values, index, fill_value\n\n\ndef _make_index(length, indices, kind):\n\n if kind == \"block\" or isinstance(kind, BlockIndex):\n locs, lens = splib.get_blocks(indices)\n index = BlockIndex(length, locs, lens)\n elif kind == \"integer\" or isinstance(kind, IntIndex):\n index = IntIndex(length, indices)\n else: # pragma: no cover\n raise ValueError(\"must be block or integer type\")\n return index\n"
] |
[
[
"pandas._libs.sparse.make_mask_object_ndarray",
"pandas.Series",
"numpy.asarray",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.sparse.IntIndex",
"pandas.compat.numpy.function.validate_cumsum",
"pandas.core.dtypes.missing.notna",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.concatenate",
"numpy.all",
"pandas.core.arrays.sparse.dtype.SparseDtype",
"numpy.any",
"pandas.compat.numpy.function.validate_any",
"numpy.searchsorted",
"pandas.core.dtypes.common.is_array_like",
"pandas.core.indexers.check_array_indexer",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas.compat.numpy.function.validate_all",
"pandas.core.indexing.check_bool_indexer",
"pandas.core.algorithms.unique",
"numpy.empty_like",
"pandas.Index",
"numpy.full",
"pandas._libs.sparse.BlockIndex",
"pandas.core.ops.maybe_dispatch_ufunc_to_dunder_op",
"pandas.core.dtypes.cast.construct_1d_arraylike_from_scalar",
"numpy.insert",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_string_dtype",
"pandas.core.arrays.sparse.dtype.SparseDtype.construct_from_string",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.compat.numpy.function.validate_mean",
"numpy.errstate",
"numpy.array",
"pandas.compat.numpy.function.validate_sum",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.core.dtypes.common.is_bool_dtype",
"numpy.abs",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"numpy.datetime64",
"pandas.io.formats.printing.pprint_thing",
"numpy.result_type",
"pandas.core.dtypes.missing.na_value_for_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.sparse.get_blocks",
"pandas.core.construction.sanitize_array",
"pandas.core.dtypes.missing.isna",
"pandas.core.construction.extract_array",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.algorithms._value_counts_arraylike"
]
] |
gparolin/AEco
|
[
"5bdc542150cd89581b134418bac5a3bb39fc45d6"
] |
[
"Scripts/Model.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport warnings\nimport xarray as xr\nimport dask.dataframe as dd\nimport dask.array as da\n\nclass LCI():\n \"\"\"Defines a LCI class based on xr.DataArray.\"\"\"\n \n def __init__(self, name, type, iterations, UP, parameters):\n \"\"\"Initialization with the phases and substances of the LCI.\"\"\"\n \n self.name = name\n self.type = type\n self.UP = UP\n self.substances = UP.Substances\n self.p = parameters\n self.data = None\n self.build(iterations)\n \n def __repr__(self):\n return f\"{self.data}\"\n \n def __getitem__(self, phase):\n return self.data[phase]\n \n def __setitem__(self, phase, other):\n self.data[phase] = other\n \n def build(self, iterations):\n \"\"\"Builds the xr.DataArray for the LCI.\"\"\"\n \n if self.data == None:\n self.data = xr.Dataset(coords={'Substances': self.substances, \n 'i': np.arange(iterations)},\n attrs={'Name':self.name})\n self.data.coords['Units'] = self.substances.Units\n \n return self.data\n \n def substance(self, substance):\n \"\"\"Locates the specified substance on the data.\"\"\"\n return self.data.loc[{'Substances': substance}]\n \n def iteration(self, iteration):\n \"\"\"Locates the specified iteration on the data.\"\"\"\n \n return self.data.loc[{'i': iteration}]\n \n def find(self, phase, substance, iteration):\n \"\"\"Locates the specified substance, phase and iteration on the data.\"\"\"\n \n return self.data[phase].loc[{'Substances': substance, 'i':iteration}]\n \n def mean(self, phase):\n \"\"\"Returns the mean for all iterations of a certain phase.\"\"\"\n \n return self['Office'].mean('i').load()\n \n def median(self, phase):\n \"\"\"Returns the median for all iterations of a certain phase.\"\"\"\n \n return self['Office'].median('i').load()\n\n def office(self):\n LCI_E_office = self.electricity(self.p[\"E_office\"]) #per month\n LCI_E_office = LCI_E_office * self.p[\"devmonths\"] #per development\n\n LCI_water_office = self.UP[\"Water\"] * self.p[\"water_office\"] \\\n + self.UP[\"Wastewater\"] * self.p[\"wastewater_office\"] #per month\n LCI_water_office = LCI_water_office * self.p[\"devmonths\"] #per development\n\n self.p[\"travel\"] = 18470 / 12 * self.p[\"developers\"] * self.p[\"devmonths\"] #in km\n\n LCI_travel = self.UP[\"Car\"]*self.p[\"travel\"]*0.1 \\\n + self.UP[\"Airplane\"]*self.p[\"travel\"]*0.9 #per development\n\n LCI_paper = self.UP[\"Paper\"]*self.p[\"developers\"]*self.p[\"paper_use\"] #per year\n LCI_paper = LCI_paper * self.p[\"devmonths\"] / 12 #per development\n\n LCI_office = (LCI_E_office + LCI_water_office + LCI_paper + LCI_travel) #per development\n LCI_office = LCI_office / self.p[\"pkm_fleet\"] #per pkm\n\n self.data['Office'] = LCI_office\n\n def infrastructure(self):\n LCI_construction = (self.UP[\"Facilities\"]*self.p[\"new_factory\"]/2.74e5) / self.p[\"pkm_fleet\"]\n self.data[\"Infrastructure\"] = LCI_construction\n \n def capital(self):\n self.p[\"new_jigs\"] = self.p[\"OEW\"] * 500 # 50t of jigs per 100kg of product\n self.UP[\"Capital\"] = self.UP[\"Steel\"] + self.UP[\"Jigs\"] # material plus transformation\n LCI_capital = (self.UP[\"Capital\"]*self.p[\"new_jigs\"] + self.UP[\"Machine\"]*self.p[\"new_machine\"])/self.p[\"pkm_fleet\"]\n self.data[\"Capital\"] = LCI_capital\n\n def dev(self):\n self.office()\n self.infrastructure()\n self.capital() \n \n def materials(self):\n try:\n reuse = self.p['reuse']\n except:\n reuse = 1\n\n self.p[\"Al\"] = self.p['p_Al'] * self.p['b2f_Al'] * self.p['OEW'] * reuse\n self.p[\"steel\"] = self.p['p_steel'] * 
self.p['b2f_steel'] * self.p['OEW'] * reuse\n        self.p[\"Ti\"] = self.p['p_Ti'] * self.p['b2f_Ti'] * self.p['OEW'] * reuse\n        self.p[\"inconel\"] = self.p['p_inconel'] * self.p['b2f_inconel'] * self.p['OEW'] * reuse\n        self.p[\"GFRP\"] = self.p['p_GFRP'] * self.p['b2f_GFRP'] * self.p['OEW'] * reuse\n        self.p[\"CFRP\"] = self.p['p_CFRP'] * self.p['b2f_CFRP'] * self.p['OEW'] * reuse\n\n        LCI_Al = self.UP[\"Aluminium\"] * self.p[\"Al\"]\n        LCI_steel = self.UP[\"Steel\"] * self.p[\"steel\"]\n        LCI_Ti = self.UP[\"Titanium\"] * self.p[\"Ti\"]\n        LCI_inconel = self.UP[\"Inconel\"] * self.p[\"inconel\"]\n        LCI_GFRP = self.UP[\"GFRP\"] * self.p[\"GFRP\"]\n        LCI_CFRP = self.UP[\"CFRP\"] * self.p[\"CFRP\"]\n\n        #LCI Material Extraction and Transformation\n        LCI_material = (LCI_Al + LCI_steel + LCI_Ti + LCI_inconel + LCI_GFRP + LCI_CFRP) / self.p[\"pkm_life\"]\n        self.data[\"Materials\"] = LCI_material\n\n    def factory(self):\n        LCI_E_factory = self.electricity(self.p[\"E_factory\"])\n        LCI_E_factory = LCI_E_factory * self.p[\"takt\"] / 30 # per aircraft\n\n        LCI_water_factory = self.UP[\"Water\"]*self.p[\"water_factory\"] \\\n            + self.UP[\"Wastewater\"]*self.p[\"wastewater_factory\"] # per month\n        LCI_water_factory = LCI_water_factory * self.p[\"takt\"] / 30 # per aircraft\n\n        LCI_lube = self.UP[\"Lubricant\"] * self.p[\"lubricant\"] # per month\n        LCI_lube = LCI_lube * self.p[\"takt\"] / 30 # per aircraft\n\n        self.p[\"facilities_maint\"] = self.p[\"OEW\"] * 4.58e-10 # use per kg of product\n\n        LCI_facilities_maint = self.UP[\"Facilities\"] * self.p[\"facilities_maint\"] * 0.02 # per year\n        LCI_facilities_maint = LCI_facilities_maint * self.p[\"takt\"] / 365 # per aircraft\n\n        LCI_factory = (LCI_E_factory + LCI_water_factory + LCI_lube + LCI_facilities_maint)/self.p[\"pkm_life\"]\n        self.data[\"Factory\"] = LCI_factory\n\n    def logistics(self):\n        lorry = self.p[\"d_lorry\"] * self.p[\"m_lorry\"] #tonne * km\n        sea = self.p[\"d_sea\"] * self.p[\"m_sea\"] #tonne * km\n        air = self.p[\"d_air\"] * self.p[\"m_air\"] #tonne * km\n\n        LCI_logistics = (self.UP[\"Lorry\"]*lorry + self.UP[\"Sea\"]*sea \\\n            + self.UP[\"Air\"]*air) / self.p[\"pkm_life\"]\n        self.data['Logistics'] = LCI_logistics\n    \n    def sustaining(self):\n        LCI_sustaining = self.data[\"Office\"] * 0.01 / 30 #per day\n        LCI_sustaining = (LCI_sustaining * self.p[\"takt\"])/self.p[\"pkm_life\"]\n        self.data[\"Sustaining\"] = LCI_sustaining\n    \n    def mfg(self):\n        self.materials()\n        self.factory()\n        self.logistics()\n        self.sustaining()\n\n    def flights(self):\n        try:\n            self.p[\"t_ccd\"] = self.p[\"FH\"]*60 - (self.p[\"t_app\"] + self.p[\"t_to\"] + self.p[\"t_climb\"]) # minutes\n        except:\n            self.p[\"t_ccd\"] = self.p['FH']*60 - self.p['ff_lto']\n        \n        self.p[\"fuel_ccd\"] = self.p[\"ff_ccd\"] * self.p[\"t_ccd\"] * 60 # kg\n        self.data[\"LTO\"] = self.UP[\"LTO\"] / self.p[\"pkm_flight\"]\n        self.data[\"CCD\"] = self.UP[\"CCD\"] * self.p[\"fuel_ccd\"] / self.p[\"pkm_flight\"]\n\n    def maintenance(self):\n        LCI_maint = self.UP[\"Aluminium\"]*self.p[\"maint_Al\"] + self.UP[\"Steel\"]*self.p[\"maint_steel\"] \\\n            + self.UP[\"Polymer\"]*self.p[\"maint_pol\"] + self.UP[\"Battery\"]*self.p['maint_battery'] # per year\n\n        LCI_maint = (LCI_maint / self.p[\"flights_year\"]) / self.p[\"pkm_flight\"]\n        self.data['Maintenance'] = LCI_maint\n\n    def airport(self):\n        if self.type == \"cargo\":\n            ap_impact = 0.132 # 13.2% of airport impacts are due to cargo\n        elif self.type == \"pax\":\n            ap_impact = 0.868\n        else:\n            ap_impact = 1\n\n        self.p[\"f_pax_ap\"] = self.p[\"pax_ap\"] / 22500000 # fraction of pax 
relative to zurich in 2000\n        LCI_ap = self.UP[\"Airport\"] * self.p[\"f_pax_ap\"]/100 / self.p[\"flights_ap\"] # 100 life years for building\n        LCI_ap = LCI_ap * ap_impact / self.p[\"pkm_flight\"]\n\n        self.data[\"Airport\"] = LCI_ap\n\n    def fuel(self):\n        try:\n            self.p[\"fuel_lto\"] = self.p['ff_lto'] * self.p['t_lto'] * 60\n        except:\n            self.p[\"fuel_lto\"] = self.p['t_app']*60*self.p['ff_app'] + self.p['t_idle']*60*self.p['ff_idle'] \\\n                + self.p['t_to']*60*self.p['ff_to'] + self.p['t_climb']*60*self.p['ff_climb']\n        \n        LCI_fuel = (self.UP['Kerosene']*(self.p[\"fuel_ccd\"]+self.p[\"fuel_lto\"]))/ self.p[\"pkm_flight\"]\n        self.data[\"Fuel\"] = LCI_fuel\n\n    def ope(self):\n        self.flights()\n        self.maintenance()\n        self.airport()\n        self.fuel()\n\n    def eol(self):\n        try:\n            reuse_factor = (2 - self.p['reuse'])\n        except:\n            reuse_factor = 1\n        \n        E_sort_constant = 0.4645 / 3.6 # kWh/kg of material, on average\n        self.p[\"E_sort\"] = E_sort_constant * self.p['OEW'] * reuse_factor\n        LCI_sort = self.electricity(self.p[\"E_sort\"])\n\n        materials = ['Al','steel','Ti','inconel','GFRP','CFRP']\n        scenarios = ['ldf', 'incin','recycl']\n        chunks = self.data.chunks['i'][0]\n        iterations = self.data.i.size\n        UP_eol = self.UP.rename_vars({'Landfill':'ldf','Incineration':'incin','Aluminium':'Al',\n                                      'Titanium':'Ti', 'Inconel':'inconel','Steel':'steel'})\n        eol = xr.Dataset({scenario: (['Substances','i'],da.empty((1835,iterations), chunks=(1835,chunks)))\n                          for scenario in scenarios}, coords=self.data.coords)\n\n        for scenario in scenarios:\n            for material in materials:\n                self.p[scenario+\"_\"+material] = self.p[\"p_\"+scenario+\"_\"+material]*self.p[material]*reuse_factor\n                if scenario == 'recycl':\n                    eol[scenario] += UP_eol[material] * self.p[scenario + \"_\" + material]\n                else:\n                    eol[scenario] += UP_eol[scenario] * self.p[scenario + \"_\" + material]\n\n        self.data[\"Recycling\"] = (LCI_sort - eol['recycl']) / self.p[\"pkm_life\"]\n        self.data[\"Incineration\"] = eol[\"incin\"] / self.p[\"pkm_life\"]\n        self.data[\"Landfill\"] = eol[\"ldf\"] / self.p[\"pkm_life\"]\n\n    def run(self):\n        self.dev()\n        self.mfg()\n        self.ope()\n        self.eol()\n\n        MFG = self.data[\"Logistics\"]+self.data[\"Sustaining\"]+self.data[\"Factory\"]+self.data[\"Materials\"]\n        LCI_prot = (MFG*self.p[\"prototypes\"] + MFG*self.p[\"ironbirds\"]*0.3)/self.p[\"pkm_fleet\"]\n        self.data[\"Prototypes\"] = LCI_prot\n\n        self.p[\"cert_flights\"] = self.p[\"test_FH\"] / self.p[\"FH\"]\n        self.data[\"Certification\"] = (self.data[\"LTO\"]+self.data[\"CCD\"])*self.p[\"cert_flights\"]/self.p[\"pkm_fleet\"]\n\n        return self.data\n\n    def electricity(self, E):\n        \"\"\"Calculates the LCI of electricity consumption based on a gas-wind-hydropower electricity grid.\"\"\"\n        \n        E_wind = E * self.p['grid_wind']\n        E_gas = E * self.p['grid_gas']\n        E_hydro = E * self.p['grid_hydro']\n        LCI_E = self.UP['Elec_wind']*E_wind \\\n            + self.UP['Elec_gas']*E_gas + self.UP['Elec_hydro']*E_hydro\n        \n        return LCI_E\n\n\n"
] |
[
[
"numpy.arange"
]
] |
tkondoh1022/deep-learning-from-scratch-3
|
[
"240ddab84c50f4c501bd34cd496604892b5bda80"
] |
[
"steps/step06.py"
] |
[
"import numpy as np\n\n\nclass Variable:\n def __init__(self, data):\n self.data = data\n self.grad = None\n\n\nclass Function:\n def __call__(self, input):\n x = input.data\n y = self.forward(x)\n output = Variable(y)\n self.input = input\n return output\n\n def forward(self, x):\n raise NotImplementedError()\n\n def backward(self, gy):\n raise NotImplementedError()\n\n\nclass Square(Function):\n def forward(self, x):\n y = x ** 2\n return y\n\n def backward(self, gy):\n x = self.input.data\n gx = 2 * x * gy\n return gx\n\n\nclass Exp(Function):\n def forward(self, x):\n y = np.exp(x)\n return y\n\n def backward(self, gy):\n x = self.input.data\n gx = np.exp(x) * gy\n return gx\n\n\nA = Square()\nB = Exp()\nC = Square()\n\nx = Variable(np.array(0.5))\na = A(x)\nb = B(a)\ny = C(b)\n\ny.grad = np.array(1.0)\nb.grad = C.backward(y.grad)\na.grad = B.backward(b.grad)\nx.grad = A.backward(a.grad)\nprint(x.grad)\n"
] |
[
[
"numpy.exp",
"numpy.array"
]
] |
Barchid/VISA
|
[
"1b8842e5985ab3f60580c25c9b9563634d4c8708"
] |
[
"TP05/keras-mnist.py"
] |
[
"import tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(32,32,3)),\n tf.keras.layers.Dense(1024, activation='relu'),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dropout(0.2, noise_shape=None, seed=None),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\n# Fonction de coût fort utilisée : cross entropy \n# \nsgd = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=False)\nmodel.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\nhistory = model.fit(x_train, y_train, epochs=30, verbose=1, validation_data=(x_test, y_test)) # on fait ça pendant 5 itérations\nmodel.evaluate(x_test, y_test, verbose=2)\n\n# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.datasets.cifar10.load_data",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.ylabel"
]
] |
zhenghangCN/Local-Connected
|
[
"a0eae8e2efdb0c34aab2f126f0aa3b849168e27a"
] |
[
"3D_QM9.py"
] |
[
"import json\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport numpy as np\nfrom math import ceil, sqrt\nimport random\nimport os\n\nclass pre(object):\n\tdef __init__(self, path, decimals=1):\n\t\tself.smiles = []\n\t\tself.propertys = []\n\t\tself.path = path\n\t\tself.mols = []\n\t\tself.train_mols = []\n\t\tself.train_pro = []\n\t\tself.train_pre02 = []\n\t\tself.train_pre23 = []\n\t\tself.train_pre34 = []\n\t\tself.train_graph = []\n\t\tself.test_mols = []\n\t\tself.test_pro = []\n\t\tself.test_pre02 = []\n\t\tself.test_pre23 = []\n\t\tself.test_pre34 = []\n\t\tself.test_graph = []\n\t\tself.distance = [0, 0, 0]\n\t\tself.decimals = decimals\n\t\tself.multiple = 10 ** self.decimals\n\t\tself.ai = {\"H\": 0, \"He\": 1, \"Li\": 2, \"Be\": 3, \"B\": 4, \"C\": 5, \"N\": 6, \"O\": 7, \"F\": 8, \"Ne\": 9, \"Na\": 10, \"Mg\": 11, \"Al\": 12, \"Si\": 13, \"P\": 14, \"S\": 15, \"Cl\": 16, \"Ar\": 17, \"K\": 18, \"Ca\": 19, \"Sc\": 20, \"Ti\": 21, \"V\": 22, \"Cr\": 23, \"Mn\": 24, \"Fe\": 25, \"Co\": 26, \"Ni\": 27, \"Cu\": 28, \"Zn\": 29, \"Ga\": 30, \"Ge\": 31, \"As\": 32, \"Se\": 33, \"Br\": 34, \"Kr\": 35, \"Rb\": 36, \"Sr\": 37, \"Y\": 38, \"Zr\": 39, \"Nb\": 40, \"Mo\": 41, \"Tc\": 42, \"Ru\": 43, \"Rh\": 44, \"Pd\": 45, \"Ag\": 46, \"Cd\": 47, \"In\": 48, \"Sn\": 49, \"Sb\": 50, \"Te\": 51, \"I\": 52, \"Xe\": 53, \"Cs\": 54, \"Ba\": 55, \"La\": 56, \"Ce\": 57, \"Pr\": 58, \"Nd\": 59, \"Pm\": 60, \"Sm\": 61, \"Eu\": 62, \"Gd\": 63, \"Tb\": 64, \"Dy\": 65, \"Ho\": 66, \"Er\": 67, \"Tm\": 68, \"Yb\": 69, \"Lu\": 70, \"Hf\": 71, \"Ta\": 72, \"W\": 73, \"Re\": 74, \"Os\": 75, \"Ir\": 76, \"Pt\": 77, \"Au\": 78, \"Hg\": 79, \"Tl\": 80, \"Pb\": 81, \"Bi\": 82, \"Po\": 83, \"At\": 84, \"Rn\": 85, \"Fr\": 86, \"Ra\": 87, \"Ac\": 88, \"Th\": 89, \"Pa\": 90, \"U\": 91, \"Np\": 92, \"Pu\": 93, \"Am\": 94, \"Cm\": 95, \"Bk\": 96, \"Cf\": 97, \"Es\": 98, \"Fm\": 99, \"Md\": 100, \"No\": 101, \"Lr\": 102, \"Rf\": 103, \"Db\": 104, \"Sg\": 105, \"Bh\": 106, \"Hs\": 107, \"Mt\": 108, \"Ds\": 109, \"Rg\": 110, \"Cn\": 111}\n\n\n\tdef load_data(self):\n\t\twith open(self.path + 'data/dsgdb9nsd_finput.xyz', 'r') as f:\n\t\t\tself.mols = json.load(f)\n\t\twith open(self.path + 'data/dsgdb9nsd_label.xyz', 'r') as f:\n\t\t\tself.propertys = json.load(f)\n\n\tdef longest_distance(self):\n\t\tfor i in range(len(self.mols)):\n\t\t\tcoor_xyz = []\n\t\t\tmol = self.mols[i]\n\t\t\tfor j in range(len(mol)):\n\t\t\t\t_coor_xyz = []\n\t\t\t\tfor k in [1,2,3]:\n\t\t\t\t\ttry:\n\t\t\t\t\t\t_coor_xyz.append(float(mol[j][k]))\n\t\t\t\t\texcept:\n\t\t\t\t\t\t_coor_xyz.append(0)\n\t\t\t\tcoor_xyz.append(_coor_xyz)\n\t\t\tcoor_xyz = np.array(coor_xyz)\n\t\t\tmin_x = min(coor_xyz[:,0])\n\t\t\tmin_y = min(coor_xyz[:,1])\n\t\t\tmin_z = min(coor_xyz[:,2])\n\t\t\tmax_x = max(coor_xyz[:,0])\n\t\t\tmax_y = max(coor_xyz[:,1])\n\t\t\tmax_z = max(coor_xyz[:,2])\n\t\t\tif abs(max_x - min_x) > self.distance[0]:\n\t\t\t\tself.distance[0] = abs(max_x - min_x)\n\t\t\tif abs(max_y - min_y) > self.distance[1]:\n\t\t\t\tself.distance[1] = abs(max_y - min_y)\n\t\t\tif abs(max_z - min_z) > self.distance[2]:\n\t\t\t\tself.distance[2] = abs(max_z - min_z)\n\t\twith open(self.path + 'data/distance.json', 'w') as f:\n\t\t\tjson.dump(self.distance, f)\n\t\twith open(self.path + 'data/information.txt', 'a') as f:\n\t\t\tf.write('Distance:\\nX:%f, Y%f, Z%f' %(self.distance[0], self.distance[1], self.distance[2]))\n\n\tdef correction(self):\n\t\tmol = []\n\t\tpppp = []\n\t\tori_mol = self.mols\n\t\tori_pro = 
[]\n\t\tprint(len(self.mols))\n\t\tprint(len(self.propertys))\n\t\tfor i in range(len(self.mols)):\n\t\t\tcoor_xyz = []\n\t\t\tmol = self.mols[i]\n\t\t\tfor j in range(len(mol)):\n\t\t\t\t_coor_xyz = []\n\t\t\t\tfor k in [1,2,3]:\n\t\t\t\t\ttry:\n\t\t\t\t\t\t_coor_xyz.append(float(mol[j][k]))\n\t\t\t\t\texcept:\n\t\t\t\t\t\t_coor_xyz.append(0)\n\t\t\t\tcoor_xyz.append(_coor_xyz)\n\t\t\tcoor_xyz = np.array(coor_xyz)\n\t\t\tmin_x = min(coor_xyz[:,0])\n\t\t\tmin_y = min(coor_xyz[:,1])\n\t\t\tmin_z = min(coor_xyz[:,2])\n\t\t\tmax_x = max(coor_xyz[:,0])\n\t\t\tmax_y = max(coor_xyz[:,1])\n\t\t\tmax_z = max(coor_xyz[:,2])\n\t\t\tcoor_xyz = coor_xyz.tolist()\n\t\t\tdx = max_x - min_x\n\t\t\tdy = max_y - min_y\n\t\t\tdz = max_z - min_z\n\t\t\tif dx > 12 or dy > 12 or dz > 12:\n\t\t\t\tcontinue\n\t\t\tfor j in range(len(mol)):\n\t\t\t\tcoor_xyz[j][0] -= (min_x + 0.5 * dx - 0.5 * 12)\n\t\t\t\tcoor_xyz[j][1] -= (min_y + 0.5 * dy - 0.5 * 12)\n\t\t\t\tcoor_xyz[j][2] -= (min_z + 0.5 * dz - 0.5 * 12)\n\t\t\tppp = []\n\t\t\tpp = []\n\t\t\tfor ppc in range(len(coor_xyz)):\n\t\t\t\tpp = [mol[ppc][0]] + coor_xyz[ppc]\n\t\t\t\tppp.append(pp)\n\t\t\tpppp.append(ppp)\n\t\t\tori_pro.append(self.propertys[i])\n\t\tself.mols = pppp\n\t\tself.propertys = ori_pro\n\t\twith open(self.path + 'data/all_coor_corr.json', 'w') as f:\n\t\t\tjson.dump(self.mols, f)\n\t\twith open(self.path + 'data/all_pro_corr.json', 'w') as f:\n\t\t\tjson.dump(self.propertys, f)\n\n\tdef shuffle(self):\n\t\tindex = np.arange(len(self.mols))\n\t\trandom.shuffle(index)\n\t\tX = [self.mols[int(i)] for i in index]\n\t\tY = [self.propertys[int(i)] for i in index]\n\t\tself.train_mols = X[:int(len(X)*0.9)]\n\t\tself.train_pro = Y[:int(len(Y)*0.9)]\n\t\tself.test_mols = X[int(len(X)*0.9):]\n\t\tself.test_pro = Y[int(len(Y)*0.9):]\n\t\twith open(self.path + 'data/information.txt', 'a') as f:\n\t\t\tf.write('Molecule numbers: \\nTrain:%d, test:%d \\n' %(len(self.train_mols), len(self.test_mols)))\n\t\twith open(self.path + 'data/train_mols.json', 'w') as f:\n\t\t\tjson.dump(self.train_mols, f)\n\t\twith open(self.path + 'data/train_pro.json', 'w') as f:\n\t\t\tjson.dump(self.train_pro, f)\n\t\twith open(self.path + 'data/test_mols.json', 'w') as f:\n\t\t\tjson.dump(self.test_mols, f)\n\t\twith open(self.path + 'data/test_pro.json', 'w') as f:\n\t\t\tjson.dump(self.test_pro, f)\n\n\tdef computepixels(self, atomi, xi, yi, zi, atomj, xj, yj, zj):\n\t\tif atomi == 'H' or atomj == 'H':\n\t\t\treturn [[],[],[]]\n\t\ttemstorage02 = set()\n\t\ttemstorage23 = set()\n\t\ttemstorage34 = set()\n\t\tstorage02 = []\n\t\tstorage23 = []\n\t\tstorage34 = []\n\t\ttry:\n\t\t\tD = round(1 / (sqrt((xi - xj) ** 2 + (yi - yj) ** 2)), 4)\n\t\texcept:\n\t\t\treturn [[],[],[]]\n\t\tif D >= (1/2):\n\t\t\txi = int(100 * xi)\n\t\t\tyi = int(100 * yi)\n\t\t\tzi = int(100 * zi)\n\t\t\txj = int(100 * xj)\n\t\t\tyj = int(100 * yj)\n\t\t\tzj = int(100 * zj)\n\t\t\tDx = abs(xi - xj)\n\t\t\tkxy = np.polyfit([xi, xj], [yi, yj], 1)\n\t\t\tkxz = np.polyfit([xi, xj], [zi, zj], 1)\n\t\t\tx = min(xi, xj)\n\t\t\tfor dx in range(Dx - 1):\n\t\t\t\tx_ = int(ceil((x + dx) / 100 * self.multiple / 2))\n\t\t\t\ty_ = int(ceil((kxy[0] * (x + dx) + kxy[1]) / 100 * self.multiple / 2))\n\t\t\t\tz_ = int(ceil((kxz[0] * (x + dx) + kxz[1]) / 100 * self.multiple / 2))\n\t\t\t\ttemstorage02.add((x_, y_, z_, self.ai[atomi], self.ai[atomj], D))\n\t\t\t\tdx += 1\n\t\telif D >= (1/3) and D < (1/2):\n\t\t\txi = int(100 * xi)\n\t\t\tyi = int(100 * yi)\n\t\t\tzi = int(100 * zi)\n\t\t\txj = int(100 * xj)\n\t\t\tyj = 
int(100 * yj)\n\t\t\tzj = int(100 * zj)\n\t\t\tDx = abs(xi - xj)\n\t\t\tkxy = np.polyfit([xi, xj], [yi, yj], 1)\n\t\t\tkxz = np.polyfit([xi, xj], [zi, zj], 1)\n\t\t\tx = min(xi, xj)\n\t\t\tfor dx in range(Dx - 1):\n\t\t\t\tx_ = int(ceil((x + dx) / 100 * self.multiple / 2))\n\t\t\t\ty_ = int(ceil((kxy[0] * (x + dx) + kxy[1]) / 100 * self.multiple / 2))\n\t\t\t\tz_ = int(ceil((kxz[0] * (x + dx) + kxz[1]) / 100 * self.multiple / 2))\n\t\t\t\ttemstorage23.add((x_, y_, z_, self.ai[atomi], self.ai[atomj], D))\n\t\t\t\tdx += 1\n\t\telif D >= (1/4) and D < (1/3):\n\t\t\txi = int(100 * xi)\n\t\t\tyi = int(100 * yi)\n\t\t\tzi = int(100 * zi)\n\t\t\txj = int(100 * xj)\n\t\t\tyj = int(100 * yj)\n\t\t\tzj = int(100 * zj)\n\t\t\tDx = abs(xi - xj)\n\t\t\tkxy = np.polyfit([xi, xj], [yi, yj], 1)\n\t\t\tkxz = np.polyfit([xi, xj], [zi, zj], 1)\n\t\t\tx = min(xi, xj)\n\t\t\tfor dx in range(Dx - 1):\n\t\t\t\tx_ = int(ceil((x + dx) / 100 * self.multiple / 2))\n\t\t\t\ty_ = int(ceil((kxy[0] * (x + dx) + kxy[1]) / 100 * self.multiple / 2))\n\t\t\t\tz_ = int(ceil((kxz[0] * (x + dx) + kxz[1]) / 100 * self.multiple / 2))\n\t\t\t\ttemstorage34.add((x_, y_, z_, self.ai[atomi], self.ai[atomj], D))\n\t\t\t\tdx += 1\n\t\telse:\n\t\t\treturn [[],[],[]]\t\t\n\t\tfor i in temstorage02:\n\t\t\tstorage02.append(list(i))\n\t\tfor i in temstorage23:\n\t\t\tstorage23.append(list(i))\n\t\tfor i in temstorage34:\n\t\t\tstorage34.append(list(i))\n\t\treturn [storage02, storage23, storage34]\n\n\tdef image_pre(self, _train=True, _test=True):\n\t\tif _train == True:\n\t\t\tfor index in range(len(self.train_mols)):\n\t\t\t\ttemstorage02 = []\n\t\t\t\ttemstorage23 = []\n\t\t\t\ttemstorage34 = []\n\t\t\t\t_mol = self.train_mols[index]\n\t\t\t\tfor i in range(len(_mol)):\n\t\t\t\t\t_atom = _mol[i]\n\t\t\t\t\tatomi = _atom[0]\n\t\t\t\t\txi = _atom[1]\n\t\t\t\t\tyi = _atom[2]\n\t\t\t\t\tzi = _atom[3]\n\t\t\t\t\tfor j in range(i+1, len(_mol)):\n\t\t\t\t\t\t__atom = _mol[j]\n\t\t\t\t\t\tatomj = __atom[0]\n\t\t\t\t\t\txj = __atom[1]\n\t\t\t\t\t\tyj = __atom[2]\n\t\t\t\t\t\tzj = __atom[3]\n\t\t\t\t\t\tpixel = self.computepixels(atomi, xi, yi, zi, atomj, xj, yj, zj)\n\t\t\t\t\t\tif pixel[0] != []:\n\t\t\t\t\t\t\ttemstorage02.extend(pixel[0])\n\t\t\t\t\t\telif pixel[1] != []:\n\t\t\t\t\t\t\ttemstorage23.extend(pixel[1])\n\t\t\t\t\t\telif pixel[2] != []:\n\t\t\t\t\t\t\ttemstorage34.extend(pixel[2])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\tself.train_pre02.append(temstorage02)\n\t\t\t\tself.train_pre23.append(temstorage23)\n\t\t\t\tself.train_pre34.append(temstorage34)\n\t\t\twith open(self.path + 'data/train_pre02.json', 'w') as f:\n\t\t\t\tjson.dump(self.train_pre02, f)\n\t\t\twith open(self.path + 'data/train_pre23.json', 'w') as f:\n\t\t\t\tjson.dump(self.train_pre23, f)\n\t\t\twith open(self.path + 'data/train_pre34.json', 'w') as f:\n\t\t\t\tjson.dump(self.train_pre34, f)\n\t\tif _test == True:\n\t\t\tfor index in range(len(self.test_mols)):\n\t\t\t\ttemstorage02 = []\n\t\t\t\ttemstorage23 = []\n\t\t\t\ttemstorage34 = []\n\t\t\t\t_mol = self.test_mols[index]\n\t\t\t\tfor i in range(len(_mol)):\n\t\t\t\t\t_atom = _mol[i]\n\t\t\t\t\tatomi = _atom[0]\n\t\t\t\t\txi = _atom[1]\n\t\t\t\t\tyi = _atom[2]\n\t\t\t\t\tzi = _atom[3]\n\t\t\t\t\tfor j in range(i+1, len(_mol)):\n\t\t\t\t\t\t__atom = _mol[j]\n\t\t\t\t\t\tatomj = __atom[0]\n\t\t\t\t\t\txj = __atom[1]\n\t\t\t\t\t\tyj = __atom[2]\n\t\t\t\t\t\tzj = __atom[3]\n\t\t\t\t\t\tpixel = self.computepixels(atomi, xi, yi, zi, atomj, xj, yj, zj)\n\t\t\t\t\t\tif pixel[0] != 
[]:\n\t\t\t\t\t\t\ttemstorage02.extend(pixel[0])\n\t\t\t\t\t\telif pixel[1] != []:\n\t\t\t\t\t\t\ttemstorage23.extend(pixel[1])\n\t\t\t\t\t\telif pixel[2] != []:\n\t\t\t\t\t\t\ttemstorage34.extend(pixel[2])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\tself.test_pre02.append(temstorage02)\n\t\t\t\tself.test_pre23.append(temstorage23)\n\t\t\t\tself.test_pre34.append(temstorage34)\n\t\t\twith open(self.path + 'data/test_pre02.json', 'w') as f:\n\t\t\t\tjson.dump(self.test_pre02, f)\n\t\t\twith open(self.path + 'data/test_pre23.json', 'w') as f:\n\t\t\t\tjson.dump(self.test_pre23, f)\n\t\t\twith open(self.path + 'data/test_pre34.json', 'w') as f:\n\t\t\t\tjson.dump(self.test_pre34, f)\n\n\ndef pre_main(path='/public/home/pcoss1/zh/local-connect/MIC/'):\n\tmain = pre(path)\n\tmain.load_data()\n\tmain.longest_distance()\n\tmain.correction()\n\tmain.shuffle()\n\tmain.image_pre()\n\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense,Flatten,Dropout,Input \nfrom keras.layers.convolutional import Conv3D,MaxPooling3D\nfrom keras.utils.np_utils import to_categorical\nfrom keras.optimizers import Adam,SGD\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nfrom keras.layers.normalization import BatchNormalization as BN\nfrom keras.utils import multi_gpu_model \nimport numpy as np\nimport h5py \nimport os\nimport zipfile \nimport json\nimport time\nfrom math import ceil, floor\nfrom keras import layers\nfrom random import random\nfrom math import pi,cos,sin\n\n# os.environ['CUDA_VISIBLE_DEVICES']='0,1,2,3,4,5,6,7' \n\nclass trainer(object):\n\tdef __init__(self, path, lr=0.001, batch_size=128, epochs=10000, out=1, _type='classify', class_weight=None):\n\t\tself.lr= lr\n\t\tself.path = path\n\t\tself.batch_size = batch_size\n\t\tself.epochs = epochs\n\t\tself.out = out\n\t\tself.type = _type\n\t\tself.model = None\n\t\tself.val_graph = None\n\t\tself.val_pro = None\n\t\tself.train_graph = None\n\t\tself.train_pro = None\n\t\tself.class_weight = class_weight\n\t\tif self.type == 'classify':\n\t\t\tif self.out == 1:\n\t\t\t\tself.loss = 'binary_crossentropy'\n\t\t\telse:\n\t\t\t\tself.loss = 'categorical_crossentropy'\n\t\telse:\n\t\t\tself.loss = 'mean_squared_error'\n\n\tdef load_data(self):\n\t\twith open(self.path + 'data/train_graph.json', 'r') as f:\n\t\t\tself.train_graph = json.load(f)\n\t\twith open(self.path + 'data/train_pro.json', 'r') as f:\n\t\t\tself.train_pro = json.load(f)\n\t\twith open(self.path + 'data/val_graph.json', 'r') as f:\n\t\t\tself.val_graph = json.load(f)\n\t\twith open(self.path + 'data/val_pro.json', 'r') as f:\n\t\t\tself.val_pro = json.load(f)\n\n\tdef model_build(self):\n\t\tinputs_02 = Input(shape=((96,96,96,3)))\n\t\tx1 = Conv3D(32,(1,1,1),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(inputs_02)\n\t\tx1 = BN()(x1)\n\t\tx11 = Conv3D(48,(11,11,11),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x1)\n\t\tx11 = BN()(x11)\n\t\tx12 = Conv3D(48,(7,7,7),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x1)\n\t\tx12 = BN()(x12)\n\t\tx13 = Conv3D(48,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x1)\n\t\tx13 = BN()(x13)\n\t\tx1 = layers.concatenate([x11, x12, x13], axis=-1)\n\t\tx1 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x1) # 192 -> 96 / 128 -> 64\n\t\tx1 = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x1)\n\n\t\tinputs_23 = 
Input(shape=((96,96,96,3)))\n\t\tx2 = Conv3D(32,(1,1,1),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(inputs_23)\n\t\tx2 = BN()(x2)\n\t\tx21 = Conv3D(48,(11,11,11),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x2)\n\t\tx21 = BN()(x21)\n\t\tx22 = Conv3D(48,(7,7,7),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x2)\n\t\tx22 = BN()(x22)\n\t\tx23 = Conv3D(48,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x2)\n\t\tx23 = BN()(x23)\n\t\tx2 = layers.concatenate([x21, x22, x23], axis=-1)\n\t\tx2 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x2) # 192 -> 96 / 128 -> 64\n\t\tx2 = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x2)\n\n\t\tinputs_34 = Input(shape=((96,96,96,3)))\n\t\tx3 = Conv3D(32,(1,1,1),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(inputs_34)\n\t\tx3 = BN()(x3)\n\t\tx31 = Conv3D(48,(11,11,11),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x3)\n\t\tx31 = BN()(x31)\n\t\tx32 = Conv3D(48,(7,7,7),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x3)\n\t\tx32 = BN()(x32)\n\t\tx33 = Conv3D(48,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x3)\n\t\tx33 = BN()(x33)\n\t\tx3 = layers.concatenate([x31, x32, x33], axis=-1)\n\t\tx3 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x3) # 192 -> 96 / 128 -> 64\n\t\tx3 = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x3)\n\n\t\tx0 = layers.concatenate([x1,x2,x3], axis=-1)\n\t\tx0 = Conv3D(128,(1,1,1),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx01 = Conv3D(96,(7,7,7),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx01 = BN()(x01)\n\t\tx02 = Conv3D(96,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx02 = BN()(x02)\n\t\tx0 = layers.concatenate([x01, x02], axis=-1)\n\t\tx0 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x0) # 96 -> 48 / 64 -> 32\n\n\t\tx01 = Conv3D(96,(7,7,7),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx01 = BN()(x01)\n\t\tx02 = Conv3D(96,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx02 = BN()(x02)\n\t\tx0 = layers.concatenate([x01, x02], axis=-1)\n\t\tx0 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x0) # 48 -> 24 / 32 -> 16\n\n\t\tx0 = Conv3D(256,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx0 = BN()(x0)\n\t\tx0 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x0) # 24 -> 12 / 16 -> 8\n\n\t\tx0 = Conv3D(384,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx0 = BN()(x0)\n\t\tx0 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x0) # 12 -> 6 / 8 -> 4\n\n\t\tx0 = Conv3D(512,(3,3,3),strides=(1,1,1),padding='same',activation='relu',kernel_initializer='uniform')(x0)\n\t\tx0 = BN()(x0)\n\t\tx0 = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2))(x0) # 6 -> 3/ 4 -> 2\n\n\t\tx0 = Flatten()(x0)\n\t\tx0 = Dense(1024,activation='relu')(x0)\n\t\tx0 = BN()(x0)\n\t\tx0 = Dense(2048,activation='relu')(x0)\n\t\tx0 = BN()(x0)\n\t\tpredictions = Dense(self.out)(x0)\n\t\tself.model = Model(inputs = [inputs_02, inputs_23, inputs_34], outputs = 
predictions)\n\t\tself.model = multi_gpu_model(self.model, 8)\n\t\tself.model.compile(loss=self.loss, optimizer=Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08), metrics=['accuracy'])\n\n\n\tdef train(self):\n\t\tcnt = 1\n\t\tfor i in range(self.epochs):\n\t\t\thist = self.model.fit_generator(generator=self.spin(),\n\t\t\t\tsteps_per_epoch=941,\n\t\t\t\tepochs=1)\n\t\t\tself.model.save_weights(self.path + 'log/model_weights' + str(i) + '.h5')\n\t\t\twith open(self.path + 'log/note.json', 'a') as f:\n\t\t\t\tjson.dump(hist.history, f)\n\t\t\t\tf.write('\\n')\n\t\t\tif (i+1) % 100 == 0:\n\t\t\t\tself.lr /= 1.5\n\t\t\t\tself.model.compile(loss=self.loss, optimizer=Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08), metrics=['accuracy'])\n\t\t\t\tcnt += 1\n\t\t\t\tself.model.load_weights(self.path + 'log/model_weights' + str(i) + '.h5')\n\n\tdef val(self):\n\t\tself.model_build()\n\t\twith open(self.path + 'log/note.json') as f:\n\t\t\tnote = f.readlines()\n\t\t\tall_epochs = len(note)\n\t\twith open(self.path + 'data/val_graph.json') as f:\n\t\t\tx = np.asarray(json.load(f))\n\t\twith open(self.path + 'data/val_pro.json') as f:\n\t\t\ty = np.asarray(json.load(f))\n\t\tfor i in range(all_epochs):\n\t\t\tself.model.load_weights(self.path + 'log/model_weights%d.h5' % (i))\n\t\t\tprint(self.model.evaluate(x, y, batch_size = self.batch_size))\n\n\tdef generator(self):\n\t\tcnt = 0\n\t\twith open(self.path + 'data/train_pro.json') as f:\n\t\t\ty = json.load(f)\n\t\t\tX = []\n\t\t\tY = []\n\t\twhile 1:\n\t\t\twith open(self.path + 'true_graph/train/%d_02.json' %(cnt)) as f:\n\t\t\t\tX_02 = json.load(f)\n\t\t\twith open(self.path + 'true_graph/train/%d_23.json' %(cnt)) as f:\n\t\t\t\tX_23 = (json.load(f))\n\t\t\twith open(self.path + 'true_graph/train/%d_34.json' %(cnt)) as f:\n\t\t\t\tX_34 = (json.load(f))\n\t\t\tX.append(X_02)\n\t\t\tX.append(X_23)\n\t\t\tX.append(X_34)\n\t\t\tY.append(y[cnt])\n\t\t\tcnt += 1\n\t\t\tif cnt % self.batch_size == 0:\n\t\t\t\tyield (np.asarray(X), np.asarray(Y))\n\t\t\t\tX = []\n\t\t\t\tY = []\n\n\tdef val_generator(self):\n\t\tcnt = 0\n\t\twith open(self.path + 'data/val_pro.json') as f:\n\t\t\ty = json.load(f)\n\t\twhile 1:\n\t\t\tX = []\n\t\t\tY = []\n\t\t\twith open(self.path + 'true_graph/val/%d.json' %(cnt)) as f:\n\t\t\t\tX.append(json.load(f))\n\t\t\tY.append(y[cnt])\n\t\t\tcnt += 1\n\t\t\tif cnt % self.batch_size == 0:\n\t\t\t\tyield (np.asarray(X), np.asarray(Y))\n\n\tdef spin(self):\n\t\twith open(self.path + 'data/train_pre02.json', 'r') as f:\n\t\t\ttrain_pre02 = json.load(f)\n\t\twith open(self.path + 'data/train_pre23.json', 'r') as f:\n\t\t\ttrain_pre23 = json.load(f)\n\t\twith open(self.path + 'data/train_pre34.json', 'r') as f:\n\t\t\ttrain_pre34 = json.load(f)\n\t\twith open(self.path + 'data/train_pro.json', 'r') as f:\n\t\t\ttrain_pro = np.asarray(json.load(f))[:,11].tolist()\n\t\tfor i in range(len(train_pro)):\n\t\t\ttrain_pro[i] = float(train_pro[i])\n\t\tfor index in range(941):\n\t\t\tX_train02 = train_pre02[index*64:(index+1)*64]\n\t\t\tX_train23 = train_pre23[index*64:(index+1)*64]\n\t\t\tX_train34 = train_pre34[index*64:(index+1)*64]\n\t\t\tY = train_pro[index*64:(index+1)*64]\n\t\t\tX02 = np.zeros(shape=(64,96,96,96,3))\n\t\t\tX23 = np.zeros(shape=(64,96,96,96,3))\n\t\t\tX34 = np.zeros(shape=(64,96,96,96,3))\n\t\t\tfor i in range(64):\n\t\t\t\tsigma = random() * 2 * pi\n\t\t\t\tbeta = random() * 2 * pi\n\t\t\t\tdelta = random() * 2 * pi\n\t\t\t\tmol02 = X_train02[i]\n\t\t\t\tmol23 = X_train23[i]\n\t\t\t\tmol34 = 
X_train34[i]\n\t\t\t\tRx = np.array([[1, 0, 0], [0, cos(sigma), -sin(sigma)], [0, sin(sigma), cos(sigma)]])\n\t\t\t\tRy = np.array([[cos(beta), 0, sin(beta)], [0, 1, 0], [-sin(beta), 0, cos(beta)]])\n\t\t\t\tRz = np.array([[cos(delta), -sin(delta), 0], [sin(delta), cos(delta), 0], [0, 0, 1]])\n\t\t\t\tfor j in range(len(mol02)):\n\t\t\t\t\tato02 = mol02[j]\n\t\t\t\t\tcoordinate = np.asarray([ato02[:3]]) - 60\n\t\t\t\t\t# rotate about the grid centre, then cast back to integer voxel indices\n\t\t\t\t\tspin_coordinate = (np.dot(np.dot(np.dot(coordinate, Rx), Ry), Rz) + 60).astype(int)[0]\n\t\t\t\t\tX02[i][spin_coordinate[0]][spin_coordinate[1]][spin_coordinate[2]] = np.asarray(ato02[3:])\n\t\t\t\tfor j in range(len(mol23)):\n\t\t\t\t\tato23 = mol23[j]\n\t\t\t\t\tcoordinate = np.asarray([ato23[:3]]) - 60\n\t\t\t\t\tspin_coordinate = (np.dot(np.dot(np.dot(coordinate, Rx), Ry), Rz) + 60).astype(int)[0]\n\t\t\t\t\tX23[i][spin_coordinate[0]][spin_coordinate[1]][spin_coordinate[2]] = np.asarray(ato23[3:])\n\t\t\t\tfor j in range(len(mol34)):\n\t\t\t\t\tato34 = mol34[j]\n\t\t\t\t\tcoordinate = np.asarray([ato34[:3]]) - 60\n\t\t\t\t\tspin_coordinate = (np.dot(np.dot(np.dot(coordinate, Rx), Ry), Rz) + 60).astype(int)[0]\n\t\t\t\t\tX34[i][spin_coordinate[0]][spin_coordinate[1]][spin_coordinate[2]] = np.asarray(ato34[3:])\n\t\t\tyield ([X02, X23, X34], np.asarray(Y))\n\n\n\tdef main_trainer(self):\n\n\t\tself.model_build()\n\t\tself.train()\n\nif __name__ == '__main__':\n\tpre_main(path='/public/home/pcoss1/zh/local-connect/MIC/')\n\t#main = trainer(path='/public/home/pcoss1/zh/multi_QM9/', lr=0.001, batch_size=128, epochs=1000, out=1, _type='classify', class_weight=None)\n\t#main.main_trainer()"
] |
[
[
"numpy.dot",
"numpy.polyfit",
"numpy.asarray",
"numpy.array",
"numpy.zeros"
]
] |
flymin/Rectified-Rejection
|
[
"58bfd0ee159cbcbebdda2da068984e6edf8b61ec"
] |
[
"adaptiveCWattack/base.py"
] |
[
"# Copyright (c) 2018-present, Royal Bank of Canada.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom abc import ABCMeta\n\nimport torch\n\nfrom advertorch.utils import replicate_input\n\n\nclass Attack(object):\n \"\"\"\n Abstract base class for all attack classes.\n\n :param predict: forward pass function.\n :param loss_fn: loss function that takes .\n :param clip_min: mininum value per input dimension.\n :param clip_max: maximum value per input dimension.\n\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, predict, loss_fn, clip_min, clip_max):\n \"\"\"Create an Attack instance.\"\"\"\n self.predict = predict\n self.loss_fn = loss_fn\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n def perturb(self, x, **kwargs):\n \"\"\"Virtual method for generating the adversarial examples.\n\n :param x: the model's input tensor.\n :param **kwargs: optional parameters used by child classes.\n :return: adversarial examples.\n \"\"\"\n error = \"Sub-classes must implement perturb.\"\n raise NotImplementedError(error)\n\n def __call__(self, *args, **kwargs):\n return self.perturb(*args, **kwargs)\n\n\nclass LabelMixin(object):\n def _get_predicted_label(self, x):\n \"\"\"\n Compute predicted labels given x. Used to prevent label leaking\n during adversarial training.\n\n :param x: the model's input tensor.\n :return: tensor containing predicted labels.\n \"\"\"\n with torch.no_grad():\n outputs = self.predict(x)\n _, y = torch.max(outputs, dim=1)\n return y\n\n def _verify_and_process_inputs(self, x, y):\n if self.targeted:\n assert y is not None\n\n if not self.targeted:\n if y is None:\n y = self._get_predicted_label(x)\n\n x = replicate_input(x)\n y = replicate_input(y)\n return x, y\n"
] |
[
[
"torch.no_grad",
"torch.max"
]
] |
salesforce/NeuralBayes
|
[
"e2f307933abbf124d55763eeb6df536bc47c3e5a"
] |
[
"MIM/models.py"
] |
[
"__author__ = 'Devansh Arpit'\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\ndef param_init(module, init='ortho'):\n for m in module.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n if init == 'he':\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif init=='ortho':\n nn.init.orthogonal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass CNN(nn.Module):\n def __init__(self, bn=True, dataset='mnist', init='ortho'):\n super(CNN, self).__init__()\n nhiddens = [200,500,700,1000]\n if dataset=='mnist':\n self.channel = 1\n self.sz = 28\n elif 'cifar' in dataset:\n self.channel = 3\n self.sz = 32\n elif dataset=='stl10':\n self.channel = 3\n self.sz = 32 \n self.conv1 = nn.Conv2d(self.channel, nhiddens[0], 3, 1)\n if bn:\n self.bn1 = nn.BatchNorm2d(nhiddens[0])\n else:\n self.bn1 = nn.Sequential()\n\n\n\n self.conv2 = nn.Conv2d(nhiddens[0], nhiddens[1], 3, 1)\n if bn:\n self.bn2 = nn.BatchNorm2d(nhiddens[1])\n else:\n self.bn2 = nn.Sequential()\n\n\n self.conv3 = nn.Conv2d(nhiddens[1], nhiddens[2], 3, 1)\n if bn:\n self.bn3 = nn.BatchNorm2d(nhiddens[2])\n else:\n self.bn3 = nn.Sequential()\n\n\n self.conv4 = nn.Conv2d(nhiddens[2], nhiddens[3], 3, 1)\n if bn:\n self.bn4 = nn.BatchNorm2d(nhiddens[3])\n else:\n self.bn4 = nn.Sequential()\n\n param_init(self, init=init)\n self.feat_dim = nhiddens[-1]\n self.nhiddens = nhiddens\n\n def forward(self, x, ret_hid=False, state=-1):\n # print(x.size())\n hid = {}\n x = x.view(-1, self.channel,self.sz,self.sz)\n x=self.conv1(x)\n \n x = F.relu(self.bn1(x))\n hid['0'] = x\n if state==0:\n return x\n\n x = F.max_pool2d(x, 2, 2)\n\n x=self.conv2(x)\n \n x = F.relu(self.bn2(x))\n hid['1'] = x\n if state==1:\n return x\n\n\n x=self.conv3(x)\n \n x = F.relu(self.bn3(x))\n hid['2'] = x\n if state==2:\n return x\n\n x = F.max_pool2d(x, 2, 2)\n # x = nn.AvgPool2d(2,2)(x)\n x=self.conv4(x)\n\n x = F.relu(self.bn4(x))\n hid['3'] = x\n \n\n x = nn.AvgPool2d(*[x.size()[2]*2])(x)\n out = x.view(x.size()[0], -1)\n\n if ret_hid:\n return hid\n return out\n\nclass MLPLayer(nn.Module):\n def __init__(self, dim_in=None, dim_out=None, bn=True, act=True, dropout=0.):\n super(MLPLayer, self).__init__()\n self.dropout = dropout\n self.act=act\n if bn:\n fc = nn.Linear(dim_in, dim_out)\n bn_ = nn.BatchNorm1d(dim_out)\n self.layer = [fc, bn_]\n else:\n self.layer = [nn.Linear(dim_in, dim_out)]\n\n param_init(self, init='ortho')\n self.layer = nn.Sequential(*self.layer)\n \n def forward(self, x):\n if len(x.size())>2:\n x = nn.AvgPool2d(*[x.size()[2]*2])(x)\n x = x.view(x.size()[0], -1)\n x=self.layer(x)\n if self.act:\n x = F.relu((x))\n if self.dropout>0:\n x = nn.Dropout(self.dropout)(x)\n return x\n \nclass PredNet(nn.Module):\n def __init__(self, dim_inp=None, dim_out=1):\n super(PredNet, self).__init__()\n self.dim_inp = dim_inp\n self.dim_out = dim_out\n layer = nn.Linear(dim_inp, dim_out)\n self.net = nn.Sequential(layer)\n \n def forward(self, x):\n if len(x.size())>2:\n x = nn.AvgPool2d(*[x.size()[2]*2])(x)\n x = x.view(x.size()[0], -1)\n\n x = x.view(-1, self.dim_inp)\n x = self.net(x)\n return x\n \nclass MLPNet(nn.Module):\n def __init__(self, dim_inp=None, nhiddens=[500, 500, 500], dim_out=10, bn=True, dropout=0.):\n super(MLPNet, self).__init__()\n self.dim_inp = dim_inp\n self.dropout = dropout\n nhiddens = nhiddens + [dim_out]\n self.layers 
= nn.ModuleList([])\n for l in range(len(nhiddens)):\n if l==len(nhiddens)-1:\n if len(nhiddens)==1:\n layer = MLPLayer(dim_inp, nhiddens[l], False, False)\n else:\n layer = MLPLayer(nhiddens[l-1], nhiddens[l], False, False)\n elif l==0:\n layer = MLPLayer(dim_inp, nhiddens[l], bn, True)\n else:\n layer = MLPLayer(nhiddens[l-1], nhiddens[l], bn, True)\n self.layers.append((layer))\n \n def forward(self, x):\n if len(x.size())>2:\n x = nn.AvgPool2d(*[x.size()[2]*2])(x)\n x = x.view(x.size()[0], -1)\n\n for layer in self.layers[:-1]:\n x = layer(x)\n x = nn.Dropout(self.dropout)(x) if self.dropout>0 else x\n x = self.layers[-1](x)\n return x"
] |
[
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.init.orthogonal_",
"torch.nn.BatchNorm2d",
"torch.nn.functional.max_pool2d"
]
] |
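The calls indexed above are the building blocks of the CNN and MLP classes in this entry: orthogonal initialization, conv/batch-norm/pooling feature extraction, and Linear/BatchNorm1d/Dropout heads held in a ModuleList. A minimal self-contained sketch of how they compose, not code from the repository; tensor sizes and the 0.5 dropout rate are illustrative assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(3, 8, 3)                       # 3-channel input, 8 filters, 3x3 kernel
nn.init.orthogonal_(conv.weight)                # orthogonal init, as in param_init
bn = nn.BatchNorm2d(8)                          # per-channel batch norm after the conv
x = torch.randn(2, 3, 32, 32)                   # batch of 2 so batch norm can run in train mode
h = F.relu(bn(conv(x)))                         # -> (2, 8, 30, 30)
h = F.max_pool2d(h, 2, 2)                       # -> (2, 8, 15, 15)
head = nn.Sequential(nn.Linear(8 * 15 * 15, 10), nn.BatchNorm1d(10))
layers = nn.ModuleList([head])                  # registers submodules, as in MLPNet
out = nn.Dropout(0.5)(layers[0](h.flatten(1)))  # -> (2, 10)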
rotemgb/spyder
|
[
"f70735396ce6f4b2f7d920cf7953239bd9504a4f"
] |
[
"spyder/plugins/variableexplorer/widgets/tests/test_collectioneditor.py"
] |
[
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright © Spyder Project Contributors\n#\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n# ----------------------------------------------------------------------------\n\n\"\"\"\nTests for the Variable Explorer Collections Editor.\n\"\"\"\n\n# Standard library imports\nimport os # Example module for testing display inside CollecitonsEditor\nfrom os import path\nimport copy\nimport datetime\nfrom xml.dom.minidom import parseString\ntry:\n from unittest.mock import Mock\nexcept ImportError:\n from mock import Mock # Python 2\n\n# Third party imports\nimport numpy\nimport pandas\nimport pytest\nfrom flaky import flaky\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QWidget\n\n# Local imports\nfrom spyder.plugins.variableexplorer.widgets.collectionseditor import (\n RemoteCollectionsEditorTableView, CollectionsEditorTableView,\n CollectionsModel, CollectionsEditor, LARGE_NROWS, ROWS_TO_LOAD)\nfrom spyder.plugins.variableexplorer.widgets.namespacebrowser import (\n NamespacesBrowserFinder)\nfrom spyder.plugins.variableexplorer.widgets.tests.test_dataframeeditor import \\\n generate_pandas_indexes\n\n# =============================================================================\n# Constants\n# =============================================================================\n# Full path to this file's parent directory for loading data\nLOCATION = path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\n# =============================================================================\n# Utility functions\n# =============================================================================\ndef data(cm, i, j):\n return cm.data(cm.index(i, j))\n\n\ndef data_table(cm, n_rows, n_cols):\n return [[data(cm, i, j) for i in range(n_rows)] for j in range(n_cols)]\n\n\n# =============================================================================\n# Pytest Fixtures\n# =============================================================================\n@pytest.fixture\ndef nonsettable_objects_data():\n \"\"\"Rturn Python objects with immutable attribs to test CollectionEditor.\"\"\"\n test_objs = [pandas.Period(\"2018-03\"), pandas.Categorical([1, 2, 42])]\n expected_objs = [pandas.Period(\"2018-03\"), pandas.Categorical([1, 2, 42])]\n keys_test = [[\"_typ\", \"day\", \"dayofyear\", \"hour\"],\n [\"_typ\", \"nbytes\", \"ndim\"]]\n return zip(test_objs, expected_objs, keys_test)\n\n\n# =============================================================================\n# Tests\n# ============================================================================\ndef test_remove_variable(qtbot):\n \"\"\"Test removing of the correct variable.\"\"\"\n variables = {'a': 1,\n 'b': 2,\n 'c': 3,\n 'd': '4',\n 'e': 5}\n editor = CollectionsEditorTableView(None, variables.copy())\n qtbot.addWidget(editor)\n editor.setCurrentIndex(editor.model.index(1, 0))\n\n editor.remove_item(force=True)\n assert editor.model.rowCount() == 4\n assert data(editor.model, 0, 0) == 'a'\n assert data(editor.model, 1, 0) == 'c'\n assert data(editor.model, 2, 0) == 'd'\n assert data(editor.model, 3, 0) == 'e'\n\n # Reset variables and try removing one again\n editor.set_data(variables.copy())\n editor.adjust_columns()\n editor.setCurrentIndex(editor.model.index(1, 0))\n editor.remove_item(force=True)\n assert editor.model.rowCount() == 4\n assert data(editor.model, 0, 0) == 'a'\n assert 
data(editor.model, 1, 0) == 'c'\n assert data(editor.model, 2, 0) == 'd'\n assert data(editor.model, 3, 0) == 'e'\n\n\ndef test_remove_remote_variable(qtbot, monkeypatch):\n \"\"\"Test the removing of the correct remote variable.\"\"\"\n variables = {'a': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '1'},\n 'b': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '2'},\n 'c': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '3'},\n 'd': {'type': 'str',\n 'size': 1, 'color': '#800000',\n 'view': '4'},\n 'e': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '5'}}\n editor = RemoteCollectionsEditorTableView(None, variables.copy())\n qtbot.addWidget(editor)\n editor.setCurrentIndex(editor.model.index(1, 0))\n\n # Monkey patch remove variables\n def remove_values(ins, names):\n assert names == ['b']\n data = {'a': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '1'},\n 'c': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '3'},\n 'd': {'type': 'str',\n 'size': 1, 'color': '#800000',\n 'view': '4'},\n 'e': {'type': 'int',\n 'size': 1,\n 'color': '#0000ff',\n 'view': '5'}}\n editor.set_data(data)\n monkeypatch.setattr(\n 'spyder.plugins.variableexplorer.widgets'\n '.collectionseditor.RemoteCollectionsEditorTableView.remove_values',\n remove_values)\n\n editor.remove_item(force=True)\n assert editor.model.rowCount() == 4\n assert data(editor.model, 0, 0) == 'a'\n assert data(editor.model, 1, 0) == 'c'\n assert data(editor.model, 2, 0) == 'd'\n assert data(editor.model, 3, 0) == 'e'\n\n # Reset variables and try removing one again\n editor.set_data(variables.copy())\n editor.adjust_columns()\n editor.setCurrentIndex(editor.model.index(1, 0))\n editor.remove_item(force=True)\n assert editor.model.rowCount() == 4\n assert data(editor.model, 0, 0) == 'a'\n assert data(editor.model, 1, 0) == 'c'\n assert data(editor.model, 2, 0) == 'd'\n assert data(editor.model, 3, 0) == 'e'\n\n\ndef test_filter_rows(qtbot):\n \"\"\"Test rows filtering.\"\"\"\n\n df = pandas.DataFrame(['foo', 'bar'])\n editor = CollectionsEditorTableView(None, {'dfa': df, 'dfb': df})\n editor.finder = NamespacesBrowserFinder(editor,\n editor.set_regex)\n qtbot.addWidget(editor)\n\n # Initially two rows\n assert editor.model.rowCount() == 2\n\n # Match two rows by name\n editor.finder.setText(\"df\")\n assert editor.model.rowCount() == 2\n\n # Match two rows by type\n editor.finder.setText(\"DataFrame\")\n assert editor.model.rowCount() == 2\n\n # Only one match\n editor.finder.setText(\"dfb\")\n assert editor.model.rowCount() == 1\n\n # No match\n editor.finder.setText(\"dfbc\")\n assert editor.model.rowCount() == 0\n\ndef test_create_dataframeeditor_with_correct_format(qtbot, monkeypatch):\n MockDataFrameEditor = Mock()\n mockDataFrameEditor_instance = MockDataFrameEditor()\n monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',\n MockDataFrameEditor)\n df = pandas.DataFrame(['foo', 'bar'])\n editor = CollectionsEditorTableView(None, {'df': df})\n qtbot.addWidget(editor)\n editor.set_dataframe_format('%10d')\n editor.delegate.createEditor(None, None, editor.model.index(0, 3))\n mockDataFrameEditor_instance.dataModel.set_format.assert_called_once_with('%10d')\n\ndef test_accept_sig_option_changed_from_dataframeeditor(qtbot, monkeypatch):\n df = pandas.DataFrame(['foo', 'bar'])\n editor = CollectionsEditorTableView(None, {'df': df})\n qtbot.addWidget(editor)\n editor.set_dataframe_format('%10d')\n assert 
editor.source_model.dataframe_format == '%10d'\n editor.delegate.createEditor(None, None, editor.model.index(0, 3))\n dataframe_editor = next(iter(editor.delegate._editors.values()))['editor']\n qtbot.addWidget(dataframe_editor)\n dataframe_editor.sig_option_changed.emit('dataframe_format', '%5f')\n assert editor.source_model.dataframe_format == '%5f'\n\ndef test_collectionsmodel_with_two_ints():\n coll = {'x': 1, 'y': 2}\n cm = CollectionsModel(None, coll)\n assert cm.rowCount() == 2\n assert cm.columnCount() == 5\n # dict is unordered, so first row might be x or y\n assert data(cm, 0, 0) in {'x',\n 'y'}\n if data(cm, 0, 0) == 'x':\n row_with_x = 0\n row_with_y = 1\n else:\n row_with_x = 1\n row_with_y = 0\n assert data(cm, row_with_x, 1) == 'int'\n assert data(cm, row_with_x, 2) == 1\n assert data(cm, row_with_x, 3) == '1'\n assert data(cm, row_with_y, 0) == 'y'\n assert data(cm, row_with_y, 1) == 'int'\n assert data(cm, row_with_y, 2) == 1\n assert data(cm, row_with_y, 3) == '2'\n\ndef test_collectionsmodel_with_index():\n # Regression test for spyder-ide/spyder#3380,\n # modified for spyder-ide/spyder#3758.\n for rng_name, rng in generate_pandas_indexes().items():\n coll = {'rng': rng}\n cm = CollectionsModel(None, coll)\n assert data(cm, 0, 0) == 'rng'\n assert data(cm, 0, 1) == rng_name\n assert data(cm, 0, 2) == '(20,)' or data(cm, 0, 2) == '(20L,)'\n try:\n assert data(cm, 0, 3) == rng._summary()\n except AttributeError:\n assert data(cm, 0, 3) == rng.summary()\n\n\ndef test_shows_dataframeeditor_when_editing_index(qtbot, monkeypatch):\n for rng_name, rng in generate_pandas_indexes().items():\n MockDataFrameEditor = Mock()\n mockDataFrameEditor_instance = MockDataFrameEditor()\n monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',\n MockDataFrameEditor)\n coll = {'rng': rng}\n editor = CollectionsEditorTableView(None, coll)\n editor.delegate.createEditor(None, None,\n editor.model.index(0, 3))\n mockDataFrameEditor_instance.show.assert_called_once_with()\n\n\ndef test_sort_collectionsmodel():\n var_list1 = [0, 1, 2]\n var_list2 = [3, 4, 5, 6]\n var_dataframe1 = pandas.DataFrame([[1, 2, 3], [20, 30, 40], [2, 2, 2]])\n var_dataframe2 = pandas.DataFrame([[1, 2, 3], [20, 30, 40]])\n var_series1 = pandas.Series(var_list1)\n var_series2 = pandas.Series(var_list2)\n\n coll = [1, 3, 2]\n cm = CollectionsModel(None, coll)\n assert cm.rowCount() == 3\n assert cm.columnCount() == 5\n cm.sort(0) # sort by index\n assert data_table(cm, 3, 4) == [[0, 1, 2],\n ['int', 'int', 'int'],\n [1, 1, 1],\n ['1', '3', '2']]\n cm.sort(3) # sort by value\n assert data_table(cm, 3, 4) == [[0, 2, 1],\n ['int', 'int', 'int'],\n [1, 1, 1],\n ['1', '2', '3']]\n\n coll = [1, var_list1, var_list2, var_dataframe1, var_dataframe2,\n var_series1, var_series2]\n cm = CollectionsModel(None, coll)\n assert cm.rowCount() == 7\n assert cm.columnCount() == 5\n\n cm.sort(1) # sort by type\n assert data_table(cm, 7, 4) == [\n [3, 4, 5, 6, 0, 1, 2],\n ['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],\n ['(3, 3)', '(2, 3)', '(3,)', '(4,)', 1, 3, 4],\n ['Column names: 0, 1, 2',\n 'Column names: 0, 1, 2',\n 'Series object of pandas.core.series module',\n 'Series object of pandas.core.series module',\n '1',\n '[0, 1, 2]',\n '[3, 4, 5, 6]']]\n\n cm.sort(2) # sort by size\n assert data_table(cm, 7, 4) == [\n [3, 4, 5, 6, 0, 1, 2],\n ['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],\n ['(2, 3)', '(3,)', '(3, 3)', '(4,)', 1, 3, 4],\n ['Column 
names: 0, 1, 2',\n 'Column names: 0, 1, 2',\n 'Series object of pandas.core.series module',\n 'Series object of pandas.core.series module',\n '1',\n '[0, 1, 2]',\n '[3, 4, 5, 6]']] or data_table(cm, 7, 4) == [\n [0, 1, 2, 4, 5, 3, 6],\n [u'int', u'list', u'list', u'DataFrame', u'Series', u'DataFrame',\n u'Series'],\n [1, 3, 4, u'(2, 3)', u'(3,)', u'(3, 3)', u'(4,)'],\n ['1',\n '[0, 1, 2]',\n '[3, 4, 5, 6]',\n 'Column names: 0, 1, 2',\n 'Series object of pandas.core.series module',\n 'Column names: 0, 1, 2',\n 'Series object of pandas.core.series module',\n ]]\n\n\ndef test_sort_collectionsmodel_with_many_rows():\n coll = list(range(2*LARGE_NROWS))\n cm = CollectionsModel(None, coll)\n assert cm.rowCount() == cm.rows_loaded == ROWS_TO_LOAD\n assert cm.columnCount() == 5\n cm.sort(1) # This was causing an issue (#5232)\n cm.fetchMore()\n assert cm.rowCount() == 2 * ROWS_TO_LOAD\n for _ in range(3):\n cm.fetchMore()\n assert cm.rowCount() == len(coll)\n\n\ndef test_rename_and_duplicate_item_in_collection_editor():\n collections = {'list': ([1, 2, 3], False, True),\n 'tuple': ((1, 2, 3), False, False),\n 'dict': ({'a': 1, 'b': 2}, True, True)}\n for coll, rename_enabled, duplicate_enabled in collections.values():\n coll_copy = copy.copy(coll)\n editor = CollectionsEditorTableView(None, coll)\n assert editor.rename_action.isEnabled()\n assert editor.duplicate_action.isEnabled()\n editor.setCurrentIndex(editor.source_model.index(0, 0))\n editor.refresh_menu()\n assert editor.rename_action.isEnabled() == rename_enabled\n assert editor.duplicate_action.isEnabled() == duplicate_enabled\n if isinstance(coll, list):\n editor.duplicate_item()\n assert editor.source_model.get_data() == coll_copy + [coll_copy[0]]\n\n\ndef test_edit_mutable_and_immutable_types(monkeypatch):\n \"\"\"\n Test that mutable objs/vals are editable in VarExp; immutable ones aren't.\n\n Regression test for spyder-ide/spyder#5991.\n \"\"\"\n MockQLineEdit = Mock()\n attr_to_patch_qlineedit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionsdelegate.QLineEdit')\n monkeypatch.setattr(attr_to_patch_qlineedit, MockQLineEdit)\n\n MockTextEditor = Mock()\n attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionsdelegate.TextEditor')\n monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)\n\n MockQDateTimeEdit = Mock()\n attr_to_patch_qdatetimeedit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionsdelegate.QDateTimeEdit')\n monkeypatch.setattr(attr_to_patch_qdatetimeedit, MockQDateTimeEdit)\n\n MockCollectionsEditor = Mock()\n mockCollectionsEditor_instance = MockCollectionsEditor()\n attr_to_patch_coledit = ('spyder.plugins.variableexplorer.widgets.' 
+\n 'collectionseditor.CollectionsEditor')\n monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)\n\n list_test = [1, \"012345678901234567901234567890123456789012\",\n datetime.datetime(2017, 12, 24, 7, 9), [1, 2, 3], (2, \"eggs\")]\n tup_test = tuple(list_test)\n\n # Tests for mutable type (list) #\n editor_list = CollectionsEditorTableView(None, list_test)\n\n # Directly editable values inside list\n editor_list_value = editor_list.delegate.createEditor(\n None, None, editor_list.model.index(0, 3))\n assert editor_list_value is not None\n assert MockQLineEdit.call_count == 1\n\n # Text Editor for long text inside list\n editor_list.delegate.createEditor(None, None,\n editor_list.model.index(1, 3))\n assert MockTextEditor.call_count == 2\n assert not MockTextEditor.call_args[1][\"readonly\"]\n\n # Datetime inside list\n editor_list_datetime = editor_list.delegate.createEditor(\n None, None, editor_list.model.index(2, 3))\n assert editor_list_datetime is not None\n assert MockQDateTimeEdit.call_count == 1\n\n # List inside list\n editor_list.delegate.createEditor(None, None,\n editor_list.model.index(3, 3))\n assert mockCollectionsEditor_instance.show.call_count == 1\n assert not mockCollectionsEditor_instance.setup.call_args[1][\"readonly\"]\n\n # Tuple inside list\n editor_list.delegate.createEditor(None, None,\n editor_list.model.index(4, 3))\n assert mockCollectionsEditor_instance.show.call_count == 2\n assert mockCollectionsEditor_instance.setup.call_args[1][\"readonly\"]\n\n # Tests for immutable type (tuple) #\n editor_tup = CollectionsEditorTableView(None, tup_test)\n\n # Directly editable values inside tuple\n editor_tup_value = editor_tup.delegate.createEditor(\n None, None, editor_tup.model.index(0, 3))\n assert editor_tup_value is None\n assert MockQLineEdit.call_count == 1\n\n # Text Editor for long text inside tuple\n editor_tup.delegate.createEditor(None, None,\n editor_tup.model.index(1, 3))\n assert MockTextEditor.call_count == 4\n assert MockTextEditor.call_args[1][\"readonly\"]\n\n # Datetime inside tuple\n editor_tup_datetime = editor_tup.delegate.createEditor(\n None, None, editor_tup.model.index(2, 3))\n assert editor_tup_datetime is None\n assert MockQDateTimeEdit.call_count == 1\n\n # List inside tuple\n editor_tup.delegate.createEditor(None, None,\n editor_tup.model.index(3, 3))\n assert mockCollectionsEditor_instance.show.call_count == 3\n assert mockCollectionsEditor_instance.setup.call_args[1][\"readonly\"]\n\n # Tuple inside tuple\n editor_tup.delegate.createEditor(None, None,\n editor_tup.model.index(4, 3))\n assert mockCollectionsEditor_instance.show.call_count == 4\n assert mockCollectionsEditor_instance.setup.call_args[1][\"readonly\"]\n\n\n@flaky(max_runs=3)\ndef test_view_module_in_coledit():\n \"\"\"\n Test that modules don't produce an error when opening in Variable Explorer.\n\n Also check that they are set as readonly. 
Regression test for\n spyder-ide/spyder#6080.\n \"\"\"\n editor = CollectionsEditor()\n editor.setup(os, \"module_test\", readonly=False)\n assert editor.widget.editor.readonly\n\ndef test_notimplementederror_multiindex():\n \"\"\"\n Test that the NotImplementedError when scrolling a MultiIndex is handled.\n\n Regression test for spyder-ide/spyder#6284.\n \"\"\"\n time_deltas = [pandas.Timedelta(minutes=minute)\n for minute in range(5, 35, 5)]\n time_delta_multiindex = pandas.MultiIndex.from_product([[0, 1, 2, 3, 4],\n time_deltas])\n col_model = CollectionsModel(None, time_delta_multiindex)\n assert col_model.rowCount() == col_model.rows_loaded == ROWS_TO_LOAD\n assert col_model.columnCount() == 5\n col_model.fetchMore()\n assert col_model.rowCount() == 2 * ROWS_TO_LOAD\n for _ in range(3):\n col_model.fetchMore()\n assert col_model.rowCount() == 5 * ROWS_TO_LOAD\n\n\ndef test_editor_parent_set(monkeypatch):\n \"\"\"\n Test that editors have their parent set so they close with Spyder.\n\n Regression test for spyder-ide/spyder#5696.\n \"\"\"\n # Mocking and setup\n test_parent = QWidget()\n\n MockCollectionsEditor = Mock()\n attr_to_patch_coledit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionseditor.CollectionsEditor')\n monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)\n\n MockArrayEditor = Mock()\n attr_to_patch_arredit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionsdelegate.ArrayEditor')\n monkeypatch.setattr(attr_to_patch_arredit, MockArrayEditor)\n\n MockDataFrameEditor = Mock()\n attr_to_patch_dfedit = ('spyder.plugins.variableexplorer.widgets.' +\n 'collectionsdelegate.DataFrameEditor')\n monkeypatch.setattr(attr_to_patch_dfedit, MockDataFrameEditor)\n\n MockTextEditor = Mock()\n attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' 
+\n                              'collectionsdelegate.TextEditor')\n    monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)\n\n    MockObjectExplorer = Mock()\n    attr_to_patch_objectexplorer = ('spyder.plugins.variableexplorer.widgets.'\n                                    + 'objectexplorer.ObjectExplorer')\n    monkeypatch.setattr(attr_to_patch_objectexplorer, MockObjectExplorer)\n\n    editor_data = [[0, 1, 2, 3, 4],\n                   numpy.array([1.0, 42.0, 1337.0]),\n                   pandas.DataFrame([[1, 2, 3], [20, 30, 40]]),\n                   os,\n                   \"012345678901234567890123456789012345678901234567890123456\"]\n    col_editor = CollectionsEditorTableView(test_parent, editor_data)\n    assert col_editor.parent() is test_parent\n\n    for idx, mock_class in enumerate([MockCollectionsEditor,\n                                      MockArrayEditor,\n                                      MockDataFrameEditor,\n                                      MockObjectExplorer,\n                                      MockTextEditor]):\n        col_editor.delegate.createEditor(col_editor.parent(), None,\n                                         col_editor.model.index(idx, 3))\n        assert mock_class.call_count == 1 + (idx // 4)\n        assert mock_class.call_args[1][\"parent\"] is test_parent\n\n\ndef test_xml_dom_element_view():\n    \"\"\"\n    Test that XML DOM ``Element``s are able to be viewed in CollectionsEditor.\n\n    Regression test for spyder-ide/spyder#5642.\n    \"\"\"\n    xml_path = path.join(LOCATION, 'dom_element_test.xml')\n    with open(xml_path) as xml_file:\n        xml_data = xml_file.read()\n\n    xml_content = parseString(xml_data)\n    xml_element = xml_content.getElementsByTagName(\"note\")[0]\n\n    col_editor = CollectionsEditor(None)\n    col_editor.setup(xml_element)\n    col_editor.show()\n    assert col_editor.get_value()\n    col_editor.accept()\n\n\ndef test_pandas_dateoffset_view():\n    \"\"\"\n    Test that pandas ``DateOffset`` objs can be viewed in CollectionsEditor.\n\n    Regression test for spyder-ide/spyder#6729.\n    \"\"\"\n    test_dateoffset = pandas.DateOffset()\n    col_editor = CollectionsEditor(None)\n    col_editor.setup(test_dateoffset)\n    col_editor.show()\n    assert col_editor.get_value()\n    col_editor.accept()\n\n\ndef test_set_nonsettable_objects(nonsettable_objects_data):\n    \"\"\"\n    Test that errors trying to set attributes in ColEdit are handled properly.\n\n    Unit regression test for issues spyder-ide/spyder#6727 and\n    spyder-ide/spyder#6728.\n    \"\"\"\n    for test_obj, expected_obj, keys in nonsettable_objects_data:\n        col_model = CollectionsModel(None, test_obj)\n        indices = [col_model.get_index_from_key(key) for key in keys]\n        for idx in indices:\n            assert not col_model.set_value(idx, \"2\")\n        # Due to numpy's deliberate breakage of __eq__ comparison\n        assert all([key == \"_typ\" or\n                    (getattr(col_model.get_data().__obj__, key)\n                     == getattr(expected_obj, key)) for key in keys])\n\n\n@flaky(max_runs=3)\n@pytest.mark.no_xvfb\ndef test_edit_nonsettable_objects(qtbot, nonsettable_objects_data):\n    \"\"\"\n    Test that errors trying to edit attributes in ColEdit are handled properly.\n\n    Integration regression test for issues spyder-ide/spyder#6727 and\n    spyder-ide/spyder#6728.\n    \"\"\"\n    for test_obj, expected_obj, keys in nonsettable_objects_data:\n        col_editor = CollectionsEditor(None)\n        col_editor.setup(test_obj)\n        col_editor.show()\n        qtbot.waitForWindowShown(col_editor)\n        view = col_editor.widget.editor\n        indices = [view.source_model.get_index_from_key(key) for key in keys]\n\n        for _ in range(3):\n            qtbot.keyClick(view, Qt.Key_Right)\n        last_row = -1\n        rows_to_test = [index.row() for index in indices]\n        for row in rows_to_test:\n            for _ in range(row - last_row - 1):\n                qtbot.keyClick(view, Qt.Key_Down)\n            qtbot.keyClick(view, Qt.Key_Space)\n            qtbot.keyClick(view.focusWidget(), Qt.Key_Backspace)\n            qtbot.keyClicks(view.focusWidget(), \"2\")\n            
qtbot.keyClick(view.focusWidget(), Qt.Key_Down)\n last_row = row\n\n qtbot.wait(100)\n # Due to numpy's deliberate breakage of __eq__ comparison\n assert all([key == \"_typ\" or (getattr(col_editor.get_value(), key)\n == getattr(expected_obj, key)) for key in keys])\n\n col_editor.accept()\n qtbot.wait(200)\n # Same reason as above\n assert all([key == \"_typ\" or (getattr(col_editor.get_value(), key)\n == getattr(expected_obj, key)) for key in keys])\n assert all([getattr(test_obj, key)\n == getattr(expected_obj, key) for key in keys])\n\n\ndef test_collectionseditor_with_class_having_buggy_copy(qtbot):\n \"\"\"\n Test that editor for object whose .copy() returns a different type is\n readonly; cf. spyder-ide/spyder#6936.\n \"\"\"\n class MyDictWithBuggyCopy(dict):\n pass\n\n md = MyDictWithBuggyCopy({1: 2})\n editor = CollectionsEditor()\n editor.setup(md)\n assert editor.widget.editor.readonly\n\n\ndef test_collectionseditor_with_class_having_correct_copy(qtbot):\n \"\"\"\n Test that editor for object whose .copy() returns the same type is not\n readonly; cf. spyder-ide/spyder#6936.\n \"\"\"\n class MyDictWithCorrectCopy(dict):\n def copy(self):\n return MyDictWithCorrectCopy(self)\n\n md = MyDictWithCorrectCopy({1: 2})\n editor = CollectionsEditor()\n editor.setup(md)\n assert not editor.widget.editor.readonly\n\n\nif __name__ == \"__main__\":\n pytest.main()\n"
] |
[
[
"pandas.DateOffset",
"pandas.Series",
"pandas.Categorical",
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.MultiIndex.from_product",
"pandas.Period",
"numpy.array"
]
] |
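The pandas and numpy constructors indexed above are exactly the objects these tests feed into CollectionsModel and CollectionsEditor. A compact sketch with illustrative values:

import numpy
import pandas

df = pandas.DataFrame([[1, 2, 3], [20, 30, 40]])     # sorted by type/size in test_sort_collectionsmodel
s = pandas.Series(numpy.array([1.0, 42.0, 1337.0]))  # Series over a numpy array
p = pandas.Period("2018-03")                         # immutable-attribute object from the fixture
cat = pandas.Categorical([1, 2, 42])                 # likewise non-settable
td = pandas.Timedelta(minutes=5)
mi = pandas.MultiIndex.from_product([[0, 1], [td]])  # MultiIndex, as in the scrolling regression test
off = pandas.DateOffset()                            # viewed read-only in test_pandas_dateoffset_view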
reedkihaddi/mplsoccer
|
[
"73705bd269c035a97382eda1166c241bcbee154c"
] |
[
"examples/pizza_plots/plot_pizza_different_units.py"
] |
[
"\"\"\"\n=====================\nDifferent Units Pizza\n=====================\n\n* ``mplsoccer``, ``py_pizza`` module helps one to plot pizza charts in a few lines of code.\n\n* The design idea is inspired by `Tom Worville <https://twitter.com/Worville>`_, \\\n`Football Slices <https://twitter.com/FootballSlices>`_ and \\\n`Soma Zero FC <https://twitter.com/somazerofc>`_\n\n* We have re-written `Soumyajit Bose's <https://twitter.com/Soumyaj15209314>`_ pizza chart code \\\nto enable greater customisation.\n\nHere we plot a pizza chart where the parameters have the same units, but the maximum\nis five instead of 100.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom mplsoccer import PyPizza, FontManager\n\n##############################################################################\n# Load some fonts\n# ---------------\n# We will use mplsoccer's FontManager to load some fonts from Google Fonts.\n# We borrowed the FontManager from the excellent\n# `ridge_map library <https://github.com/ColCarroll/ridge_map>`_.\n\nfont_normal = FontManager((\"https://github.com/google/fonts/blob/main/apache/roboto/static/\"\n \"Roboto-Regular.ttf?raw=true\"))\nfont_italic = FontManager((\"https://github.com/google/fonts/blob/main/apache/roboto/static/\"\n \"Roboto-Italic.ttf?raw=true\"))\nfont_bold = FontManager((\"https://github.com/google/fonts/blob/main/apache/roboto/static/\"\n \"Roboto-Medium.ttf?raw=true\"))\n\n##############################################################################\n# Different Units\n# ---------------\n# Till now we were plotting a percentile chart where the upper limit was 100.\n# Let's take another example where the lower limit is 0 and upper limit is 5.\n# The below code shows how to plot pizza-chart for such case.\n\n# parameter and value list\nparams = ['Speed', 'Agility', 'Strength', 'Passing', 'Dribbles']\nvalues = [5, 2, 4, 3, 1]\n\n# instantiate PyPizza class\nbaker = PyPizza(\n params=params, # list of parameters\n straight_line_color=\"#F2F2F2\", # color for straight lines\n straight_line_lw=1, # linewidth for straight lines\n straight_line_limit=5.0, # max limit of straight lines\n last_circle_lw=0, # linewidth of last circle\n other_circle_lw=0, # linewidth for other circles\n inner_circle_size=0.4, # size of inner circle\n)\n\n# plot pizza\nfig, ax = baker.make_pizza(\n values, # list of values\n figsize=(8, 8), # adjust figsize according to your need\n color_blank_space=\"same\", # use same color to fill blank space\n blank_alpha=0.4, # alpha for blank-space colors\n param_location=5.5, # where the parameters will be added\n kwargs_slices=dict(\n facecolor=\"cornflowerblue\", edgecolor=\"#F2F2F2\",\n zorder=2, linewidth=1\n ), # values to be used when plotting slices\n kwargs_params=dict(\n color=\"#000000\", fontsize=12,\n fontproperties=font_normal.prop, va=\"center\"\n ), # values to be used when adding parameter\n kwargs_values=dict(\n color=\"#000000\", fontsize=12,\n fontproperties=font_normal.prop, zorder=3,\n bbox=dict(\n edgecolor=\"#000000\", facecolor=\"cornflowerblue\",\n boxstyle=\"round,pad=0.2\", lw=1\n )\n ) # values to be used when adding parameter-values\n)\n\n# add title\nfig.text(\n 0.515, 0.97, \"Player Name - Team Name\", size=18,\n ha=\"center\", fontproperties=font_bold.prop, color=\"#000000\"\n)\n\n# add subtitle\nfig.text(\n 0.515, 0.942,\n \"Rank vs Player's Position | Season Name\",\n size=15,\n ha=\"center\", fontproperties=font_bold.prop, color=\"#000000\"\n)\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
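Only matplotlib.pyplot.show is indexed for this entry, but the working pattern of the example is the PyPizza constructor plus make_pizza. A stripped-down sketch that keeps only keyword arguments already present in the file above, everything else left at defaults; it assumes mplsoccer is installed:

import matplotlib.pyplot as plt
from mplsoccer import PyPizza

params = ['Speed', 'Agility', 'Strength', 'Passing', 'Dribbles']
values = [5, 2, 4, 3, 1]
baker = PyPizza(params=params, straight_line_limit=5.0)  # scale tops out at 5, not 100
fig, ax = baker.make_pizza(values, figsize=(8, 8))
plt.show()                                               # the indexed call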
ashtonmv/pyiron
|
[
"99effc008b8a3cc642d1d927f0c712bf9557218a"
] |
[
"pyiron/gaussian/gaussian.py"
] |
[
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport os,subprocess,re,pandas\nimport numpy as np\nimport matplotlib.pyplot as pt\n\nfrom pyiron.dft.job.generic import GenericDFTJob\nfrom pyiron.base.generic.parameters import GenericParameters\nfrom pyiron.atomistics.structure.atoms import Atoms\n\ntry:\n from molmod.io.fchk import FCHKFile\n from molmod.units import amu,angstrom,electronvolt,centimeter,kcalmol\n from molmod.constants import lightspeed\n from molmod.periodic import periodic\n import tamkin\nexcept ImportError:\n pass\n\n\n__author__ = \"Jan Janssen, Sander Borgmans\"\n__copyright__ = \"Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - \" \\\n \"- Computational Materials Design (CM) Department\"\n__version__ = \"1.0\"\n__maintainer__ = \"\"\n__email__ = \"\"\n__status__ = \"trial\"\n__date__ = \"Aug 27, 2019\"\n\n\nclass Gaussian(GenericDFTJob):\n def __init__(self, project, job_name):\n super(Gaussian, self).__init__(project, job_name)\n self.__name__ = \"Gaussian\"\n self._executable_activate(enforce=True)\n self.input = GaussianInput()\n\n\n def write_input(self):\n input_dict = {'mem': self.server.memory_limit,\n 'cores': self.server.cores,\n 'verbosity': self.input['verbosity'],\n 'lot': self.input['lot'],\n 'basis_set': self.input['basis_set'],\n 'jobtype' : self.input['jobtype'],\n 'settings' : self.input['settings'],\n 'title' : self.input['title'],\n 'spin_mult': self.input['spin_mult'],\n 'charge': self.input['charge'],\n 'bsse_idx': self.input['bsse_idx'],\n 'symbols': self.structure.get_chemical_symbols().tolist(),\n 'pos': self.structure.positions\n }\n write_input(input_dict=input_dict, working_directory=self.working_directory)\n\n\n def collect_output(self):\n output_dict = collect_output(output_file=os.path.join(self.working_directory, 'input.fchk'))\n with self.project_hdf5.open(\"output\") as hdf5_output:\n for k, v in output_dict.items():\n hdf5_output[k] = v\n\n\n def to_hdf(self, hdf=None, group_name=None):\n super(Gaussian, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.structure.to_hdf(hdf5_input)\n self.input.to_hdf(hdf5_input)\n\n\n def from_hdf(self, hdf=None, group_name=None):\n super(Gaussian, self).from_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.from_hdf(hdf5_input)\n self.structure = Atoms().from_hdf(hdf5_input)\n\n\n def log(self):\n with open(os.path.join(self.working_directory, 'input.log')) as f:\n print(f.read())\n\n\n def calc_minimize(self, electronic_steps=None, ionic_steps=None, algorithm=None, ionic_forces=None):\n '''\n Function to setup the hamiltonian to perform ionic relaxations using DFT. 
The convergence goal can be set using\n        either the ionic_energy as a limit for fluctuations in energy or the ionic_forces.\n\n        **Arguments**\n\n            algorithm: SCF algorithm\n            electronic_steps (int): maximum number of electronic steps per electronic convergence\n            ionic_steps (int): maximum number of ionic steps\n            ionic_forces ('tight' or 'verytight'): convergence criterion for Berny opt (optional)\n        '''\n        settings = {}\n        opt_settings = []\n\n        if electronic_steps is not None:\n            if not 'SCF' in settings:\n                settings['SCF'] = []\n            settings['SCF'].append(\"MaxCycle={}\".format(electronic_steps))\n\n        if ionic_steps is not None:\n            opt_settings.append(\"MaxCycles={}\".format(ionic_steps))\n\n        if algorithm is not None:\n            if not 'SCF' in settings:\n                settings['SCF'] = []\n            settings['SCF'].append(algorithm)\n\n        if ionic_forces is not None:\n            assert isinstance(ionic_forces,str)\n            opt_settings.append(ionic_forces)\n\n        self.input['jobtype'] = 'opt' + '({})'.format(\",\".join(opt_settings))*(len(opt_settings)>0)\n        if not isinstance(self.input['settings'],dict):\n            self.input['settings'] = settings\n        else:\n            self.input['settings'].update(settings)\n\n        super(Gaussian, self).calc_minimize(\n            electronic_steps=electronic_steps,\n            ionic_steps=ionic_steps,\n            algorithm=algorithm,\n            ionic_forces=ionic_forces\n        )\n\n\n    def calc_static(self, electronic_steps=None, algorithm=None):\n        '''\n        Function to setup the hamiltonian to perform static SCF DFT runs\n\n        **Arguments**\n\n            algorithm (str): SCF algorithm\n            electronic_steps (int): maximum number of electronic steps, which can be used to achieve convergence\n        '''\n        settings = {}\n        if electronic_steps is not None:\n            if not 'SCF' in settings:\n                settings['SCF'] = []\n            settings['SCF'].append(\"MaxCycle={}\".format(electronic_steps))\n\n        if algorithm is not None:\n            if not 'SCF' in settings:\n                settings['SCF'] = []\n            settings['SCF'].append(algorithm)\n\n        self.input['jobtype'] = 'sp'\n        if not isinstance(self.input['settings'],dict):\n            self.input['settings'] = settings\n        else:\n            self.input['settings'].update(settings)\n\n        super(Gaussian, self).calc_static(\n            electronic_steps=electronic_steps,\n            algorithm=algorithm\n        )\n\n\n    def calc_md(self, temperature=None, n_ionic_steps=1000, time_step=None, n_print=100):\n        raise NotImplementedError(\"calc_md() not implemented in Gaussian.\")\n\n\n    def print_MO(self):\n        '''\n        Print a list of the MO's with the corresponding orbital energy and occupation.\n        '''\n\n        n_MO = self.get('output/structure/dft/scf_density').shape[0]\n        for n,index in enumerate(range(n_MO)):\n            # print orbital information\n            occ_alpha = int(self.get('output/structure/dft/n_alpha_electrons') > index)\n            occ_beta = int(self.get('output/structure/dft/n_beta_electrons') > index)\n\n            if self.get('output/structure/dft/beta_orbital_e') is None:\n                orbital_energy = self.get('output/structure/dft/alpha_orbital_e')[index]\n                print(\"#{}: \\t Orbital energy = {:>10.5f} \\t Occ. = {}\".format(n,orbital_energy,occ_alpha+occ_beta))\n            else:\n                orbital_energy = [self.get('output/structure/dft/alpha_orbital_e')[index],self.get('output/structure/dft/beta_orbital_e')[index]]\n                print(\"#{}: \\t Orbital energies (alpha,beta) = {:>10.5f},{:>10.5f} \\t Occ. 
= {},{}\".format(n,orbital_energy[0],orbital_energy[1],occ_alpha,occ_beta))\n\n\n def visualize_MO(self,index,particle_size=0.5,show_bonds=True):\n '''\n Visualize the MO identified by its index.\n\n **Arguments**\n\n index index of the MO, as listed by print_MO()\n\n particle_size\n size of the atoms for visualization, lower value if orbital is too small to see\n\n show_bonds connect atoms or not\n\n **Notes**\n\n This function should always be accompanied with the following commands (in a separate cell)\n\n view[1].update_surface(isolevel=1, color='blue', opacity=.3)\n view[2].update_surface(isolevel=-1, color='red', opacity=.3)\n\n This makes sure that the bonding and non-bonding MO's are plotted and makes them transparent\n '''\n n_MO = self.get('output/structure/dft/scf_density').shape[0]\n assert index >= 0 and index < n_MO\n assert len(self.get('output/structure/numbers')) < 50 # check whether structure does not become too large for interactive calculation of cube file\n\n # print orbital information\n occ_alpha = int(self.get('output/structure/dft/n_alpha_electrons') > index)\n occ_beta = int(self.get('output/structure/dft/n_beta_electrons') > index)\n\n if self.get('output/structure/dft/beta_orbital_e') is None:\n orbital_energy = self.get('output/structure/dft/alpha_orbital_e')[index]\n print(\"Orbital energy = {:>10.5f} \\t Occ. = {}\".format(orbital_energy,occ_alpha+occ_beta))\n else:\n orbital_energy = [self.get('output/structure/dft/alpha_orbital_e')[index],self.get('output/structure/dft/beta_orbital_e')[index]]\n print(\"Orbital energies (alpha,beta) = {:>10.5f},{:>10.5f} \\t Occ. = {},{}\".format(orbital_energy[0],orbital_energy[1],occ_alpha,occ_beta))\n\n # make cube file\n path = self.path+'_hdf5/'+self.name+'/input'\n out = subprocess.check_output(\n \"ml load Gaussian/g16_E.01-intel-2019a;module use /apps/gent/CO7/haswell-ib/modules/all; cubegen 1 MO={} {}.fchk {}.cube\".format(index+1,path,path),\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n shell=True,\n )\n # visualize cube file\n try:\n import nglview\n except ImportError:\n raise ImportError(\"The animate_nma_mode() function requires the package nglview to be installed\")\n\n atom_numbers = []\n atom_positions = []\n\n with open('{}.cube'.format(path),'r') as f:\n for i in range(2):\n f.readline()\n n_atoms = int(f.readline().split()[0][1:])\n for i in range(3):\n f.readline()\n for n in range(n_atoms):\n line = f.readline().split()\n atom_numbers.append(int(line[0]))\n atom_positions.append(np.array([float(m) for m in line[2:]])/angstrom)\n\n structure = Atoms(numbers=np.array(atom_numbers),positions=atom_positions)\n view = nglview.show_ase(structure)\n if not show_bonds:\n view.add_spacefill(radius_type='vdw', scale=0.5, radius=particle_size)\n view.remove_ball_and_stick()\n else:\n view.add_ball_and_stick()\n view.add_component('{}.cube'.format(path))\n view.add_component('{}.cube'.format(path))\n return view\n\n\n def read_NMA(self):\n '''\n Reads the NMA output from the Gaussian .log file.\n\n Returns:\n IR frequencies, intensities and corresponding eigenvectors (modes).\n '''\n # Read number of atoms\n nrat = len(self.get('output/structure/numbers'))\n\n # Read IR frequencies and intensities from log file\n low_freqs = []\n freqs = []\n ints = []\n modes = [[] for i in range(nrat)]\n\n path = self.path+'_hdf5/'+self.name+'/input.log'\n with open(path,'r') as f:\n lines = f.readlines()\n\n # Assert normal termination\n assert \"Normal termination of Gaussian\" in lines[-1]\n\n # Find zero 
frequencies\n for n in range(len(lines)):\n line = lines[n]\n if 'Low frequencies' in line:\n low_freqs += [float(i) for i in line[20:].split()]\n if 'Frequencies --' in line:\n freqs += [float(i) for i in line[15:].split()]\n if 'IR Inten --' in line:\n ints += [float(i) for i in line[15:].split()]\n if 'Atom AN X Y Z' in line:\n for m in range(nrat):\n modes[m] += [float(i) for i in lines[n+m+1][10:].split()]\n\n nma_zeros = 3*nrat-len(freqs)\n freq_array = np.zeros(3*nrat)\n freq_array[:nma_zeros] = np.array(low_freqs[:nma_zeros])\n freq_array[nma_zeros:] = np.array(freqs)\n freqs = freq_array * (lightspeed/centimeter) # put into atomic units\n ints = np.array(ints)\n modes = np.array(modes).reshape(len(ints),nrat,3)\n\n return freqs,ints,modes\n\n\n def bsse_to_pandas(self):\n '''\n Convert bsse output of all frames to a pandas Dataframe object.\n\n Returns:\n pandas.Dataframe: output as dataframe\n '''\n assert 'counterpoise' in [k.lower() for k in self.input['settings'].keys()] # check if there was a bsse calculation\n tmp = {}\n with self.project_hdf5.open('output/structure/bsse') as hdf:\n for key in hdf.list_nodes():\n tmp[key] = hdf[key] if isinstance(hdf[key],np.ndarray) else [hdf[key]]\n df = pandas.DataFrame(tmp)\n return df\n\n\nclass GaussianInput(GenericParameters):\n def __init__(self, input_file_name=None):\n super(GaussianInput, self).__init__(input_file_name=input_file_name, table_name=\"input_inp\", comment_char=\"#\")\n\n def load_default(self):\n '''\n Loading the default settings for the input file.\n '''\n input_str = \"\"\"\\\nlot HF\nbasis_set 6-311G(d,p)\nspin_mult 1\ncharge 0\n\"\"\"\n self.load_string(input_str)\n\n\ndef write_input(input_dict,working_directory='.'):\n # Comments can be written with ! in Gaussian\n # Load dictionary\n lot = input_dict['lot']\n basis_set = input_dict['basis_set']\n spin_mult = input_dict['spin_mult'] # 2S+1\n charge = input_dict['charge']\n symbols = input_dict['symbols']\n pos = input_dict['pos']\n assert pos.shape[0] == len(symbols)\n\n # Optional elements\n if not input_dict['mem'] is None:\n mem = input_dict['mem'] + 'B' * (input_dict['mem'][-1]!='B') # check if string ends in bytes\n # convert pmem to mem\n cores = input_dict['cores']\n nmem = str(int(re.findall(\"\\d+\", mem)[0]) * cores)\n mem_unit = re.findall(\"[a-zA-Z]+\", mem)[0]\n mem = nmem+mem_unit\n else:\n mem = \"800MB\" # default allocation\n\n if not input_dict['jobtype'] is None:\n jobtype = input_dict['jobtype']\n else:\n jobtype = \"\" # corresponds to sp\n\n if not input_dict['title'] is None:\n title = input_dict['title']\n else:\n title = \"no title\"\n\n if not input_dict['settings'] is None:\n settings = input_dict['settings'] # dictionary {key: [options]}\n else:\n settings = {}\n\n verbosity_dict={'low':'t','normal':'n','high':'p'}\n if not input_dict['verbosity'] is None:\n verbosity = input_dict['verbosity']\n if verbosity in verbosity_dict:\n verbosity = verbosity_dict[verbosity]\n else:\n verbosity='n'\n\n if 'Counterpoise' in settings.keys():\n if input_dict['bsse_idx'] is None or not len(input_dict['bsse_idx'])==len(pos) : # check if all elements are present for a BSSE calculation\n raise ValueError('The Counterpoise setting requires a valid bsse_idx array')\n # Check bsse idx (should start from 1 for Gaussian)\n input_dict['bsse_idx'] = [k - min(input_dict['bsse_idx']) + 1 for k in input_dict['bsse_idx']]\n # Check if it only contains conseqcutive numbers (sum of set should be n*(n+1)/2)\n assert sum(set(input_dict['bsse_idx'])) == 
(max(input_dict['bsse_idx'])*(max(input_dict['bsse_idx']) + 1))/2\n\n # Parse settings\n settings_string = \"\"\n for key,valuelst in settings.items():\n if not isinstance(valuelst, list):\n valuelst = [valuelst]\n option = key + \"({}) \".format(\",\".join(valuelst))*(len(valuelst)>0)\n settings_string += option\n\n # Write to file\n route_section = \"#{} {}/{} {} {}\\n\\n\".format(verbosity,lot,basis_set,jobtype,settings_string)\n with open(os.path.join(working_directory, 'input.com'), 'w') as f:\n f.write(\"%mem={}\\n\".format(mem))\n f.write(\"%chk=input.chk\\n\")\n f.write(route_section)\n f.write(\"{}\\n\\n\".format(title))\n\n if not 'Counterpoise' in settings.keys():\n f.write(\"{} {}\\n\".format(charge,spin_mult))\n for n,p in enumerate(pos):\n f.write(\" {}\\t{: 1.6f}\\t{: 1.6f}\\t{: 1.6f}\\n\".format(symbols[n],p[0],p[1],p[2]))\n f.write('\\n\\n') # don't know whether this is still necessary in G16\n else:\n if isinstance(charge,list) and isinstance(spin_mult,list): # for BSSE it is possible to define charge and multiplicity for the fragments separately\n f.write(\" \".join([\"{},{}\".format(charge[idx],spin_mult[idx]) for idx in range(int(settings['Counterpoise']))])) # first couple is for full system, then every fragment separately\n else:\n f.write(\"{} {}\\n\".format(charge,spin_mult))\n\n for n,p in enumerate(pos):\n f.write(\" {}(Fragment={})\\t{: 1.6f}\\t{: 1.6f}\\t{: 1.6f}\\n\".format(symbols[n],input_dict['bsse_idx'][n],p[0],p[1],p[2]))\n f.write('\\n\\n') # don't know whether this is still necessary in G16\n\n\n# we could use theochem iodata, should be more robust than molmod.io\n# but we require the latest iodata for this, not the conda version\ndef fchk2dict(fchk):\n # probably still some data missing\n # check job type, for now implement basics (SP=single point, FOpt = full opt, Freq = frequency calculation)\n if not fchk.command.lower() in ['sp','fopt','freq']:\n raise NotImplementedError\n\n # Basic information\n fchkdict = {}\n fchkdict['jobtype'] = fchk.command.lower()\n fchkdict['lot'] = fchk.lot\n fchkdict['basis_set'] = fchk.basis\n\n fchkdict['structure/numbers'] = fchk.fields.get('Atomic numbers')\n fchkdict['structure/masses'] = fchk.fields.get('Real atomic weights')*amu\n fchkdict['structure/charges'] = fchk.fields.get('Mulliken Charges')\n fchkdict['structure/dipole'] = fchk.fields.get('Dipole Moment')\n fchkdict['structure/dft/n_electrons'] = fchk.fields.get('Number of electrons')\n fchkdict['structure/dft/n_alpha_electrons'] = fchk.fields.get('Number of alpha electrons')\n fchkdict['structure/dft/n_beta_electrons'] = fchk.fields.get('Number of beta electrons')\n fchkdict['structure/dft/n_basis_functions'] = fchk.fields.get('Number of basis functions')\n\n # Orbital information\n fchkdict['structure/dft/alpha_orbital_e'] = fchk.fields.get('Alpha Orbital Energies')\n fchkdict['structure/dft/beta_orbital_e'] = fchk.fields.get('Beta Orbital Energies')\n\n # Densities\n fchkdict['structure/dft/scf_density'] = _triangle_to_dense(fchk.fields.get('Total SCF Density'))\n fchkdict['structure/dft/spin_scf_density'] = _triangle_to_dense(fchk.fields.get('Spin SCF Density'))\n\n if fchk.lot.upper() in ['MP2', 'MP3', 'CC', 'CI']:\n # only one of the lots should be present, hence using the same key\n fchkdict['structure/dft/post_scf_density'] = _triangle_to_dense(fchk.fields.get('Total {} Density'.format(fchk.lot)))\n fchkdict['structure/dft/post_spin_scf_density'] = _triangle_to_dense(fchk.fields.get('Spin {} Density'.format(fchk.lot)))\n\n # Specific job 
information\n if fchkdict['jobtype'] == 'fopt':\n if len(fchk.get_optimization_coordinates().shape) == 3:\n fchkdict['structure/positions'] = fchk.get_optimization_coordinates()[-1]/angstrom\n else:\n fchkdict['structure/positions'] = fchk.get_optimization_coordinates()/angstrom\n fchkdict['generic/positions'] = fchk.get_optimization_coordinates()/angstrom\n fchkdict['generic/energy_tot'] = fchk.get_optimization_energies()/electronvolt\n fchkdict['generic/forces'] = fchk.get_optimization_gradients()/(electronvolt/angstrom) * -1\n\n if fchkdict['jobtype'] == 'freq':\n fchkdict['structure/positions'] = fchk.fields.get('Current cartesian coordinates').reshape([1,-1, 3])/angstrom\n fchkdict['generic/positions'] = fchk.fields.get('Current cartesian coordinates').reshape([1,-1, 3])/angstrom\n fchkdict['generic/forces'] = fchk.fields.get('Cartesian Gradient').reshape([-1, 3])/(electronvolt/angstrom) *-1\n fchkdict['generic/hessian'] = fchk.get_hessian()/(electronvolt/angstrom**2)\n fchkdict['generic/energy_tot'] = fchk.fields.get('Total Energy')/electronvolt\n\n if fchkdict['jobtype'] == 'sp':\n fchkdict['structure/positions'] = fchk.fields.get('Current cartesian coordinates').reshape([1,-1, 3])/angstrom\n fchkdict['generic/positions'] = fchk.fields.get('Current cartesian coordinates').reshape([1,-1, 3])/angstrom\n fchkdict['generic/energy_tot'] = fchk.fields.get('Total Energy')/electronvolt\n\n return fchkdict\n\n\ndef get_bsse_array(line,it):\n numeric_const_pattern = '[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?'\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n\n cE_corr = float(rx.findall(line)[0]) * kcalmol/electronvolt\n line = next(it) # go to next line\n cE_raw = float(rx.findall(line)[0]) * kcalmol/electronvolt\n line = next(it) # go to next line\n sum_fragments = float(rx.findall(line)[0])/electronvolt\n line = next(it) # go to next line\n bsse_corr = float(rx.findall(line)[0])/electronvolt\n line = next(it) # go to next line\n E_tot_corr = float(rx.findall(line)[0])/electronvolt\n\n return E_tot_corr,bsse_corr,sum_fragments,cE_raw,cE_corr\n\n\ndef read_bsse(output_file,output_dict):\n # Check whether the route section contains the Counterpoise setting (if fchk module is update, route section can be loaded from dict)\n cp = False\n with open(output_file,'r') as f:\n line = f.readline()\n while line:\n if 'route' in line.lower():\n if 'counterpoise' in f.readline().lower(): # read next line\n cp = True\n break\n line = f.readline()\n\n if cp:\n # the log file has the same path and name as the output file aside from the file extension\n log_file = output_file[:output_file.rfind('.')] + '.log'\n\n frames = 1 if isinstance(output_dict['generic/energy_tot'],float) else len(output_dict['generic/energy_tot'])\n\n output_dict['structure/bsse/energy_tot_corrected'] = np.zeros(frames)\n output_dict['structure/bsse/bsse_correction'] = np.zeros(frames)\n output_dict['structure/bsse/sum_of_fragments'] = np.zeros(frames)\n output_dict['structure/bsse/complexation_energy_raw'] = np.zeros(frames)\n output_dict['structure/bsse/complexation_energy_corrected'] = np.zeros(frames)\n\n it = _reverse_readline(log_file)\n line = next(it)\n for i in range(frames):\n found = False\n while not found:\n line = next(it)\n if 'complexation energy' in line:\n E_tot_corr,bsse_corr,sum_fragments,cE_raw,cE_corr = get_bsse_array(line,it)\n output_dict['structure/bsse/energy_tot_corrected'][i] = E_tot_corr\n output_dict['structure/bsse/bsse_correction'][i] = bsse_corr\n 
output_dict['structure/bsse/sum_of_fragments'][i] = sum_fragments\n output_dict['structure/bsse/complexation_energy_raw'][i] = cE_raw\n output_dict['structure/bsse/complexation_energy_corrected'][i] = cE_corr\n found = True\n\n if frames==1:\n output_dict['structure/bsse/energy_tot_corrected'] = output_dict['structure/bsse/energy_tot_corrected'][0]\n output_dict['structure/bsse/bsse_correction'] = output_dict['structure/bsse/bsse_correction'][0]\n output_dict['structure/bsse/sum_of_fragments'] = output_dict['structure/bsse/sum_of_fragments'][0]\n output_dict['structure/bsse/complexation_energy_raw'] = output_dict['structure/bsse/complexation_energy_raw'][0]\n output_dict['structure/bsse/complexation_energy_corrected'] = output_dict['structure/bsse/complexation_energy_corrected'][0]\n else:\n # flip array sequence\n output_dict['structure/bsse/energy_tot_corrected'] = output_dict['structure/bsse/energy_tot_corrected'][::-1]\n output_dict['structure/bsse/bsse_correction'] = output_dict['structure/bsse/bsse_correction'][::-1]\n output_dict['structure/bsse/sum_of_fragments'] = output_dict['structure/bsse/sum_of_fragments'][::-1]\n output_dict['structure/bsse/complexation_energy_raw'] = output_dict['structure/bsse/complexation_energy_raw'][::-1]\n output_dict['structure/bsse/complexation_energy_corrected'] = output_dict['structure/bsse/complexation_energy_corrected'][::-1]\n\n\ndef read_EmpiricalDispersion(output_file,output_dict):\n # Get dispersion term from log file if it is there\n # dispersion term is not retrieved from gaussian output in fchk\n\n disp = None\n with open(output_file,'r') as f:\n while True:\n line = f.readline()\n if 'Route' in line:\n line = f.readline()\n if 'EmpiricalDispersion' in line:\n idx = line.find('EmpiricalDispersion')\n if 'GD3' in line[idx:]:\n search_term = 'Grimme-D3 Dispersion energy='\n else:\n raise NotImplementedError\n else:\n return\n break\n\n # the log file has the same path and name as the output file aside from the file extension\n log_file = output_file[:output_file.rfind('.')] + '.log'\n it = _reverse_readline(log_file)\n while True:\n line = next(it)\n if search_term in line:\n disp = float(line[38:-9])/electronvolt # could be changed when new search terms are implemented\n break\n\n output_dict['generic/energy_tot'] += disp\n\n\ndef collect_output(output_file):\n # Read output\n fchk = FCHKFile(output_file)\n\n # Translate to dict\n output_dict = fchk2dict(fchk)\n\n # Read BSSE output if it is present\n read_bsse(output_file,output_dict)\n\n # Correct energy if empirical dispersion contribution is present\n read_EmpiricalDispersion(output_file,output_dict)\n\n return output_dict\n\n\n# function from theochem iodata\ndef _triangle_to_dense(triangle):\n '''Convert a symmetric matrix in triangular storage to a dense square matrix.\n Parameters\n ----------\n triangle\n A row vector containing all the unique matrix elements of symmetric\n matrix. 
(Either the lower-triangular part in row major-order or the\n upper-triangular part in column-major order.)\n Returns\n -------\n ndarray\n a square symmetric matrix.\n '''\n if triangle is None: return None\n nrow = int(np.round((np.sqrt(1 + 8 * len(triangle)) - 1) / 2))\n result = np.zeros((nrow, nrow))\n begin = 0\n for irow in range(nrow):\n end = begin + irow + 1\n result[irow, :irow + 1] = triangle[begin:end]\n result[:irow + 1, irow] = triangle[begin:end]\n begin = end\n return result\n\n\ndef _reverse_readline(filename, buf_size=8192):\n '''A generator that returns the lines of a file in reverse order'''\n '''https://stackoverflow.com/questions/2301789/read-a-file-in-reverse-order-using-python'''\n with open(filename) as fh:\n segment = None\n offset = 0\n fh.seek(0, os.SEEK_END)\n file_size = remaining_size = fh.tell()\n while remaining_size > 0:\n offset = min(file_size, offset + buf_size)\n fh.seek(file_size - offset)\n buffer = fh.read(min(remaining_size, buf_size))\n remaining_size -= buf_size\n lines = buffer.split('\\n')\n # The first line of the buffer is probably not a complete line so\n # we'll save it and append it to the last line of the next buffer\n # we read\n if segment is not None:\n # If the previous chunk starts right from the beginning of line\n # do not concat the segment to the last line of new chunk.\n # Instead, yield the segment first\n if buffer[-1] != '\\n':\n lines[-1] += segment\n else:\n yield segment\n segment = lines[0]\n for index in range(len(lines) - 1, 0, -1):\n if lines[index]:\n yield lines[index]\n # Don't yield None if the file was empty\n if segment is not None:\n yield segment\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"pandas.DataFrame"
]
] |
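numpy.zeros and numpy.array do most of the numerical bookkeeping in this module; the densest use is _triangle_to_dense, whose core reduces to the following self-contained sketch with a hypothetical 3x3 input (pandas.DataFrame appears in bsse_to_pandas, built the same dict-of-arrays way):

import numpy as np
import pandas

triangle = np.array([1., 2., 3., 4., 5., 6.])  # lower triangle of a symmetric 3x3 matrix, row-major
nrow = int(np.round((np.sqrt(1 + 8 * len(triangle)) - 1) / 2))  # solve n*(n+1)/2 == len(triangle)
dense = np.zeros((nrow, nrow))
begin = 0
for irow in range(nrow):
    end = begin + irow + 1
    dense[irow, :irow + 1] = triangle[begin:end]  # fill the row of the lower triangle
    dense[:irow + 1, irow] = triangle[begin:end]  # mirror it into the column
    begin = end
# dense is now [[1, 2, 4], [2, 3, 5], [4, 5, 6]]
df = pandas.DataFrame({'diag': dense.diagonal()})  # dict of arrays -> frame, as in bsse_to_pandas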
zvs08/Stark
|
[
"00ed184e34f8573558b8e27803807ca4cefcf884"
] |
[
"lib/train/actors/stark_s.py"
] |
[
"from . import BaseActor\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\nimport torch\nfrom lib.utils.merge import merge_template_search\n\n\nclass STARKSActor(BaseActor):\n \"\"\" Actor for training the STARK-S and STARK-ST(Stage1)\"\"\"\n def __init__(self, net, objective, loss_weight, settings):\n super().__init__(net, objective)\n self.loss_weight = loss_weight\n self.settings = settings\n self.bs = self.settings.batchsize # batch size\n\n def __call__(self, data):\n \"\"\"\n args:\n data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\n template_images: (N_t, batch, 3, H, W)\n search_images: (N_s, batch, 3, H, W)\n returns:\n loss - the training loss\n status - dict containing detailed losses\n \"\"\"\n # forward pass\n out_dict = self.forward_pass(data, run_box_head=True, run_cls_head=False)\n\n # process the groundtruth\n gt_bboxes = data['search_anno'] # (Ns, batch, 4) (x1,y1,w,h)\n\n # compute losses\n loss, status = self.compute_losses(out_dict, gt_bboxes[0])\n\n return loss, status\n\n def forward_pass(self, data, run_box_head, run_cls_head):\n feat_dict_list = []\n # process the templates\n for i in range(self.settings.num_template):\n template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128)\n template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128)\n feat_dict_list.append(self.net(img={'tensors': template_img_i, 'mask': template_att_i}, mode='backbone'))\n\n # process the search regions (t-th frame)\n search_img = data['search_images'].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320)\n search_att = data['search_att'].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320)\n feat_dict_list.append(self.net(img={'tensors': search_img, 'mask': search_att}, mode='backbone'))\n\n # run the transformer and compute losses\n seq_dict = merge_template_search(feat_dict_list)\n out_dict, _, _ = self.net(seq_dict=seq_dict, mode=\"transformer\", run_box_head=run_box_head, run_cls_head=run_cls_head)\n # out_dict: (B, N, C), outputs_coord: (1, B, N, C), target_query: (1, B, N, C)\n return out_dict\n\n def compute_losses(self, pred_dict, gt_bbox, return_status=True):\n # Get boxes\n pred_boxes = pred_dict['pred_boxes']\n if torch.isnan(pred_boxes).any():\n raise ValueError(\"Network outputs is NAN! Stop Training\")\n num_queries = pred_boxes.size(1)\n pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4) # (B,N,4) --> (BN,4) (x1,y1,x2,y2)\n gt_boxes_vec = box_xywh_to_xyxy(gt_bbox)[:, None, :].repeat((1, num_queries, 1)).view(-1, 4).clamp(min=0.0, max=1.0) # (B,4) --> (B,1,4) --> (B,N,4)\n # compute giou and iou\n try:\n giou_loss, iou = self.objective['giou'](pred_boxes_vec, gt_boxes_vec) # (BN,4) (BN,4)\n except:\n giou_loss, iou = torch.tensor(0.0).cuda(), torch.tensor(0.0).cuda()\n # compute l1 loss\n l1_loss = self.objective['l1'](pred_boxes_vec, gt_boxes_vec) # (BN,4) (BN,4)\n # weighted sum\n loss = self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss\n if return_status:\n # status for log\n mean_iou = iou.detach().mean()\n status = {\"Loss/total\": loss.item(),\n \"Loss/giou\": giou_loss.item(),\n \"Loss/l1\": l1_loss.item(),\n \"IoU\": mean_iou.item()}\n return loss, status\n else:\n return loss\n"
] |
[
[
"torch.isnan",
"torch.tensor"
]
] |
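The two indexed calls act as guards in compute_losses above: torch.isnan aborts training on NaN box predictions, and torch.tensor(0.0) is the zero-loss fallback when the GIoU computation raises (the repository additionally moves the fallback tensors to GPU with .cuda()). A minimal reproduction with illustrative values:

import torch

pred_boxes = torch.tensor([[0.5, 0.5, 0.2, float('nan')]])  # a (cx, cy, w, h) box with a NaN entry
assert torch.isnan(pred_boxes).any()                        # compute_losses raises ValueError here
giou_loss, iou = torch.tensor(0.0), torch.tensor(0.0)       # fallback used in the except branch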
SiewLinYap/flair
|
[
"b0e4cc1a92c4bd56536ed4e09c8ff6cb1a89f08c"
] |
[
"flair/trainers/language_model_trainer.py"
] |
[
"import time, datetime\nimport random\nimport sys\nimport logging\nfrom pathlib import Path\nfrom typing import Union\n\nfrom torch import cuda\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim.sgd import SGD\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nimport flair\nfrom flair.data import Dictionary\nfrom flair.models import LanguageModel\nfrom flair.optim import *\nfrom flair.training_utils import add_file_handler\n\nlog = logging.getLogger(\"flair\")\n\n\nclass TextDataset(Dataset):\n def __init__(\n self,\n path: Path,\n dictionary: Dictionary,\n expand_vocab: bool = False,\n forward: bool = True,\n split_on_char: bool = True,\n random_case_flip: bool = True,\n shuffle_lines: bool = True,\n ):\n\n assert path.exists()\n\n self.files = None\n self.path = path\n self.dictionary = dictionary\n self.split_on_char = split_on_char\n self.forward = forward\n self.random_case_flip = random_case_flip\n self.expand_vocab = expand_vocab\n self.shuffle_lines = shuffle_lines\n\n if path.is_dir():\n self.files = sorted([f for f in path.iterdir() if f.exists()])\n else:\n self.files = [path]\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index=0) -> torch.tensor:\n return self.charsplit(\n self.files[index],\n self.expand_vocab,\n self.forward,\n self.split_on_char,\n self.random_case_flip,\n )\n\n def charsplit(\n self,\n path: Path,\n expand_vocab=False,\n forward=True,\n split_on_char=True,\n random_case_flip=True,\n ) -> torch.tensor:\n\n \"\"\"Tokenizes a text file on character basis.\"\"\"\n assert path.exists()\n\n lines = open(path, \"r\", encoding=\"utf-8\").readlines()\n log.info(f\"read text file with {len(lines)} lines\")\n if self.shuffle_lines:\n random.shuffle(lines)\n log.info(f\"shuffled\")\n\n tokens = 0\n for line in lines:\n\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n\n tokens += len(chars)\n\n # Add chars to the dictionary\n if expand_vocab:\n for char in chars:\n self.dictionary.add_item(char)\n\n ids = torch.zeros(tokens, dtype=torch.long)\n if forward:\n # charsplit file content\n token = 0\n for line in lines:\n if random_case_flip:\n line = self.random_casechange(line)\n\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n\n for char in chars:\n if token >= tokens:\n break\n ids[token] = self.dictionary.get_idx_for_item(char)\n token += 1\n else:\n # charsplit file content\n token = tokens - 1\n for line in lines:\n if random_case_flip:\n line = self.random_casechange(line)\n\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n\n for char in chars:\n if token >= tokens:\n break\n ids[token] = self.dictionary.get_idx_for_item(char)\n token -= 1\n return ids\n\n @staticmethod\n def random_casechange(line: str) -> str:\n no = random.randint(0, 99)\n if no is 0:\n line = line.lower()\n if no is 1:\n line = line.upper()\n return line\n\n def tokenize(self, path: Path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert path.exists()\n # Add words to the dictionary\n with open(path, \"r\") as f:\n tokens = 0\n for line in f:\n words = line.split() + [\"<eos>\"]\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, \"r\") as f:\n ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)\n token = 0\n for line in f:\n words = line.split() + [\"<eos>\"]\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids\n\n\nclass 
TextCorpus(object):\n def __init__(\n self,\n path: Union[Path, str],\n dictionary: Dictionary,\n forward: bool = True,\n character_level: bool = True,\n random_case_flip: bool = True,\n shuffle_lines: bool = True,\n ):\n self.dictionary: Dictionary = dictionary\n self.forward = forward\n self.split_on_char = character_level\n self.random_case_flip = random_case_flip\n self.shuffle_lines = shuffle_lines\n\n if type(path) == str:\n path = Path(path)\n\n self.train = TextDataset(\n path / \"train\",\n dictionary,\n False,\n self.forward,\n self.split_on_char,\n self.random_case_flip,\n shuffle_lines=self.shuffle_lines,\n )\n\n # TextDataset returns a list. valid and test are only one file, so return the first element\n self.valid = TextDataset(\n path / \"valid.txt\",\n dictionary,\n False,\n self.forward,\n self.split_on_char,\n self.random_case_flip,\n shuffle_lines=False,\n )[0]\n self.test = TextDataset(\n path / \"test.txt\",\n dictionary,\n False,\n self.forward,\n self.split_on_char,\n self.random_case_flip,\n shuffle_lines=False,\n )[0]\n\n\nclass LanguageModelTrainer:\n def __init__(\n self,\n model: LanguageModel,\n corpus: TextCorpus,\n optimizer: Optimizer = SGD,\n test_mode: bool = False,\n epoch: int = 0,\n split: int = 0,\n loss: float = 10000,\n optimizer_state: dict = None,\n ):\n self.model: LanguageModel = model\n self.optimizer: Optimizer = optimizer\n self.corpus: TextCorpus = corpus\n self.test_mode: bool = test_mode\n\n self.loss_function = torch.nn.CrossEntropyLoss()\n self.log_interval = 100\n self.num_workers = 2\n self.epoch = epoch\n self.split = split\n self.loss = loss\n self.optimizer_state = optimizer_state\n\n def train(\n self,\n base_path: Union[Path, str],\n sequence_length: int,\n learning_rate: float = 20,\n mini_batch_size: int = 100,\n anneal_factor: float = 0.25,\n patience: int = 10,\n clip=0.25,\n max_epochs: int = 1000,\n checkpoint: bool = False,\n grow_to_sequence_length: int = 0,\n apex: bool = False,\n apex_opt_level: str = 'O1',\n **kwargs,\n ):\n\n if apex:\n if sys.version_info < (3, 0):\n raise RuntimeError(\"Apex currently only supports Python 3. Aborting.\")\n if amp is None:\n raise RuntimeError(\"Failed to import apex. 
Please install apex from https://www.github.com/nvidia/apex \"\n \"to enable mixed-precision training.\")\n \n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n\n add_file_handler(log, base_path / \"training.log\")\n\n number_of_splits: int = len(self.corpus.train)\n\n val_data = self._batchify(self.corpus.valid, mini_batch_size)\n\n base_path.mkdir(parents=True, exist_ok=True)\n loss_txt = base_path / \"loss.txt\"\n savefile = base_path / \"best-lm.pt\"\n\n try:\n epoch = self.epoch\n best_val_loss = self.loss\n optimizer = self.optimizer(\n self.model.parameters(), lr=learning_rate, **kwargs\n )\n if self.optimizer_state is not None:\n optimizer.load_state_dict(self.optimizer_state)\n\n if isinstance(optimizer, (AdamW, SGDW)):\n scheduler: ReduceLRWDOnPlateau = ReduceLRWDOnPlateau(\n optimizer, verbose=True, factor=anneal_factor, patience=patience\n )\n else:\n scheduler: ReduceLROnPlateau = ReduceLROnPlateau(\n optimizer, verbose=True, factor=anneal_factor, patience=patience\n )\n\n if apex:\n self.model, optimizer = amp.initialize(self.model, optimizer,\n opt_level=apex_opt_level\n )\n \n training_generator = DataLoader(\n self.corpus.train, shuffle=False, num_workers=self.num_workers\n )\n\n for epoch in range(self.epoch, max_epochs):\n epoch_start_time = time.time()\n # Shuffle training files randomly after serially iterating through corpus one\n if epoch > 0:\n training_generator = DataLoader(\n self.corpus.train, shuffle=True, num_workers=self.num_workers\n )\n self.model.save_checkpoint(\n base_path / f\"epoch_{epoch}.pt\",\n optimizer,\n epoch,\n 0,\n best_val_loss,\n )\n\n # iterate through training data, starting at self.split (for checkpointing)\n for curr_split, train_slice in enumerate(\n training_generator, self.split\n ):\n\n if sequence_length < grow_to_sequence_length:\n sequence_length += 1\n log.info(f\"Sequence length is {sequence_length}\")\n\n split_start_time = time.time()\n # off by one for printing\n curr_split += 1\n train_data = self._batchify(train_slice.flatten(), mini_batch_size)\n\n log.info(\n \"Split %d\" % curr_split\n + \"\\t - ({:%H:%M:%S})\".format(datetime.datetime.now())\n )\n\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n\n # go into train mode\n self.model.train()\n\n # reset variables\n hidden = self.model.init_hidden(mini_batch_size)\n\n # not really sure what this does\n ntokens = len(self.corpus.dictionary)\n\n total_loss = 0\n start_time = time.time()\n\n for batch, i in enumerate(\n range(0, train_data.size(0) - 1, sequence_length)\n ):\n data, targets = self._get_batch(train_data, i, sequence_length)\n\n if not data.is_cuda and cuda.is_available():\n log.info(\n \"Batch %d is not on CUDA, training will be very slow\"\n % (batch)\n )\n raise Exception(\"data isnt on cuda\")\n\n self.model.zero_grad()\n optimizer.zero_grad()\n\n # do the forward pass in the model\n output, rnn_output, hidden = self.model.forward(data, hidden)\n\n # try to predict the targets\n loss = self.loss_function(output.view(-1, ntokens), targets)\n # Backward\n if apex:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)\n\n optimizer.step()\n\n total_loss += loss.data\n\n # We detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start 
of the dataset.\n hidden = self._repackage_hidden(hidden)\n\n # explicitly remove loss to clear up memory\n del loss, output, rnn_output\n\n if batch % self.log_interval == 0 and batch > 0:\n cur_loss = total_loss.item() / self.log_interval\n elapsed = time.time() - start_time\n log.info(\n \"| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | \"\n \"loss {:5.2f} | ppl {:8.2f}\".format(\n curr_split,\n number_of_splits,\n batch,\n len(train_data) // sequence_length,\n elapsed * 1000 / self.log_interval,\n cur_loss,\n math.exp(cur_loss),\n )\n )\n total_loss = 0\n start_time = time.time()\n\n log.info(\n \"%d seconds for train split %d\"\n % (time.time() - split_start_time, curr_split)\n )\n\n ###############################################################################\n self.model.eval()\n\n val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)\n scheduler.step(val_loss)\n\n log.info(\"best loss so far {:5.2f}\".format(best_val_loss))\n\n log.info(self.model.generate_text())\n\n if checkpoint:\n self.model.save_checkpoint(\n base_path / \"checkpoint.pt\",\n optimizer,\n epoch,\n curr_split,\n best_val_loss,\n )\n\n # Save the model if the validation loss is the best we've seen so far.\n if val_loss < best_val_loss:\n self.model.best_score = best_val_loss\n self.model.save(savefile)\n best_val_loss = val_loss\n\n ###############################################################################\n # print info\n ###############################################################################\n log.info(\"-\" * 89)\n\n summary = (\n \"| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | \"\n \"valid ppl {:8.2f} | learning rate {:3.4f}\".format(\n curr_split,\n number_of_splits,\n epoch + 1,\n (time.time() - split_start_time),\n val_loss,\n math.exp(val_loss),\n learning_rate,\n )\n )\n\n with open(loss_txt, \"a\") as myfile:\n myfile.write(\"%s\\n\" % summary)\n\n log.info(summary)\n log.info(\"-\" * 89)\n\n log.info(\"Epoch time: %.2f\" % (time.time() - epoch_start_time))\n\n except KeyboardInterrupt:\n log.info(\"-\" * 89)\n log.info(\"Exiting from training early\")\n\n ###############################################################################\n # final testing\n ###############################################################################\n test_data = self._batchify(self.corpus.test, mini_batch_size)\n test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)\n\n summary = \"TEST: valid loss {:5.2f} | valid ppl {:8.2f}\".format(\n test_loss, math.exp(test_loss)\n )\n with open(loss_txt, \"a\") as myfile:\n myfile.write(\"%s\\n\" % summary)\n\n log.info(summary)\n log.info(\"-\" * 89)\n\n def evaluate(self, data_source, eval_batch_size, sequence_length):\n # Turn on evaluation mode which disables dropout.\n self.model.eval()\n\n with torch.no_grad():\n total_loss = 0\n ntokens = len(self.corpus.dictionary)\n\n hidden = self.model.init_hidden(eval_batch_size)\n\n for i in range(0, data_source.size(0) - 1, sequence_length):\n data, targets = self._get_batch(data_source, i, sequence_length)\n prediction, rnn_output, hidden = self.model.forward(data, hidden)\n output_flat = prediction.view(-1, ntokens)\n total_loss += len(data) * self.loss_function(output_flat, targets).data\n hidden = self._repackage_hidden(hidden)\n return total_loss.item() / len(data_source)\n\n @staticmethod\n def _batchify(data, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = data.size(0) // batch_size\n # 
Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * batch_size)\n # Evenly divide the data across the bsz batches.\n data = data.view(batch_size, -1).t().contiguous()\n return data\n\n @staticmethod\n def _get_batch(source, i, sequence_length):\n seq_len = min(sequence_length, len(source) - 1 - i)\n\n data = source[i : i + seq_len].clone().detach()\n target = source[i + 1 : i + 1 + seq_len].view(-1).clone().detach()\n\n data = data.to(flair.device)\n target = target.to(flair.device)\n\n return data, target\n\n @staticmethod\n def _repackage_hidden(h):\n \"\"\"Wraps hidden states in new tensors, to detach them from their history.\"\"\"\n return tuple(v.clone().detach() for v in h)\n\n @staticmethod\n def load_from_checkpoint(\n checkpoint_file: Path, corpus: TextCorpus, optimizer: Optimizer = SGD\n ):\n checkpoint = LanguageModel.load_checkpoint(checkpoint_file)\n return LanguageModelTrainer(\n checkpoint[\"model\"],\n corpus,\n optimizer,\n epoch=checkpoint[\"epoch\"],\n split=checkpoint[\"split\"],\n loss=checkpoint[\"loss\"],\n optimizer_state=checkpoint[\"optimizer_state_dict\"],\n )\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] |
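
The `apis` column for this flair record extracts only two torch calls: `torch.utils.data.DataLoader` and `torch.cuda.is_available`. As a hedged illustration (not part of the record itself), here is a minimal sketch of the same pattern the trainer uses — a `Dataset` yielding one tensor of character ids per file, iterated through a `DataLoader`, with GPU-only behavior gated on CUDA availability. `CharDataset` is a hypothetical stand-in for flair's `TextDataset`:

```python
# Minimal sketch, assuming nothing beyond stock PyTorch. `CharDataset` is a
# hypothetical stand-in for flair's TextDataset: one tensor of character ids
# per "file", consumed split-by-split through a DataLoader.
import torch
from torch import cuda
from torch.utils.data import Dataset, DataLoader


class CharDataset(Dataset):
    def __init__(self, texts):
        self.texts = texts  # one string standing in for one training file

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        # map each character to an integer id, as TextDataset.charsplit does
        return torch.tensor([ord(c) for c in self.texts[index]], dtype=torch.long)


loader = DataLoader(CharDataset(["hello", "world"]), shuffle=True, num_workers=0)
device = torch.device("cuda" if cuda.is_available() else "cpu")
for split in loader:  # batch_size defaults to 1: one file's ids per iteration
    split = split.to(device)  # move to GPU only when one is actually available
```

The trainer's distinction between a `shuffle=False` first pass and `shuffle=True` on later epochs is dropped here for brevity.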
Rujaan/Upscale.io
|
[
"be79e902f008f75de71969f7e01b1577efda20aa",
"be79e902f008f75de71969f7e01b1577efda20aa"
] |
[
"upscale.py",
"ModelTrain/codes/models/networks.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nimport sys\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nimport cv2\nimport numpy as np\nimport torch\nimport typer\nfrom rich import print\nfrom rich.logging import RichHandler\nfrom rich.progress import (\n BarColumn,\n Progress,\n # SpinnerColumn,\n TaskID,\n TimeRemainingColumn,\n)\n\nimport utils.architecture as arch\nimport utils.dataops as ops\n\n\nclass SeamlessOptions(str, Enum):\n tile = \"tile\"\n mirror = \"mirror\"\n replicate = \"replicate\"\n alpha_pad = \"alpha_pad\"\n\n\nclass AlphaOptions(str, Enum):\n no_alpha = \"no_alpha\"\n bas = \"bas\"\n alpha_separately = \"alpha_separately\"\n swapping = \"swapping\"\n\n\nclass Upscale:\n model_str: str = 'None'\n input: Path = None\n output: Path = None\n reverse: bool = None\n skip_existing: bool = None\n delete_input: bool = None\n seamless: SeamlessOptions = None\n cpu: bool = None\n fp16: bool = None\n # device_id: int = None\n cache_max_split_depth: bool = None\n binary_alpha: bool = None\n ternary_alpha: bool = None\n alpha_threshold: float = None\n alpha_boundary_offset: float = None\n alpha_mode: AlphaOptions = None\n log: logging.Logger = None\n\n device: torch.device = None\n in_nc: int = None\n out_nc: int = None\n last_model: str = None\n last_in_nc: int = None\n last_out_nc: int = None\n last_nf: int = None\n last_nb: int = None\n last_scale: int = None\n last_kind: str = None\n model: Union[arch.nn.Module, arch.RRDBNet, arch.SPSRNet] = None\n\n def __init__(\n self,\n model: str,\n input: Path,\n output: Path,\n reverse: bool = False,\n skip_existing: bool = False,\n delete_input: bool = False,\n seamless: Optional[SeamlessOptions] = None,\n cpu: bool = False,\n fp16: bool = False,\n device_id: int = 0,\n cache_max_split_depth: bool = False,\n binary_alpha: bool = False,\n ternary_alpha: bool = False,\n alpha_threshold: float = 0.5,\n alpha_boundary_offset: float = 0.2,\n alpha_mode: Optional[AlphaOptions] = None,\n log: logging.Logger = logging.getLogger(),\n ) -> None:\n self.model_str = model\n self.input = input.resolve()\n self.output = output.resolve()\n self.reverse = reverse\n self.skip_existing = skip_existing\n self.delete_input = delete_input\n self.seamless = seamless\n self.cpu = cpu\n self.fp16 = fp16\n self.device = torch.device(\"cpu\" if self.cpu else f\"cuda:{device_id}\")\n self.cache_max_split_depth = cache_max_split_depth\n self.binary_alpha = binary_alpha\n self.ternary_alpha = ternary_alpha\n self.alpha_threshold = alpha_threshold\n self.alpha_boundary_offset = alpha_boundary_offset\n self.alpha_mode = alpha_mode\n self.log = log\n if self.fp16:\n torch.set_default_tensor_type(\n torch.HalfTensor if self.cpu else torch.cuda.HalfTensor\n )\n\n def run(self) -> None:\n model_chain = (\n self.model_str.split(\"+\")\n if \"+\" in self.model_str\n else self.model_str.split(\">\")\n )\n\n for idx, model in enumerate(model_chain):\n\n interpolations = (\n model.split(\"|\") if \"|\" in self.model_str else model.split(\"&\")\n )\n\n if len(interpolations) > 1:\n for i, interpolation in enumerate(interpolations):\n interp_model, interp_amount = (\n interpolation.split(\"@\")\n if \"@\" in interpolation\n else interpolation.split(\":\")\n )\n interp_model = self.__check_model_path(interp_model)\n interpolations[i] = f\"{interp_model}@{interp_amount}\"\n model_chain[idx] = \"&\".join(interpolations)\n else:\n model_chain[idx] = 
self.__check_model_path(model)\n\n if not self.input.exists():\n self.log.error(f'Folder \"{self.input}\" does not exist.')\n sys.exit(1)\n elif self.input.is_file():\n self.log.error(f'Folder \"{self.input}\" is a file.')\n sys.exit(1)\n elif self.output.is_file():\n self.log.error(f'Folder \"{self.output}\" is a file.')\n sys.exit(1)\n elif not self.output.exists():\n self.output.mkdir(parents=True)\n\n self.in_nc = None\n self.out_nc = None\n\n print(\n 'Model{:s}: \"{:s}\"'.format(\n \"s\" if len(model_chain) > 1 else \"\",\n # \", \".join([Path(x).stem for x in model_chain]),\n \", \".join([x for x in model_chain]),\n )\n )\n\n images: List[Path] = []\n for ext in [\"png\", \"jpg\", \"jpeg\", \"gif\", \"bmp\", \"tiff\", \"tga\"]:\n images.extend(self.input.glob(f\"**/*.{ext}\"))\n\n # Store the maximum split depths for each model in the chain\n # TODO: there might be a better way of doing this but it's good enough for now\n split_depths = {}\n\n with Progress(\n # SpinnerColumn(),\n \"[progress.description]{task.description}\",\n BarColumn(),\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n ) as progress:\n task_upscaling = progress.add_task(\"Upscaling\", total=len(images))\n for idx, img_path in enumerate(images, 1):\n img_input_path_rel = img_path.relative_to(self.input)\n output_dir = self.output.joinpath(img_input_path_rel).parent\n img_output_path_rel = output_dir.joinpath(f\"{img_path.stem}.jpg\")\n output_dir.mkdir(parents=True, exist_ok=True)\n if len(model_chain) == 1:\n self.log.info(\n f'Processing {str(idx).zfill(len(str(len(images))))}: \"{img_input_path_rel}\"'\n )\n if self.skip_existing and img_output_path_rel.is_file():\n self.log.warning(\"Already exists, skipping\")\n if self.delete_input:\n img_path.unlink(missing_ok=True)\n progress.advance(task_upscaling)\n continue\n # read image\n img = cv2.imread(str(img_path.absolute()), cv2.IMREAD_UNCHANGED)\n if len(img.shape) < 3:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n # Seamless modes\n if self.seamless == SeamlessOptions.tile:\n img = cv2.copyMakeBorder(img, 16, 16, 16, 16, cv2.BORDER_WRAP)\n elif self.seamless == SeamlessOptions.mirror:\n img = cv2.copyMakeBorder(\n img, 16, 16, 16, 16, cv2.BORDER_REFLECT_101\n )\n elif self.seamless == SeamlessOptions.replicate:\n img = cv2.copyMakeBorder(img, 16, 16, 16, 16, cv2.BORDER_REPLICATE)\n elif self.seamless == SeamlessOptions.alpha_pad:\n img = cv2.copyMakeBorder(\n img, 16, 16, 16, 16, cv2.BORDER_CONSTANT, value=[0, 0, 0, 0]\n )\n final_scale: int = 1\n\n task_model_chain: TaskID = None\n if len(model_chain) > 1:\n task_model_chain = progress.add_task(\n f'{str(idx).zfill(len(str(len(images))))} - \"{img_input_path_rel}\"',\n total=len(model_chain),\n )\n for i, model_path in enumerate(model_chain):\n\n img_height, img_width = img.shape[:2]\n\n # Load the model so we can access the scale\n self.load_model(model_path)\n\n if self.cache_max_split_depth and len(split_depths.keys()) > 0:\n rlt, depth = ops.auto_split_upscale(\n img,\n self.upscale,\n self.last_scale,\n max_depth=split_depths[i],\n )\n else:\n rlt, depth = ops.auto_split_upscale(\n img, self.upscale, self.last_scale\n )\n split_depths[i] = depth\n\n final_scale *= self.last_scale\n\n # This is for model chaining\n img = rlt.astype(\"uint8\")\n if len(model_chain) > 1:\n progress.advance(task_model_chain)\n\n if self.seamless:\n rlt = self.crop_seamless(rlt, final_scale)\n\n cv2.imwrite(str(img_output_path_rel.absolute()), rlt)\n\n if self.delete_input:\n 
img_path.unlink(missing_ok=True)\n\n progress.advance(task_upscaling)\n\n def __check_model_path(self, model_path: str) -> str:\n if Path(model_path).is_file():\n return model_path\n elif Path(\"./models/\").joinpath(model_path).is_file():\n return str(Path(\"./models/\").joinpath(model_path))\n else:\n self.log.error(f'Model \"{model_path}\" does not exist.')\n sys.exit(1)\n\n # This code is a somewhat modified version of BlueAmulet's fork of ESRGAN by Xinntao\n def process(self, img: np.ndarray):\n \"\"\"\n Does the processing part of ESRGAN. This method only exists because the same block of code needs to be ran twice for images with transparency.\n\n Parameters:\n img (array): The image to process\n\n Returns:\n rlt (array): The processed image\n \"\"\"\n if img.shape[2] == 3:\n img = img[:, :, [2, 1, 0]]\n elif img.shape[2] == 4:\n img = img[:, :, [2, 1, 0, 3]]\n img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()\n if self.fp16:\n img = img.half()\n img_LR = img.unsqueeze(0)\n img_LR = img_LR.to(self.device)\n\n output = self.model(img_LR).data.squeeze(0).float().cpu().clamp_(0, 1).numpy()\n if output.shape[0] == 3:\n output = output[[2, 1, 0], :, :]\n elif output.shape[0] == 4:\n output = output[[2, 1, 0, 3], :, :]\n output = np.transpose(output, (1, 2, 0))\n return output\n\n def load_model(self, model_path: str):\n if model_path != self.last_model:\n # interpolating OTF, example: 4xBox:25&4xPSNR:75\n if (\":\" in model_path or \"@\" in model_path) and (\n \"&\" in model_path or \"|\" in model_path\n ):\n interps = model_path.split(\"&\")[:2]\n model_1 = torch.load(interps[0].split(\"@\")[0])\n model_2 = torch.load(interps[1].split(\"@\")[0])\n state_dict = OrderedDict()\n for k, v_1 in model_1.items():\n v_2 = model_2[k]\n state_dict[k] = (int(interps[0].split(\"@\")[1]) / 100) * v_1 + (\n int(interps[1].split(\"@\")[1]) / 100\n ) * v_2\n else:\n state_dict = torch.load(model_path)\n\n if \"conv_first.weight\" in state_dict:\n print(\"Attempting to convert and load a new-format model\")\n old_net = {}\n items = []\n for k, v in state_dict.items():\n items.append(k)\n\n old_net[\"model.0.weight\"] = state_dict[\"conv_first.weight\"]\n old_net[\"model.0.bias\"] = state_dict[\"conv_first.bias\"]\n\n for k in items.copy():\n if \"RDB\" in k:\n ori_k = k.replace(\"RRDB_trunk.\", \"model.1.sub.\")\n if \".weight\" in k:\n ori_k = ori_k.replace(\".weight\", \".0.weight\")\n elif \".bias\" in k:\n ori_k = ori_k.replace(\".bias\", \".0.bias\")\n old_net[ori_k] = state_dict[k]\n items.remove(k)\n\n old_net[\"model.1.sub.23.weight\"] = state_dict[\"trunk_conv.weight\"]\n old_net[\"model.1.sub.23.bias\"] = state_dict[\"trunk_conv.bias\"]\n old_net[\"model.3.weight\"] = state_dict[\"upconv1.weight\"]\n old_net[\"model.3.bias\"] = state_dict[\"upconv1.bias\"]\n old_net[\"model.6.weight\"] = state_dict[\"upconv2.weight\"]\n old_net[\"model.6.bias\"] = state_dict[\"upconv2.bias\"]\n old_net[\"model.8.weight\"] = state_dict[\"HRconv.weight\"]\n old_net[\"model.8.bias\"] = state_dict[\"HRconv.bias\"]\n old_net[\"model.10.weight\"] = state_dict[\"conv_last.weight\"]\n old_net[\"model.10.bias\"] = state_dict[\"conv_last.bias\"]\n state_dict = old_net\n\n # extract model information\n scale2 = 0\n max_part = 0\n plus = False\n if \"f_HR_conv1.0.weight\" in state_dict:\n kind = \"SPSR\"\n scalemin = 4\n else:\n kind = \"ESRGAN\"\n scalemin = 6\n for part in list(state_dict):\n parts = part.split(\".\")\n n_parts = len(parts)\n if n_parts == 5 and parts[2] == \"sub\":\n nb = int(parts[3])\n elif 
n_parts == 3:\n part_num = int(parts[1])\n if (\n part_num > scalemin\n and parts[0] == \"model\"\n and parts[2] == \"weight\"\n ):\n scale2 += 1\n if part_num > max_part:\n max_part = part_num\n self.out_nc = state_dict[part].shape[0]\n if \"conv1x1\" in part and not plus:\n plus = True\n\n upscale = 2 ** scale2\n self.in_nc = state_dict[\"model.0.weight\"].shape[1]\n if kind == \"SPSR\":\n self.out_nc = state_dict[\"f_HR_conv1.0.weight\"].shape[0]\n nf = state_dict[\"model.0.weight\"].shape[0]\n\n if (\n self.in_nc != self.last_in_nc\n or self.out_nc != self.last_out_nc\n or nf != self.last_nf\n or nb != self.last_nb\n or upscale != self.last_scale\n or kind != self.last_kind\n ):\n if kind == \"ESRGAN\":\n self.model = arch.RRDBNet(\n in_nc=self.in_nc,\n out_nc=self.out_nc,\n nf=nf,\n nb=nb,\n gc=32,\n upscale=upscale,\n norm_type=None,\n act_type=\"leakyrelu\",\n mode=\"CNA\",\n upsample_mode=\"upconv\",\n plus=plus,\n )\n elif kind == \"SPSR\":\n self.model = arch.SPSRNet(\n self.in_nc,\n self.out_nc,\n nf,\n nb,\n gc=32,\n upscale=upscale,\n norm_type=None,\n act_type=\"leakyrelu\",\n mode=\"CNA\",\n upsample_mode=\"upconv\",\n )\n self.last_in_nc = self.in_nc\n self.last_out_nc = self.out_nc\n self.last_nf = nf\n self.last_nb = nb\n self.last_scale = upscale\n self.last_kind = kind\n self.last_model = model_path\n\n self.model.load_state_dict(state_dict, strict=True)\n del state_dict\n self.model.eval()\n for k, v in self.model.named_parameters():\n v.requires_grad = False\n self.model = self.model.to(self.device)\n\n # This code is a somewhat modified version of BlueAmulet's fork of ESRGAN by Xinntao\n def upscale(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Upscales the image passed in with the specified model\n\n Parameters:\n img: The image to upscale\n model_path (string): The model to use\n\n Returns:\n output: The processed image\n \"\"\"\n\n img = img * 1.0 / np.iinfo(img.dtype).max\n\n if (\n img.ndim == 3\n and img.shape[2] == 4\n and self.last_in_nc == 3\n and self.last_out_nc == 3\n ):\n\n # Fill alpha with white and with black, remove the difference\n if self.alpha_mode == AlphaOptions.bas:\n img1 = np.copy(img[:, :, :3])\n img2 = np.copy(img[:, :, :3])\n for c in range(3):\n img1[:, :, c] *= img[:, :, 3]\n img2[:, :, c] = (img2[:, :, c] - 1) * img[:, :, 3] + 1\n\n output1 = self.process(img1)\n output2 = self.process(img2)\n alpha = 1 - np.mean(output2 - output1, axis=2)\n output = np.dstack((output1, alpha))\n output = np.clip(output, 0, 1)\n # Upscale the alpha channel itself as its own image\n elif self.alpha_mode == AlphaOptions.alpha_separately:\n img1 = np.copy(img[:, :, :3])\n img2 = cv2.merge((img[:, :, 3], img[:, :, 3], img[:, :, 3]))\n output1 = self.process(img1)\n output2 = self.process(img2)\n output = cv2.merge(\n (\n output1[:, :, 0],\n output1[:, :, 1],\n output1[:, :, 2],\n output2[:, :, 0],\n )\n )\n # Use the alpha channel like a regular channel\n elif self.alpha_mode == AlphaOptions.swapping:\n img1 = cv2.merge((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n img2 = cv2.merge((img[:, :, 1], img[:, :, 2], img[:, :, 3]))\n output1 = self.process(img1)\n output2 = self.process(img2)\n output = cv2.merge(\n (\n output1[:, :, 0],\n output1[:, :, 1],\n output1[:, :, 2],\n output2[:, :, 2],\n )\n )\n # Remove alpha\n else:\n img1 = np.copy(img[:, :, :3])\n output = self.process(img1)\n output = cv2.cvtColor(output, cv2.COLOR_BGR2BGRA)\n\n if self.binary_alpha:\n alpha = output[:, :, 3]\n threshold = self.alpha_threshold\n _, alpha = cv2.threshold(alpha, 
threshold, 1, cv2.THRESH_BINARY)\n output[:, :, 3] = alpha\n elif self.ternary_alpha:\n alpha = output[:, :, 3]\n half_transparent_lower_bound = (\n self.alpha_threshold - self.alpha_boundary_offset\n )\n half_transparent_upper_bound = (\n self.alpha_threshold + self.alpha_boundary_offset\n )\n alpha = np.where(\n alpha < half_transparent_lower_bound,\n 0,\n np.where(alpha <= half_transparent_upper_bound, 0.5, 1),\n )\n output[:, :, 3] = alpha\n else:\n if img.ndim == 2:\n img = np.tile(\n np.expand_dims(img, axis=2), (1, 1, min(self.last_in_nc, 3))\n )\n if img.shape[2] > self.last_in_nc: # remove extra channels\n self.log.warning(\"Truncating image channels\")\n img = img[:, :, : self.last_in_nc]\n # pad with solid alpha channel\n elif img.shape[2] == 3 and self.last_in_nc == 4:\n img = np.dstack((img, np.full(img.shape[:-1], 1.0)))\n output = self.process(img)\n\n output = (output * 255.0).round()\n\n return output\n\n def crop_seamless(self, img: np.ndarray, scale: int) -> np.ndarray:\n img_height, img_width = img.shape[:2]\n y, x = 16 * scale, 16 * scale\n h, w = img_height - (32 * scale), img_width - (32 * scale)\n img = img[y : y + h, x : x + w]\n return img\n\n\napp = typer.Typer()\n\n\n@app.command()\ndef main(\n model: str = typer.Argument(...),\n input: Path = typer.Option(Path(\"input\"), \"--input\", \"-i\", help=\"Input folder\"),\n output: Path = typer.Option(Path(\"output\"), \"--output\", \"-o\", help=\"Output folder\"),\n reverse: bool = typer.Option(False, \"--reverse\", \"-r\", help=\"Reverse Order\"),\n skip_existing: bool = typer.Option(\n False,\n \"--skip-existing\",\n \"-se\",\n help=\"Skip existing output files\",\n ),\n delete_input: bool = typer.Option(\n False,\n \"--delete-input\",\n \"-di\",\n help=\"Delete input files after upscaling\",\n ),\n seamless: SeamlessOptions = typer.Option(\n None,\n \"--seamless\",\n \"-s\",\n case_sensitive=False,\n help=\"Helps seamlessly upscale an image. tile = repeating along edges. mirror = reflected along edges. replicate = extended pixels along edges. alpha_pad = extended alpha border.\",\n ),\n cpu: bool = typer.Option(False, \"--cpu\", \"-c\", help=\"Use CPU instead of CUDA\"),\n fp16: bool = typer.Option(\n False,\n \"--floating-point-16\",\n \"-fp16\",\n help=\"Use FloatingPoint16/Halftensor type for images.\",\n ),\n device_id: int = typer.Option(\n 0, \"--device-id\", \"-did\", help=\"The numerical ID of the GPU you want to use.\"\n ),\n cache_max_split_depth: bool = typer.Option(\n False,\n \"--cache-max-split-depth\",\n \"-cmsd\",\n help=\"Caches the maximum recursion depth used by the split/merge function. Useful only when upscaling images of the same size.\",\n ),\n binary_alpha: bool = typer.Option(\n False,\n \"--binary-alpha\",\n \"-ba\",\n help=\"Whether to use a 1 bit alpha transparency channel, Useful for PSX upscaling\",\n ),\n ternary_alpha: bool = typer.Option(\n False,\n \"--ternary-alpha\",\n \"-ta\",\n help=\"Whether to use a 2 bit alpha transparency channel, Useful for PSX upscaling\",\n ),\n alpha_threshold: float = typer.Option(\n 0.5,\n \"--alpha-threshold\",\n \"-at\",\n help=\"Only used when binary_alpha is supplied. Defines the alpha threshold for binary transparency\",\n ),\n alpha_boundary_offset: float = typer.Option(\n 0.2,\n \"--alpha-boundary-offset\",\n \"-abo\",\n help=\"Only used when binary_alpha is supplied. 
Determines the offset boundary from the alpha threshold for half transparency.\",\n ),\n alpha_mode: AlphaOptions = typer.Option(\n None,\n \"--alpha-mode\",\n \"-am\",\n help=\"Type of alpha processing to use. no_alpha = is no alpha processing. bas = is BA's difference method. alpha_separately = is upscaling the alpha channel separately (like IEU). swapping = is swapping an existing channel with the alpha channel.\",\n ),\n verbose: bool = typer.Option(\n False,\n \"--verbose\",\n \"-v\",\n help=\"Verbose mode\",\n ),\n):\n\n logging.basicConfig(\n level=logging.DEBUG if verbose else logging.WARNING,\n format=\"%(message)s\",\n datefmt=\"[%X]\",\n handlers=[RichHandler(markup=True)],\n # handlers=[RichHandler(markup=True, rich_tracebacks=True)],\n )\n\n upscale = Upscale(\n model=model,\n input=input,\n output=output,\n reverse=reverse,\n skip_existing=skip_existing,\n delete_input=delete_input,\n seamless=seamless,\n cpu=cpu,\n fp16=fp16,\n device_id=device_id,\n cache_max_split_depth=cache_max_split_depth,\n binary_alpha=binary_alpha,\n ternary_alpha=ternary_alpha,\n alpha_threshold=alpha_threshold,\n alpha_boundary_offset=alpha_boundary_offset,\n alpha_mode=alpha_mode,\n )\n upscale.run()\n\n\nif __name__ == \"__main__\":\n app()\n",
"import functools\nimport logging\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom options.options import opt_get\n\n#import models.modules.sft_arch as sft_arch\nlogger = logging.getLogger('base')\n####################\n# initialize networks\n####################\n\ndef weights_init_normal(m, bias_fill=0, mean=0.0, std=0.02):\n # classname = m.__class__.__name__\n # if classname.find('Conv') != -1 and classname != \"DiscConvBlock\": #ASRResNet's DiscConvBlock causes confusion\n # elif classname.find('Linear') != -1:\n if hasattr(m, 'weight') and isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n # init.normal_(m.weight.data, 0.0, std)\n init.normal_(m.weight, mean=mean, std=std)\n if m.bias is not None:\n m.bias.data.fill_(bias_fill)\n # elif classname.find('BatchNorm2d') != -1:\n elif isinstance(m, nn.modules.batchnorm._BatchNorm):\n init.normal_(m.weight.data, mean=1.0, std=std) # BN also uses norm\n if hasattr(m, 'bias') and m.bias is not None:\n # init.constant_(m.bias.data, 0.0)\n m.bias.data.fill_(bias_fill)\n\ndef weights_init_xavier(m, scale=1, bias_fill=0, **kwargs):\n # classname = m.__class__.__name__\n # if classname.find('Conv') != -1 and classname != \"DiscConvBlock\": #ASRResNet's DiscConvBlock causes confusion\n # elif classname.find('Linear') != -1:\n if hasattr(m, 'weight') and isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n # init.xavier_normal_(m.weight.data, gain=gain)\n init.xavier_normal_(m.weight, **kwargs)\n m.weight.data *= scale\n if m.bias is not None:\n m.bias.data.fill_(bias_fill)\n # elif classname.find('BatchNorm2d') != -1:\n # elif isinstance(m, _BatchNorm):\n elif isinstance(m, nn.modules.batchnorm._BatchNorm):\n # init.constant_(m.weight.data, 1.0)\n init.constant_(m.weight, 1)\n if hasattr(m, 'bias') and m.bias is not None:\n # init.constant_(m.bias.data, 0.0)\n m.bias.data.fill_(bias_fill)\n\ndef weights_init_kaiming(m, scale=1, bias_fill=0, **kwargs):\n # classname = m.__class__.__name__\n # if classname.find('Conv') != -1 and classname != \"DiscConvBlock\": #ASRResNet's DiscConvBlock causes confusion\n # elif classname.find('Linear') != -1:\n if hasattr(m, 'weight') and isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n # init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n init.kaiming_normal_(m.weight, **kwargs)\n m.weight.data *= scale\n if hasattr(m, 'bias') and m.bias is not None:\n m.bias.data.fill_(bias_fill)\n # elif classname.find('BatchNorm2d') != -1:\n # elif isinstance(m, _BatchNorm):\n elif isinstance(m, nn.modules.batchnorm._BatchNorm):\n # init.constant_(m.weight.data, 1.0)\n init.constant_(m.weight, 1)\n if m.bias is not None:\n # init.constant_(m.bias.data, 0.0)\n m.bias.data.fill_(bias_fill)\n\ndef weights_init_orthogonal(m, bias_fill=0, **kwargs):\n # classname = m.__class__.__name__\n # if classname.find('Conv') != -1:\n # elif classname.find('Linear') != -1:\n if hasattr(m, 'weight') and isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n # init.orthogonal_(m.weight.data, gain=1)\n init.orthogonal_(m.weight.data, **kwargs)\n if m.bias is not None:\n m.bias.data.fill_(bias_fill)\n # elif classname.find('BatchNorm2d') != -1:\n elif isinstance(m, nn.modules.batchnorm._BatchNorm):\n # init.constant_(m.weight.data, 1.0)\n init.constant_(m.weight, 1)\n if hasattr(m, 'bias') and m.bias is not None:\n # init.constant_(m.bias.data, 0.0)\n m.bias.data.fill_(bias_fill)\n\ndef init_weights(net, init_type='kaiming', scale=1, std=0.02, gain=0.02):\n '''Initialize network weights.\n To initialize a 
network: \n 1. register CPU/GPU device (with multi-GPU support)\n 2. initialize the network weights\n Parameters:\n net (network) -- the network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n scale (float) -- scaling factor for kaiming.\n gain (float) -- scaling factor for xavier.\n std (float) -- scaling factor for normal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n 'kaiming' is used in the ESRGAN paper, 'normal' in the original pix2pix and CycleGAN paper.\n kaiming and xavier might work better for some applications.\n '''\n logger.info('Initialization method [{:s}]'.format(init_type))\n if init_type == 'normal':\n weights_init_normal_ = functools.partial(weights_init_normal, std=std)\n net.apply(weights_init_normal_)\n if init_type == 'xavier':\n weights_init_xavier_ = functools.partial(weights_init_xavier, gain=gain)\n net.apply(weights_init_xavier_)\n elif init_type == 'kaiming':\n weights_init_kaiming_ = functools.partial(weights_init_kaiming, scale=scale)\n net.apply(weights_init_kaiming_)\n elif init_type == 'orthogonal':\n net.apply(weights_init_orthogonal)\n else:\n raise NotImplementedError('initialization method [{:s}] not implemented'.format(init_type))\n\n\n####################\n# define network\n####################\n\n\n# Generator\ndef define_G(opt, step=0):\n '''Create a generator\n Returns a generator\n The generator is usually initialized with <init_weights>.\n '''\n gpu_ids = opt['gpu_ids']\n opt_net = opt['network_G']\n which_model = opt_net['which_model_G']\n init_type = opt_net.get('init_type', 'kaiming')\n init_scale = opt_net.get('init_scale', 0.1)\n \n if opt_net['net_act']: # If set, use a different activation function\n act_type = opt_net['net_act']\n else: # Use networks defaults\n if which_model == 'sr_resnet':\n act_type = 'relu'\n elif which_model == 'RRDB_net':\n act_type = 'leakyrelu'\n elif which_model == 'ppon':\n act_type = 'leakyrelu'\n else:\n act_type = 'leakyrelu'\n \n if which_model == 'sr_resnet': # SRResNet\n from models.modules.architectures import SRResNet_arch\n netG = SRResNet_arch.SRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \\\n nb=opt_net['nb'], upscale=opt_net['scale'], norm_type=opt_net['norm_type'], \\\n act_type=act_type, mode=opt_net['mode'], upsample_mode='pixelshuffle', \\\n convtype=opt_net['convtype'], finalact=opt_net['finalact'])\n elif which_model == 'sft_arch': # SFT-GAN\n from models.modules.architectures import sft_arch\n netG = sft_arch.SFT_Net()\n elif which_model == 'RRDB_net': # RRDB\n from models.modules.architectures import RRDBNet_arch\n netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \\\n nb=opt_net['nb'], gc=opt_net['gc'], upscale=opt_net['scale'], norm_type=opt_net['norm_type'], \\\n act_type=act_type, mode=opt_net['mode'], upsample_mode='upconv', convtype=opt_net['convtype'], \\\n finalact=opt_net['finalact'], gaussian_noise=opt_net['gaussian'], plus=opt_net['plus'], \n nr=opt_net.get('nr', 3))\n elif which_model == 'MRRDB_net': # Modified RRDB\n from models.modules.architectures import RRDBNet_arch\n netG = RRDBNet_arch.MRRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \\\n nb=opt_net['nb'], gc=opt_net['gc'])\n elif which_model == 'ppon':\n from models.modules.architectures import PPON_arch\n netG = PPON_arch.PPON(in_nc=opt_net['in_nc'], nf=opt_net['nf'], nb=opt_net['nb'], out_nc=opt_net['out_nc'], \n 
upscale=opt_net['scale'], act_type=act_type) #(in_nc=3, nf=64, nb=24, out_nc=3)\n elif which_model == 'asr_cnn':\n from models.modules.architectures import ASRResNet_arch\n netG = ASRResNet_arch.ASRCNN(upscale_factor=opt_net['scale'], spectral_norm = True, self_attention = True, max_pool=True, poolsize = 4, finalact='tanh')\n elif which_model == 'asr_resnet':\n from models.modules.architectures import ASRResNet_arch\n netG = ASRResNet_arch.ASRResNet(scale_factor=opt_net['scale'], spectral_norm = True, self_attention = True, max_pool=True, poolsize = 4)\n elif which_model == 'abpn_net':\n from models.modules.architectures import ABPN_arch\n netG = ABPN_arch.ABPN_v5(input_dim=3, dim=32)\n # netG = ABPN_arch.ABPN_v5(input_dim=opt_net['in_nc'], dim=opt_net['out_nc'])\n elif which_model == 'pan_net': #PAN\n from models.modules.architectures import PAN_arch\n netG = PAN_arch.PAN(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],\n nf=opt_net['nf'], unf=opt_net['unf'], nb=opt_net['nb'], scale=opt_net['scale'],\n self_attention=opt_net.get('self_attention', False), \n double_scpa=opt_net.get('double_scpa', False),\n ups_inter_mode=opt_net.get('ups_inter_mode', 'nearest'))\n elif which_model == 'sofvsr_net':\n from models.modules.architectures import SOFVSR_arch\n netG = SOFVSR_arch.SOFVSR(scale=opt_net['scale'],n_frames=opt_net.get('n_frames', 3),\n channels=opt_net.get('channels', 320), img_ch=opt_net.get('img_ch', 1), \n SR_net=opt_net.get('SR_net', 'sofvsr'), \n sr_nf=opt_net.get('sr_nf', 64), sr_nb=opt_net.get('sr_nb', 23), \n sr_gc=opt_net.get('sr_gc', 32), sr_unf=opt_net.get('sr_unf', 24),\n sr_gaussian_noise=opt_net.get('sr_gaussian_noise', 64), \n sr_plus=opt_net.get('sr_plus', False), sr_sa=opt_net.get('sr_sa', True),\n sr_upinter_mode=opt_net.get('sr_upinter_mode', 'nearest'))\n elif which_model == 'sr3d_net':\n from models.modules.architectures import SR3DNet_arch\n netG = SR3DNet_arch.SR3DNet(scale=opt['scale'], in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], \n nf=opt_net['nf'], nb=opt_net['nb'], n_frames=opt_net.get('n_frames', 5))\n elif which_model == 'rife_net':\n from models.modules.architectures import RIFE_arch\n netG = RIFE_arch.RIFE()\n elif which_model == 'SRFlow_net':\n from models.modules.architectures import SRFlowNet_arch\n netG = SRFlowNet_arch.SRFlowNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],\n nf=opt_net['nf'], nb=opt_net['nb'], scale=opt['scale'], K=opt_net['flow']['K'], opt=opt, step=step)\n elif which_model == 'unet_net':\n from models.modules.architectures import UNet_arch\n netG = UNet_arch.UnetGenerator(input_nc=opt_net['in_nc'], output_nc=opt_net['out_nc'], \n num_downs=opt_net['num_downs'], ngf=opt_net['ngf'], \n norm_type=opt_net['norm_type'], use_dropout=opt_net['use_dropout'],\n upsample_mode=opt_net['upsample_mode'])\n elif which_model == 'resnet_net':\n from models.modules.architectures import ResNet_arch\n netG = ResNet_arch.ResnetGenerator(input_nc=opt_net['in_nc'], output_nc=opt_net['out_nc'], \n n_blocks=opt_net['n_blocks'], ngf=opt_net['ngf'], \n norm_type=opt_net['norm_type'], use_dropout=opt_net['use_dropout'],\n upsample_mode=opt_net['upsample_mode'])\n elif which_model == 'DVD_net':\n from models.modules.architectures import DVDNet_arch\n netG = DVDNet_arch.DVDNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'])\n elif which_model == 'EDVR_net':\n from models.modules.architectures import EDVR_arch\n netG = EDVR_arch.EDVR(num_in_ch=opt_net['in_nc'], num_out_ch=opt_net['out_nc'], num_feat=opt_net['nf'], 
num_frame=opt_net['n_frames'],\n deformable_groups=opt_net['deformable_groups'], num_extract_block=opt_net['n_extract_block'], \n num_reconstruct_block=opt_net['n_reconstruct_block'], center_frame_idx=None, with_predeblur=opt_net['predeblur'], \n with_tsa=opt_net['tsa'], upsample_mode=opt_net['upsample_mode'], upscale=opt_net['scale'], \n add_rrdb=opt_net['add_rrdb'], nb=opt_net['nb'])\n else:\n raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))\n\n if opt['is_train'] and which_model != 'MRRDB_net':\n # Note: MRRDB_net initializes the modules during init, no need to initialize again here\n init_weights(netG, init_type=init_type, scale=init_scale)\n if gpu_ids:\n assert torch.cuda.is_available()\n netG = nn.DataParallel(netG)\n return netG\n\n\n# Discriminator\ndef define_D(opt):\n '''Create a discriminator\n Returns a discriminator\n Some of the available types of discriminators:\n vgg_*: discriminators based on a VGG-like network architecture.\n The ones with '_fea' in the name also allow to extract feature \n maps from the discriminator to use for feature losses. \n patchgan: PatchGAN classifier described in the original pix2pix paper.\n It can classify whether 70×70 overlapping patches are real or fake.\n Such a patch-level discriminator architecture has fewer parameters\n than a full-image discriminator and can work on arbitrarily-sized images\n in a fully convolutional fashion.\n [n_layers]: With this option, you can specify the number of conv layers \n in the discriminator with the parameter <n_layers_D> \n (default=3 as used in basic (PatchGAN).)\n multiscale: can create multiple patchgan discriminators that operate at \n different scales. Each one at half the scale of the previous. Must \n coordinate with the LR_size. 
\n pixelgan: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.\n It encourages greater color diversity but has no effect on spatial statistics.\n\n The discriminator is usually initialized with <init_weights>.\n '''\n gpu_ids = opt['gpu_ids']\n opt_net = opt['network_D']\n which_model = opt_net['which_model_D']\n which_model_G = opt_net['which_model_G']\n init_type = opt_net.get('init_type', 'kaiming')\n init_scale = opt_net.get('init_scale', 1)\n \n if which_model_G == 'ppon':\n model_G = 'PPON'\n else:\n model_G = 'ESRGAN'\n \n if which_model == 'dis_acd': # sft-gan, Auxiliary Classifier Discriminator\n from models.modules.architectures import sft_arch\n netD = sft_arch.ACD_VGG_BN_96()\n elif which_model == 'discriminator_vgg_96':\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_96(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n elif which_model == 'discriminator_vgg_128_SN':\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_128_SN()\n elif which_model == 'discriminator_vgg_128':\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_128(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n elif which_model == 'discriminator_vgg_192' or which_model == 'discriminator_192': #vic in PPON its called Discriminator_192, instead of BasicSR's Discriminator_VGG_192\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_192(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n elif which_model == 'discriminator_vgg_256' or which_model == 'discriminator_256':\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_256(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n elif which_model == 'discriminator_vgg': # General adaptative case\n from models.modules.architectures import discriminators\n try:\n size = int(opt['datasets']['train']['HR_size'])\n netD = discriminators.Discriminator_VGG(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n except ValueError:\n raise ValueError('VGG Discriminator size could not be parsed from the HR patch size. 
Check that the image patch size is either a power of 2 or 3 multiplied by a power of 2.')\n elif which_model == 'adiscriminator':\n from models.modules.architectures import ASRResNet_arch\n netD = ASRResNet_arch.ADiscriminator(spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \\\n max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])\n elif which_model == 'adiscriminator_s':\n from models.modules.architectures import ASRResNet_arch\n netD = ASRResNet_arch.ADiscriminator_S(spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \\\n max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'] )\n elif which_model == 'discriminator_vgg_128_fea': #VGG-like discriminator with features extraction\n from models.modules.architectures import discriminators\n netD = discriminators.Discriminator_VGG_128_fea(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], \\\n convtype=opt_net['convtype'], arch=model_G, spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \\\n max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])\n elif which_model == 'discriminator_vgg_fea': #VGG-like discriminator with features extraction\n from models.modules.architectures import discriminators\n try:\n size = int(opt['datasets']['train']['HR_size'])\n netD = discriminators.Discriminator_VGG_fea(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], \\\n convtype=opt_net['convtype'], arch=model_G, spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \\\n max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])\n except ValueError:\n raise ValueError('VGG Discriminator size could not be parsed from the HR patch size. 
Check that the image patch size is either a power of 2 or 3 multiplied by a power of 2.')\n elif which_model == 'patchgan' or which_model == 'NLayerDiscriminator':\n from models.modules.architectures import discriminators\n netD = discriminators.NLayerDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'], \n n_layers=opt_net['nlayer'], patch=opt_net.get('patch_output', True), \n use_spectral_norm=opt_net.get('use_spectral_norm', False))\n elif which_model == 'pixelgan' or which_model == 'PixelDiscriminator':\n from models.modules.architectures import discriminators\n netD = discriminators.PixelDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'])\n elif which_model == 'multiscale':\n from models.modules.architectures import discriminators\n netD = discriminators.MultiscaleDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'], \\\n n_layers=opt_net['nlayer'], num_D=opt_net['num_D'])\n else:\n raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))\n \"\"\"\n elif which_model.startswith('discriminator_vgg_'): # User-defined case\n models.modules.architectures import discriminators\n vgg_size = which_model[18:]\n try:\n size = int(vgg_size)\n netD = discriminators.Discriminator_VGG(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \\\n norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)\n except ValueError:\n raise ValueError('VGG Discriminator size [{:s}] could not be parsed.'.format(vgg_size))\n #\"\"\"\n init_weights(netD, init_type=init_type, scale=init_scale)\n if gpu_ids:\n netD = nn.DataParallel(netD)\n return netD\n\n\ndef define_F(opt, use_bn=False):\n '''Create a feature extraction network for feature losses\n '''\n from models.modules.architectures import perceptual\n \n feat_network = 'vgg' #opt['feat_network'] #can be configurable option \n \n gpu_ids = opt['gpu_ids']\n if opt['datasets']['train']['znorm']:\n z_norm = opt['datasets']['train']['znorm']\n else:\n z_norm = False\n device = torch.device('cuda' if gpu_ids else 'cpu')\n # pytorch pretrained VGG19-54, before ReLU.\n if use_bn:\n feature_layer = 49\n else:\n feature_layer = 34\n \n if feat_network == 'resnet': #ResNet\n netF = perceptual.ResNet101FeatureExtractor(use_input_norm=True, device=device)\n else: #VGG network (default)\n netF = perceptual.VGGFeatureExtractor(feature_layer=feature_layer, use_bn=use_bn, \\\n use_input_norm=True, device=device, z_norm=z_norm)\n \n if gpu_ids:\n netF = nn.DataParallel(netF)\n netF.eval() # No need to train\n return netF\n\n\n####################\n# model coversions and validation for \n# network loading\n####################\n\ndef normal2mod(state_dict):\n if 'model.0.weight' in state_dict:\n try:\n logger.info('Converting and loading an RRDB model to modified RRDB')\n except:\n print('Converting and loading an RRDB model to modified RRDB')\n crt_net = {}\n items = []\n\n for k, v in state_dict.items():\n items.append(k)\n\n # # directly copy\n # for k, v in crt_net.items():\n # if k in state_dict and state_dict[k].size() == v.size():\n # crt_net[k] = state_dict[k]\n # items.remove(k)\n\n crt_net['conv_first.weight'] = state_dict['model.0.weight']\n crt_net['conv_first.bias'] = state_dict['model.0.bias']\n\n for k in items.copy():\n if 'RDB' in k:\n ori_k = k.replace('model.1.sub.', 'RRDB_trunk.')\n if '.0.weight' in k:\n ori_k = ori_k.replace('.0.weight', '.weight')\n elif '.0.bias' in k:\n ori_k = ori_k.replace('.0.bias', '.bias')\n 
crt_net[ori_k] = state_dict[k]\n items.remove(k)\n\n crt_net['trunk_conv.weight'] = state_dict['model.1.sub.23.weight']\n crt_net['trunk_conv.bias'] = state_dict['model.1.sub.23.bias']\n crt_net['upconv1.weight'] = state_dict['model.3.weight']\n crt_net['upconv1.bias'] = state_dict['model.3.bias']\n crt_net['upconv2.weight'] = state_dict['model.6.weight']\n crt_net['upconv2.bias'] = state_dict['model.6.bias']\n crt_net['HRconv.weight'] = state_dict['model.8.weight']\n crt_net['HRconv.bias'] = state_dict['model.8.bias']\n crt_net['conv_last.weight'] = state_dict['model.10.weight']\n crt_net['conv_last.bias'] = state_dict['model.10.bias']\n state_dict = crt_net\n\n return state_dict\n\ndef mod2normal(state_dict):\n if 'conv_first.weight' in state_dict:\n try:\n logger.info('Converting and loading a modified RRDB model to normal RRDB')\n except:\n print('Converting and loading a modified RRDB model to normal RRDB')\n crt_net = {}\n items = []\n for k, v in state_dict.items():\n items.append(k)\n\n crt_net['model.0.weight'] = state_dict['conv_first.weight']\n crt_net['model.0.bias'] = state_dict['conv_first.bias']\n\n for k in items.copy():\n if 'RDB' in k:\n ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')\n if '.weight' in k:\n ori_k = ori_k.replace('.weight', '.0.weight')\n elif '.bias' in k:\n ori_k = ori_k.replace('.bias', '.0.bias')\n crt_net[ori_k] = state_dict[k]\n items.remove(k)\n\n crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']\n crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']\n crt_net['model.3.weight'] = state_dict['upconv1.weight']\n crt_net['model.3.bias'] = state_dict['upconv1.bias']\n crt_net['model.6.weight'] = state_dict['upconv2.weight']\n crt_net['model.6.bias'] = state_dict['upconv2.bias']\n crt_net['model.8.weight'] = state_dict['HRconv.weight']\n crt_net['model.8.bias'] = state_dict['HRconv.bias']\n crt_net['model.10.weight'] = state_dict['conv_last.weight']\n crt_net['model.10.bias'] = state_dict['conv_last.bias']\n state_dict = crt_net\n return state_dict\n\ndef model_val(opt_net=None, state_dict=None, model_type=None):\n if model_type == 'G':\n model = opt_get(opt_net, ['network_G', 'which_model_G'])\n if model == 'RRDB_net': # tonormal\n return mod2normal(state_dict)\n elif model == 'MRRDB_net' or model == 'SRFlow_net': # tomod\n return normal2mod(state_dict)\n else:\n return state_dict\n elif model_type == 'D':\n # no particular Discriminator validation at the moment\n # model = opt_get(opt_net, ['network_G', 'which_model_D'])\n return state_dict\n else:\n # if model_type not provided, return unchanged \n # (can do other validations here)\n return state_dict\n\ndef cem2normal(state_dict):\n if str(list(state_dict.keys())[0]).startswith('generated_image_model'):\n try:\n logger.info('Unwrapping the Generator model from CEM')\n except:\n print('Unwrapping the Generator model from CEM')\n crt_net = {}\n items = []\n\n for k, v in state_dict.items():\n items.append(k)\n\n for k in items.copy():\n if 'generated_image_model.module.' in k:\n ori_k = k.replace('generated_image_model.module.', '')\n crt_net[ori_k] = state_dict[k]\n items.remove(k)\n\n state_dict = crt_net\n\n return state_dict\n"
] |
[
[
"torch.set_default_tensor_type",
"numpy.expand_dims",
"torch.load",
"numpy.clip",
"numpy.dstack",
"numpy.full",
"numpy.copy",
"numpy.mean",
"numpy.iinfo",
"numpy.transpose",
"torch.device",
"numpy.where"
],
[
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.cuda.is_available",
"torch.device",
"torch.nn.DataParallel",
"torch.nn.init.kaiming_normal_"
]
] |
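
Most of the `torch.nn.init.*` entries above come from networks.py's weight-initialization helpers. A compressed, hedged sketch of that pattern follows — a toy `nn.Sequential` stands in for the record's generator architectures — applying kaiming initialization through `Module.apply` and wrapping in `nn.DataParallel` only when CUDA is present, as `define_G` does:

```python
# Minimal sketch of the init pattern recorded above: apply
# torch.nn.init.kaiming_normal_ to conv/linear weights via Module.apply,
# then wrap in nn.DataParallel when CUDA is available. The `scale`
# factor mirrors the `scale` argument of weights_init_kaiming.
import functools

import torch
import torch.nn as nn
from torch.nn import init


def kaiming_init(m, scale=0.1):
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        init.kaiming_normal_(m.weight)
        m.weight.data *= scale  # dampen initial weights, as in the ESRGAN setup
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, nn.modules.batchnorm._BatchNorm):
        init.constant_(m.weight, 1)  # batchnorm gets constant/normal init instead
        m.bias.data.fill_(0)


net = nn.Sequential(
    nn.Conv2d(3, 64, 3, padding=1),
    nn.LeakyReLU(),
    nn.Conv2d(64, 3, 3, padding=1),
)
net.apply(functools.partial(kaiming_init, scale=0.1))
if torch.cuda.is_available():
    net = nn.DataParallel(net)  # mirrors define_G's gpu_ids branch
```

Using `functools.partial` to bind the scale before `net.apply` is the same trick `init_weights` uses to pass `std`, `gain`, or `scale` into the per-module hooks.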
ivanvikhrev/open_model_zoo
|
[
"322e7ac5ed8a17611b56c46e5e56bfef05d8cc2a"
] |
[
"tools/accuracy_checker/accuracy_checker/adapters/mask_rcnn_with_text.py"
] |
[
"\"\"\"\nCopyright (c) 2018-2020 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nfrom .mask_rcnn import MaskRCNNAdapter\nfrom ..config import StringField, NumberField\nfrom ..representation import TextDetectionPrediction\n\n\nclass MaskRCNNWithTextAdapter(MaskRCNNAdapter):\n __provider__ = 'mask_rcnn_with_text'\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'classes_out': StringField(\n description=\"Name of output layer with information about classes.\",\n optional=False\n ),\n 'scores_out': StringField(\n description=\"Name of output layer with bbox scores.\",\n optional=True\n ),\n 'boxes_out': StringField(\n description=\"Name of output layer with bboxes.\",\n optional=False\n ),\n 'raw_masks_out': StringField(\n description='Name of output layer with raw instances masks.',\n optional=False\n ),\n 'texts_out': StringField(\n description='Name of output layer with texts.',\n optional=False\n ),\n 'confidence_threshold': NumberField(\n description='Confidence threshold that is used to filter out detected instances.',\n optional=False\n ),\n })\n\n return parameters\n\n def configure(self):\n self.classes_out = self.get_value_from_config('classes_out')\n self.scores_out = self.get_value_from_config('scores_out')\n self.boxes_out = self.get_value_from_config('boxes_out')\n self.num_detections_out = self.get_value_from_config('num_detections_out')\n self.raw_masks_out = self.get_value_from_config('raw_masks_out')\n self.texts_out = self.get_value_from_config('texts_out')\n self.confidence_threshold = self.get_value_from_config('confidence_threshold')\n\n def process(self, raw, identifiers, frame_meta):\n raw_outputs = self._extract_predictions(raw, frame_meta)\n\n classes = raw_outputs[self.classes_out]\n if self.scores_out:\n valid_detections_mask = classes > 0\n scores = raw_outputs[self.scores_out][valid_detections_mask]\n else:\n scores = raw_outputs[self.boxes_out][:, 4]\n valid_detections_mask = scores > 0\n scores = scores[valid_detections_mask]\n classes = classes[valid_detections_mask].astype(np.uint32)\n boxes = raw_outputs[self.boxes_out][valid_detections_mask, :4]\n raw_masks = raw_outputs[self.raw_masks_out][valid_detections_mask]\n texts = raw_outputs[self.texts_out][valid_detections_mask]\n\n confidence_filter = scores > self.confidence_threshold\n classes = classes[confidence_filter]\n boxes = boxes[confidence_filter]\n texts = texts[confidence_filter]\n raw_masks = raw_masks[confidence_filter]\n\n text_filter = texts != ''\n classes = classes[text_filter]\n boxes = boxes[text_filter]\n texts = texts[text_filter]\n raw_masks = raw_masks[text_filter]\n\n results = []\n\n for identifier, image_meta in zip(identifiers, frame_meta):\n original_image_size = image_meta['image_size'][:2]\n if 'scale_x' in image_meta and 'scale_y' in image_meta:\n im_scale_x, im_scale_y = image_meta['scale_x'], image_meta['scale_y']\n else:\n image_input = [shape for shape in 
image_meta['input_shape'].values() if len(shape) == 4]\n assert image_input, \"image input not found\"\n assert len(image_input) == 1, 'several input images detected'\n processed_image_size = image_input[0][2:]\n im_scale_y = processed_image_size[0] / original_image_size[0]\n im_scale_x = processed_image_size[1] / original_image_size[1]\n boxes[:, 0::2] /= im_scale_x\n boxes[:, 1::2] /= im_scale_y\n masks = []\n\n if self.scores_out:\n raw_mask_for_all_classes = np.shape(raw_masks)[1] != len(identifiers)\n if raw_mask_for_all_classes:\n per_obj_raw_masks = []\n for cls, raw_mask in zip(classes, raw_masks):\n per_obj_raw_masks.append(raw_mask[cls, ...])\n else:\n per_obj_raw_masks = np.squeeze(raw_masks, axis=1)\n else:\n per_obj_raw_masks = raw_masks\n\n for box, raw_cls_mask in zip(boxes, per_obj_raw_masks):\n masks.append(self.segm_postprocess(box, raw_cls_mask, *original_image_size, True, False))\n\n rectangles = self.masks_to_rects(masks)\n\n results.append(\n TextDetectionPrediction(identifier, points=rectangles, description=texts))\n\n return results\n\n @staticmethod\n def masks_to_rects(masks):\n rects = []\n for mask in masks:\n decoded_mask = mask\n contours = cv2.findContours(decoded_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]\n\n areas = []\n boxes = []\n for contour in contours:\n area = cv2.contourArea(contour)\n areas.append(area)\n\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n boxes.append(box)\n\n if areas:\n i = np.argmax(areas)\n rects.append(boxes[i])\n\n return rects\n"
] |
[
[
"numpy.int0",
"numpy.squeeze",
"numpy.argmax",
"numpy.shape"
]
] |
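The geometric core of the `MaskRCNNWithTextAdapter` row above is `masks_to_rects`: it keeps only the largest external contour of each instance mask and returns its minimum-area rotated rectangle. A minimal standalone sketch of that step, with a synthetic placeholder mask and `np.intp` as a portable stand-in for the deprecated `np.int0` alias:

import cv2
import numpy as np

def masks_to_rects(masks):
    """Binary instance masks -> 4-point rotated rectangles (largest blob per mask)."""
    rects = []
    for mask in masks:
        # [-2] keeps compatibility with both the OpenCV 3.x and 4.x return tuples
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
        areas, boxes = [], []
        for contour in contours:
            areas.append(cv2.contourArea(contour))
            boxes.append(cv2.boxPoints(cv2.minAreaRect(contour)).astype(np.intp))
        if areas:
            rects.append(boxes[int(np.argmax(areas))])
    return rects

# Toy usage (not adapter data): one filled rectangle in a 20x20 mask -> one 4-point box.
mask = np.zeros((20, 20), dtype=np.uint8)
mask[5:15, 3:12] = 1
print(masks_to_rects([mask]))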
cptanalatriste/AttnGAN
|
[
"6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8"
] |
[
"featurepred/train.py"
] |
[
"import logging\nimport time\nfrom typing import Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn import Module\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom featurepred import model\n\n\nclass FeaturePredictorTrainer:\n\n def __init__(self, model_wrapper: model, summary_writer: SummaryWriter):\n self.model_wrapper: Module = model_wrapper\n self.summary_writer: SummaryWriter = summary_writer\n\n def train_predictor(self, epochs: int, train_loader: DataLoader, validation_loader: DataLoader,\n optimiser: Optimizer, loss_function, device):\n model: Module = self.model_wrapper.model.to(device)\n\n train_start: float = time.time()\n best_accuracy: float = 0.0\n self.model_wrapper.save_model_state()\n\n for epoch in range(1, epochs + 1):\n training_loss: float = do_train(model_to_train=model, train_loader=train_loader, optimiser=optimiser,\n loss_function=loss_function, device=device)\n validation_loss: float\n validation_accuracy: float\n validation_loss, validation_accuracy = evaluate(model_to_validate=model,\n validation_loader=validation_loader,\n loss_function=loss_function, device=device)\n\n logging.info(\"Epoch: {}, training Loss: {:.2f}, validation Loss: {:.2f}, accuracy: {:.2f}\".format(epoch,\n training_loss,\n validation_loss,\n validation_accuracy))\n global_step: int = epoch * len(train_loader)\n self.summary_writer.add_scalar(tag='training_loss', scalar_value=training_loss, global_step=global_step)\n self.summary_writer.add_scalar(tag='validation_accuracy', scalar_value=validation_accuracy,\n global_step=global_step)\n self.summary_writer.close()\n\n if validation_accuracy > best_accuracy:\n best_accuracy = validation_accuracy\n self.model_wrapper.save_model_state()\n\n training_time: float = time.time() - train_start\n logging.info('Training complete in {:.0f}m {:.0f}s'.format(training_time // 60, training_time % 60))\n logging.info('Best accuracy: {}'.format(best_accuracy))\n\n\ndef do_train(model_to_train: Module, train_loader: DataLoader, optimiser: Optimizer, loss_function, device) -> float:\n model_to_train.train()\n\n total_images: int = len(train_loader.dataset)\n running_loss: float = 0.0\n for training_batch in train_loader:\n optimiser.zero_grad()\n images: Tensor\n classes: Tensor\n\n images, classes_in_batch = training_batch\n images = images.to(device)\n classes_in_batch = classes_in_batch.to(device)\n\n model_output: Tensor = model_to_train(images)\n training_loss: Tensor = loss_function(model_output, classes_in_batch)\n\n optimiser.zero_grad()\n training_loss.backward()\n optimiser.step()\n\n images_in_batch: int = images.size(0)\n running_loss += training_loss.data.item() * images_in_batch\n\n total_loss: float = running_loss / total_images\n return total_loss\n\n\ndef evaluate(model_to_validate: Module, validation_loader: DataLoader, loss_function, device) -> Tuple[float, float]:\n model_to_validate.eval()\n\n total_images: int = len(validation_loader.dataset)\n running_loss: float = 0.0\n running_matches: float = 0.0\n\n for validation_batch in validation_loader:\n images: Tensor\n classes: Tensor\n\n images, classes_in_batch = validation_batch\n images = images.to(device)\n classes_in_batch = classes_in_batch.to(device)\n\n model_output: Tensor = model_to_validate(images)\n validation_loss: Tensor = loss_function(model_output, classes_in_batch)\n batch_matches, _ = predict_and_evaluate(model_output=model_output, 
real_labels=classes_in_batch)\n\n images_in_batch: int = images.size(0)\n running_loss += validation_loss.data.item() * images_in_batch\n running_matches += batch_matches\n\n total_loss: float = running_loss / total_images\n total_accuracy: float = running_matches / total_images\n\n return total_loss, total_accuracy\n\n\ndef predict_and_evaluate(model_output: Tensor, real_labels: Tensor) -> Tuple[float, float]:\n class_by_model: Tensor = output_to_target_class(model_output=model_output)\n model_matches = torch.eq(class_by_model, real_labels).view(-1)\n\n correct_predictions = torch.sum(model_matches).item()\n evaluations = model_matches.shape[0]\n\n logging.debug(\"correct_predictions {} evaluations {} \".format(correct_predictions, evaluations))\n return correct_predictions, evaluations\n\n\ndef output_to_target_class(model_output: Tensor) -> Tensor:\n class_by_model = torch.max(F.softmax(model_output), dim=1)[1]\n return class_by_model\n\n\ndef output_to_class_probabilities(model_output: Tensor) -> Tensor:\n class_probabilities = F.softmax(model_output)\n return class_probabilities\n"
] |
[
[
"torch.eq",
"torch.nn.functional.softmax",
"torch.sum"
]
] |
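The accuracy bookkeeping in `predict_and_evaluate` above reduces to: softmax over the class logits, argmax per row, elementwise comparison against the labels. A hedged sketch with random placeholder logits; passing `dim=1` explicitly avoids the deprecation warning that the bare `F.softmax(model_output)` calls in the source trigger:

import torch
import torch.nn.functional as F

def predict_and_evaluate(model_output: torch.Tensor, real_labels: torch.Tensor):
    # softmax is monotonic per row, so this argmax equals the raw-logit argmax
    class_by_model = torch.max(F.softmax(model_output, dim=1), dim=1)[1]
    matches = torch.eq(class_by_model, real_labels).view(-1)
    return torch.sum(matches).item(), matches.shape[0]

logits = torch.randn(8, 2)            # placeholder batch of 8 binary-class logits
labels = torch.randint(0, 2, (8,))    # placeholder ground-truth labels
correct, total = predict_and_evaluate(logits, labels)
print(f"accuracy: {correct / total:.2f}")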
jrs65/radiotools
|
[
"de8153824d2c394a4ddb850b630028e75ba1afb0"
] |
[
"radiotools/estimator.py"
] |
[
"# === Start Python 2/3 compatibility\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom future.builtins import * # noqa pylint: disable=W0401, W0614\nfrom future.builtins.disabled import * # noqa pylint: disable=W0401, W0614\n# === End Python 2/3 compatibility\n\nimport numpy as np\n\nimport scipy.linalg as la\nimport scipy.sparse as ss\nimport scipy.sparse.linalg as sla\n\nfrom . import uvtools\n\n\nclass PSEstimatorBase(object):\n \"\"\"Base class for quadractic power spectrum estimators.\n\n Parameters\n ----------\n proj : ss.csr_matrix\n Sparse matrix projector.\n sky : SkyCovariance instance\n Function that returns a sky covariance matrix.\n noise : float\n Noise standard deviation.\n grid : np.ndarray\n UV grid to project onto.\n \"\"\"\n\n proj = None\n\n sky = None\n noise = None\n grid = None\n\n #l_bands = np.logspace(np.log10(300.0), np.log10(3e4), 21)\n l_bands = np.linspace(0, 3e4, 21)\n _bands = None\n\n def __init__(self, proj, sky, noise, grid):\n\n self.proj = proj\n self.sky = sky\n self.noise = noise\n self.grid = grid\n\n def setup(self):\n \"\"\"Setup the power spectrum bands and covariance matrices.\n \"\"\"\n\n # Setup routine for generating sky sims\n self.sim = vis_simulation(self.grid, self.proj, self.sky.powerspectrum, self.noise)\n\n # Set up powerspectrum bands and sky covariances\n self._bands = list(zip(self.l_bands[:-1], self.l_bands[1:]))\n self.l_centre = [ 0.5 * (ls + le) for ls, le in self._bands]\n self.C_a = np.array([\n uvtools.covariance_band(self.grid, ls, le, mat=False)\n for ls, le in self._bands\n ])\n\n # Construct the noise and sky covariances in their respective basis\n self.C_sky = self.sky.covariance(self.grid)\n self.C_noise = ss.identity(self.proj.shape[0]) * self.noise**2 / 2.0 # Factor of two to give variance of real/imag separately\n\n # Construct the full covariance matrix as a LinearOperator\n def mv(v):\n n = self.C_noise.dot(v)\n s = self.proj.dot(self.C_sky.dot(self.proj.T.dot(v)))\n return s + n\n\n self.C_full = sla.LinearOperator(shape=self.C_noise.shape, dtype=np.float64,\n matvec=mv, matmat=mv, rmatvec=mv)\n\n def q_estimator(self, vis):\n \"\"\"Calculate the biased q-estimator for a set of visibilities.\n \"\"\"\n pass\n\n def p_estimator(self, vis):\n \"\"\"Calculate the unbiased p-estimator.\n \"\"\"\n q_a = self.q_estimator(vis)\n\n p_a = np.dot(self.M_ab, q_a - self.b_a)\n\n return p_a\n\n _M_ab = None\n\n @property\n def M_ab(self):\n \"\"\"The power spectrum bin unmixing matrix M_ab.\n \"\"\"\n\n if self._M_ab is None:\n self._calculate_M_ab()\n\n return self._M_ab\n\n _b_a = None\n\n @property\n def b_a(self):\n \"\"\"The estimator bias.\n \"\"\"\n if self._b_a is None:\n self._calculate_b_a()\n\n return self._b_a\n\n\nclass OptimalEstimator(PSEstimatorBase):\n\n fisher_samples = 500\n\n def q_estimator(self, vis):\n\n Ci_v, info = sla.cg(self.C_full, vis, tol=1e-4)\n\n if info != 0:\n raise RuntimeError('meh')\n\n Cp = self.proj.T.dot(Ci_v)\n\n Cp2 = np.abs(Cp)**2\n\n q = 0.5 * np.dot(self.C_a, Cp2)\n\n return q\n\n def fisher_bias(self, n=500, fisher_only=False):\n\n qav = []\n\n bav = []\n\n for i in range(n):\n\n s1, n1, v1 = self.sim()\n\n qa = self.q_estimator(v1)\n qav.append(qa)\n\n if not fisher_only:\n ba = self.q_estimator(n1)\n bav.append(ba)\n\n F_ab = np.cov(np.array(qav).T)\n\n if fisher_only:\n return F_ab\n else:\n b_a = np.mean(np.array(ba), axis=0)\n return F_ab, b_a\n\n def _calculate_M_ab(self):\n\n F_ab, b_a = self.fisher_bias(self.fisher_samples)\n\n 
self._M_ab = la.pinv(F_ab)\n self._b_a = b_a\n\n def _calculate_b_a(self):\n self._calculate_M_ab()\n\n\nclass BareEstimator(PSEstimatorBase):\n\n def q_estimator(self, vis):\n\n Cp = self.proj.T.dot(vis)\n\n Cp2 = np.abs(Cp)**2\n\n q = 0.5 * np.dot(self.C_a, Cp2)\n\n return q\n\n def _calculate_M_ab(self):\n\n nband = len(self._bands)\n\n # Form sparse matrices for C_a^{1/2}\n hC_a = [ss.diags(cband**0.5, 0) for cband in self.C_a]\n\n # Get the projection matrix product\n BTB = self.proj.T.dot(self.proj)\n\n # Create a placeholder matrix for the normalisation\n iM_ab = np.zeros((nband, nband))\n\n # Iterate over all band pairs and calculate the Trace\n for ii in range(nband):\n\n Cp = BTB.dot(hC_a[ii])\n\n for ij in range(ii, nband):\n\n iC2 = hC_a[ij].dot(Cp)\n\n tr = np.sum(iC2.data**2)\n\n iM_ab[ii, ij] = 0.5 * tr\n iM_ab[ij, ii] = 0.5 * tr\n\n # Invert the matrix to get M_ab\n self._M_ab = la.pinv(iM_ab)\n\n def _calculate_b_a(self):\n\n # Get the projection matrix product\n BTNB_diag = self.proj.T.dot(self.C_noise.dot(self.proj)).diagonal()\n\n self._b_a = 0.5 * (self.C_a * BTNB_diag[np.newaxis, :]).sum(axis=1)\n\n\nclass WeightedEstimator(PSEstimatorBase):\n\n def setup(self):\n\n super(WeightedEstimator, self).setup()\n\n self.C_full_diag = (\n np.array(self.proj.multiply(self.C_sky.dot(self.proj.T).T).sum(axis=1)).reshape(-1) +\n self.C_noise.diagonal()\n )\n\n def q_estimator(self, vis):\n\n Cp = self.proj.T.dot(vis / self.C_full_diag)\n\n Cp2 = np.abs(Cp)**2\n\n q = 0.5 * np.dot(self.C_a, Cp2)\n\n return q\n\n def _calculate_M_ab(self):\n\n nband = len(self._bands)\n\n # Form sparse matrices for C_a^{1/2}\n hC_a = [ss.diags(cband**0.5, 0) for cband in self.C_a]\n\n Cd = ss.diags(1.0 / self.C_full_diag, 0)\n\n # Get the projection matrix product\n BTCdB = self.proj.T.dot(Cd.dot(self.proj))\n\n # Create a placeholder matrix for the normalisation\n iM_ab = np.zeros((nband, nband))\n\n # Iterate over all band pairs and calculate the Trace\n for ii in range(nband):\n\n Cp = BTCdB.dot(hC_a[ii])\n\n for ij in range(ii, nband):\n\n iC2 = hC_a[ij].dot(Cp)\n\n tr = np.sum(iC2.data**2)\n\n iM_ab[ii, ij] = 0.5 * tr\n iM_ab[ij, ii] = 0.5 * tr\n\n # Invert the matrix to get M_ab\n self._iM_ab = iM_ab\n self._M_ab = la.pinv(iM_ab)\n\n def _calculate_b_a(self):\n\n # Create a new weighted noise matrix\n wN = ss.diags(self.C_noise.diagonal() / self.C_full_diag**2, 0)\n\n # Get the projection matrix product\n BTNB_diag = self.proj.T.dot(wN.dot(self.proj)).diagonal()\n\n self._b_a = 0.5 * (self.C_a * BTNB_diag[np.newaxis, :]).sum(axis=1)\n\n\ndef vis_simulation(uv_grid, proj, ps_func, noise_amp):\n \"\"\"Generate a routine for creating random sky simulations.\n\n Parameters\n ----------\n uv_grid : np.ndarray[:, 2]\n Location in the UV-plane.\n proj : :class:`ss.csr_matrix`\n Sparse matrix projecting the UV-plane into observations.\n ps_func : function\n Power spectrum function.\n noise_amp : float\n The noise amplitude.\n \"\"\"\n\n l = 2 * np.pi * np.hypot(uv_grid[:, 0], uv_grid[:, 1])\n l = np.where(l == 0.0, 1.0, l)\n psa = ps_func(l)\n\n w = (np.concatenate([psa, psa]) / 2.0)**0.5\n\n def _sim():\n\n sky = np.random.standard_normal(w.shape) * w\n vis = proj.dot(sky)\n\n noise = noise_amp * np.random.standard_normal(vis.shape) / 2**0.5\n\n vis += noise\n\n return sky, noise, vis\n\n return _sim\n"
] |
[
[
"numpy.dot",
"scipy.linalg.pinv",
"numpy.abs",
"numpy.linspace",
"numpy.random.standard_normal",
"scipy.sparse.linalg.cg",
"scipy.sparse.diags",
"numpy.concatenate",
"scipy.sparse.identity",
"scipy.sparse.linalg.LinearOperator",
"numpy.array",
"numpy.hypot",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] |
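The estimators in the radiotools row all share one quadratic pattern: project the visibilities back through the transfer matrix, square, weight by per-band covariance diagonals (q_a = 0.5 * dot(C_a, |B^T v|^2)), then unmix the bands with a pseudo-inverse of a Monte-Carlo Fisher matrix. A toy sketch of that pipeline under stated assumptions: a random sparse projector and random band weights stand in for the real `proj` and `C_a`, and pure noise stands in for simulated visibilities:

import numpy as np
import scipy.linalg as la
import scipy.sparse as ss

rng = np.random.default_rng(0)
nvis, ngrid, nband = 50, 30, 4

proj = ss.random(nvis, ngrid, density=0.2, random_state=0, format="csr")
C_a = rng.random((nband, ngrid))        # placeholder per-band covariance diagonals

def q_estimator(vis):
    Cp2 = np.abs(proj.T.dot(vis)) ** 2  # |B^T v|^2 on the UV grid
    return 0.5 * np.dot(C_a, Cp2)       # band-weighted quadratic sums

# Monte-Carlo Fisher matrix from simulated q's, then unmix: p_a = M_ab (q_a - b_a)
q_samples = np.array([q_estimator(rng.standard_normal(nvis)) for _ in range(200)])
M_ab = la.pinv(np.cov(q_samples.T))
print(M_ab.shape)                       # (nband, nband) unmixing matrix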
maxzvyagin/mdlearn
|
[
"30c7f3346f8a2ed3cff0fe7b6b1eead9b61f5310"
] |
[
"examples/symmetric_conv2d_vae/sweep/train.py"
] |
[
"import time\nimport wandb\nimport torch\nimport random\nimport numpy as np\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom typing import Dict, Tuple\nfrom torchsummary import summary\nfrom mdlearn.utils import (\n log_checkpoint,\n log_latent_visualization,\n get_torch_optimizer,\n get_torch_scheduler,\n)\nfrom mdlearn.metrics import metric_cluster_quality\nfrom mdlearn.data.utils import train_valid_split\nfrom mdlearn.data.datasets.contact_map import ContactMapDataset\nfrom mdlearn.nn.models.vae.symmetric_conv2d_vae import SymmetricConv2dVAE\nfrom config import SymmetricConv2dVAEConfig\n\n\ndef main(cfg: SymmetricConv2dVAEConfig):\n\n # Create checkpoint directory\n checkpoint_path = Path(wandb.run.dir) / \"checkpoints\"\n checkpoint_path.mkdir()\n # Create plot directory\n plot_path = Path(wandb.run.dir) / \"plots\"\n plot_path.mkdir()\n\n # Set random seed\n torch.manual_seed(cfg.seed)\n np.random.seed(cfg.seed)\n random.seed(cfg.seed)\n\n torch.set_num_threads(cfg.num_data_workers)\n\n # Load training and validation data\n dataset = ContactMapDataset(\n path=cfg.input_path,\n shape=cfg.input_shape,\n dataset_name=cfg.dataset_name,\n scalar_dset_names=cfg.scalar_dset_names,\n values_dset_name=cfg.values_dset_name,\n scalar_requires_grad=cfg.scalar_requires_grad,\n in_memory=cfg.in_memory,\n )\n train_loader, valid_loader = train_valid_split(\n dataset,\n cfg.split_pct,\n batch_size=cfg.batch_size,\n shuffle=cfg.shuffle,\n num_workers=cfg.num_data_workers,\n drop_last=True,\n pin_memory=True,\n persistent_workers=True,\n prefetch_factor=cfg.prefetch_factor,\n )\n\n # Hardware\n device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() and not cfg.ignore_gpu else \"cpu\"\n )\n\n # Create model\n model = SymmetricConv2dVAE(\n cfg.input_shape,\n cfg.init_weights,\n cfg.filters,\n cfg.kernels,\n cfg.strides,\n cfg.affine_widths,\n cfg.affine_dropouts,\n cfg.latent_dim,\n cfg.activation,\n cfg.output_activation,\n )\n model = model.to(device)\n\n # Diplay model\n print(model)\n summary(model, cfg.input_shape)\n wandb.watch(model) # Must run after summary()\n\n optimizer = get_torch_optimizer(\n cfg.optimizer.name, cfg.optimizer.hparams, model.parameters()\n )\n if cfg.scheduler is not None:\n scheduler = get_torch_scheduler(\n cfg.scheduler.name, cfg.scheduler.hparams, optimizer\n )\n else:\n scheduler = None\n\n # Optionally initialize model with pre-trained weights\n if cfg.init_weights is not None:\n checkpoint = torch.load(cfg.init_weights, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n print(f\"Loaded model from {cfg.init_weights}\")\n\n # Start training\n epoch_times = []\n print(\n f\"Start training with {round(len(train_loader) * cfg.train_subsample_pct)} batches \"\n f\"and {round(len(valid_loader) * cfg.valid_subsample_pct)} validation batches.\"\n )\n for epoch in range(cfg.epochs):\n start = time.time()\n\n # Training\n model.train()\n avg_train_losses = train(train_loader, model, optimizer, device)\n\n print(\n \"====> Epoch: {} Train:\\tAvg loss: {:.4f}\\tAvg recon loss {:.4f}\\tAvg kld loss {:.4f}\".format(\n epoch, *avg_train_losses\n )\n )\n\n # Validation\n model.eval()\n with torch.no_grad():\n avg_loss, avg_recon_loss, avg_kld_loss, latent_vectors, scalars = validate(\n valid_loader, model, device\n )\n\n print(\n \"====> Epoch: {} Valid:\\tAvg loss: {:.4f}\\tAvg recon loss {:.4f}\\tAvg kld loss {:.4f}\".format(\n epoch, avg_loss, avg_recon_loss, avg_kld_loss\n )\n )\n elapsed = time.time() - start\n 
print(f\"Epoch: {epoch} Time: {elapsed}\\n\")\n epoch_times.append(elapsed)\n\n start = time.time()\n cluster_quality = metric_cluster_quality(latent_vectors, scalars[\"rmsd\"])\n print(f\"cluster quality metric time: \", time.time() - start)\n\n metrics = {\n \"train_loss\": avg_train_losses[0],\n \"train_recon_loss\": avg_train_losses[1],\n \"train_kld_loss\": avg_train_losses[2],\n \"valid_loss\": avg_loss,\n \"valid_recon_loss\": avg_recon_loss,\n \"valid_kld_loss\": avg_kld_loss,\n \"cluster_quality\": cluster_quality,\n }\n\n # Visualize latent space\n if epoch % cfg.plot_log_every == 0:\n html_strings = log_latent_visualization(\n latent_vectors,\n scalars,\n plot_path,\n epoch,\n cfg.plot_n_samples,\n cfg.plot_method,\n )\n for name, html_string in html_strings.items():\n metrics[name] = wandb.Html(html_string, inject=False)\n\n wandb.log(metrics)\n\n # Step the learning rate scheduler\n if scheduler is None:\n pass\n elif cfg.scheduler.name == \"ReduceLROnPlateau\":\n scheduler.step(avg_loss)\n else:\n raise NotImplementedError(f\"scheduler {cfg.scheduler.name} step function.\")\n\n if epoch % cfg.checkpoint_log_every == 0:\n log_checkpoint(\n checkpoint_path / f\"checkpoint-epoch-{epoch}.pt\",\n epoch,\n model,\n {\"optimizer\": optimizer},\n scheduler,\n )\n\n print(\"Elapsed avg time\", np.mean(epoch_times))\n\n # Output directory structure\n # output_path\n # ├── checkpoint\n # │ ├── epoch-1-20200606-125334.pt\n # │ ├── epoch-2-20200606-125338.pt\n # ├── wandb/\n\n\ndef train(train_loader, model, optimizer, device):\n avg_loss, avg_recon_loss, avg_kld_loss, i = 0.0, 0.0, 0.0, 0\n for i, batch in enumerate(train_loader):\n\n if i / len(train_loader) > cfg.train_subsample_pct:\n break # Early stop for sweeps\n\n x = batch[\"X\"].to(device, non_blocking=True)\n\n # Forward pass\n _, recon_x = model(x)\n kld_loss = model.kld_loss()\n recon_loss = model.recon_loss(x, recon_x)\n loss = cfg.lambda_rec * recon_loss + kld_loss\n\n # Backward pass\n optimizer.zero_grad()\n loss.backward()\n _ = torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_grad_max_norm)\n optimizer.step()\n\n # Collect loss\n avg_loss += loss.item()\n avg_recon_loss += recon_loss.item()\n avg_kld_loss += kld_loss.item()\n\n avg_loss /= i + 1\n avg_recon_loss /= i + 1\n avg_kld_loss /= i + 1\n\n return avg_loss, avg_recon_loss, avg_kld_loss\n\n\ndef validate(\n valid_loader, model, device\n) -> Tuple[float, float, float, np.ndarray, Dict[str, np.ndarray]]:\n scalars = defaultdict(list)\n latent_vectors = []\n avg_loss, avg_recon_loss, avg_kld_loss, i = 0.0, 0.0, 0.0, 0\n for i, batch in enumerate(valid_loader):\n\n if i / len(valid_loader) > cfg.valid_subsample_pct:\n break # Early stop for sweeps\n\n x = batch[\"X\"].to(device, non_blocking=True)\n\n # Forward pass\n z, recon_x = model(x)\n kld_loss = model.kld_loss()\n recon_loss = model.recon_loss(x, recon_x)\n loss = cfg.lambda_rec * recon_loss + kld_loss\n\n # Collect loss\n avg_loss += loss.item()\n avg_recon_loss += recon_loss.item()\n avg_kld_loss += kld_loss.item()\n\n # Collect latent vectors for visualization\n latent_vectors.append(z.cpu().numpy())\n for name in cfg.scalar_dset_names:\n scalars[name].append(batch[name].cpu().numpy())\n\n avg_loss /= i + 1\n avg_recon_loss /= i + 1\n avg_kld_loss /= i + 1\n\n latent_vectors = np.concatenate(latent_vectors)\n scalars = {name: np.concatenate(scalar) for name, scalar in scalars.items()}\n\n return avg_loss, avg_recon_loss, avg_kld_loss, latent_vectors, scalars\n\n\nif __name__ == \"__main__\":\n # 
wandb.init(dir=wandb.config.output_path)\n wandb.init()\n cfg = SymmetricConv2dVAEConfig.from_yaml(wandb.config.default_yaml)\n\n # Update cfg with sweep parameters\n cfg.batch_size = wandb.config.batch_size\n cfg.optimizer.name = wandb.config.optimizer\n cfg.optimizer.hparams[\"lr\"] = wandb.config.lr\n cfg.latent_dim = wandb.config.latent_dim\n cfg.lambda_rec = wandb.config.lambda_rec\n\n main(cfg)\n"
] |
[
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"numpy.concatenate",
"torch.set_num_threads",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
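The sweep-specific detail in the `train`/`validate` loops above is the fractional early stop: each loop compares `i / len(loader)` against a subsample percentage so a hyperparameter sweep can run on a slice of the data, with gradient clipping and a batch-count-weighted running loss. A self-contained sketch of that loop with a toy linear model; the hyperparameter names mirror, but are not, the mdlearn config:

import torch
from torch.utils.data import DataLoader, TensorDataset

model = torch.nn.Linear(8, 1)
optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=16)
subsample_pct, clip_grad_max_norm = 0.5, 10.0   # placeholder sweep parameters

avg_loss, n_batches = 0.0, 0
for i, (x, y) in enumerate(loader):
    if i / len(loader) > subsample_pct:
        break                                    # early stop for sweeps
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimiser.zero_grad()
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_max_norm)
    optimiser.step()
    avg_loss += loss.item()
    n_batches += 1
print("avg batch loss:", avg_loss / max(n_batches, 1))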
reeserich/covid-chicago
|
[
"88bdc556aebdd7e443e4756e7421160d230f5a01"
] |
[
"plotters/process_for_civis_EMSgrp.py"
] |
[
"import argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.dates as mdates\nfrom datetime import date, timedelta, datetime\nimport sys\nsys.path.append('../')\nfrom processing_helpers import *\nfrom load_paths import load_box_paths\ndatapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()\n\nmpl.rcParams['pdf.fonttype'] = 42\n\ndef parse_args():\n description = \"Process simulation outputs to send to Civis\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-e\", \"--exp_name\",\n type=str,\n help=\"Name of experiment and folder name\",\n default=None,\n )\n parser.add_argument(\n \"-p\", \"--processStep\",\n type=str,\n help=\"Only required if files are too large to process regions in a loop\",\n default='generate_outputs',\n )\n parser.add_argument(\n \"-l\", \"--Location\",\n type=str,\n help=\"Local or NUCLUSTER\",\n default='Local',\n )\n parser.add_argument(\n \"-t\", \"--trajectoriesName\",\n type=str,\n help=\"Name of trajectoriesDat file, could be trajectoriesDat.csv or trajectoriesDat_trim.csv\",\n default='trajectoriesDat.csv',\n )\n\n return parser.parse_args()\n \ndef get_scenarioName(exp_suffix) :\n scenarioName = exp_suffix\n if exp_suffix == \"reopen\": scenarioName = \"reopen_gradual\"\n if exp_suffix == \"gradual\": scenarioName = \"reopen_gradual\"\n if exp_suffix == \"interventionStop\": scenarioName = \"endsip\"\n if exp_suffix == \"0\": scenarioName = \"baseline\"\n if exp_suffix == \"neverSIP\": scenarioName = \"neversip\"\n if exp_suffix == \"stopSIP30\": scenarioName = \"july1partial30\"\n if exp_suffix == \"stopSIP10\": scenarioName = \"july1partial10\"\n\n return(scenarioName)\n\n\ndef plot_sim(dat,suffix,channels) :\n\n if suffix not in [\"All\",\"central\",\"southern\",\"northeast\",\"northcentral\"]:\n suffix_nr = str(suffix.split(\"-\")[1])\n if suffix == \"All\":\n suffix_nr =\"illinois\"\n capacity = load_capacity(suffix_nr)\n\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(right=0.97, wspace=0.2, left=0.07, hspace=0.15)\n palette = sns.color_palette('Set1', len(channels))\n\n for c, channel in enumerate(channels):\n ax = fig.add_subplot(3, 3, c + 1)\n\n ax.plot(dat['date'], dat['%s_median' % channel], color=palette[c])\n ax.fill_between(dat['date'].values, dat['%s_95CI_lower' % channel], dat['%s_95CI_upper' % channel],\n color=palette[c], linewidth=0, alpha=0.2)\n ax.fill_between(dat['date'].values, dat[ '%s_50CI_lower' % channel], dat[ '%s_50CI_upper' % channel],\n color=palette[c], linewidth=0, alpha=0.4)\n\n if channel in capacity.keys():\n ax.plot([np.min(dat['date']), np.max(dat['date'])],\n [capacity[channel], capacity[channel]], '--', linewidth=2, color=palette[c])\n\n ax.set_title(channel, y=0.85)\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%d\\n%b'))\n\n plotname = f'{scenarioName}_{suffix}'\n plotname = plotname.replace('EMS-','covidregion_')\n\n plt.savefig(os.path.join(plot_path, plotname + '.png'))\n plt.savefig(os.path.join(plot_path, 'pdf', plotname + '.pdf'), format='PDF')\n # plt.show()\n\ndef load_and_plot_data(ems_region, fname , savePlot=True) :\n region_suffix = f'_{str(ems_region)}'\n column_list = ['startdate', 'time', 'scen_num', 'sample_num', 'run_num']\n outcome_channels = ['susceptible', 'infected', 'recovered', 'infected_cumul', 'asymp_cumul', 'asymp_det_cumul', 'symp_mild_cumul', 'symp_severe_cumul', 'symp_mild_det_cumul',\n 'symp_severe_det_cumul', 
'hosp_det_cumul', 'hosp_cumul', 'detected_cumul', 'crit_cumul', 'crit_det_cumul', 'death_det_cumul',\n 'deaths', 'crit_det', 'critical', 'hosp_det', 'hospitalized']\n\n for channel in outcome_channels:\n column_list.append(channel + region_suffix)\n\n df = load_sim_data(exp_name,region_suffix = region_suffix,fname=fname, column_list=column_list)\n df['ems'] = ems_region\n df = df[(df['date'] >= plot_first_day) & (df['date'] <= plot_last_day)]\n\n df['ventilators'] = get_vents(df['crit_det'].values)\n df['new_symptomatic'] = df['new_symptomatic_severe'] + df['new_symptomatic_mild'] + df['new_detected_symptomatic_severe'] + df['new_detected_symptomatic_mild']\n\n channels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hospitalized', 'critical', 'hosp_det', 'crit_det', 'ventilators', 'recovered']\n plotchannels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hosp_det', 'crit_det', 'ventilators', 'recovered']\n\n adf = pd.DataFrame()\n for c, channel in enumerate(channels):\n mdf = df.groupby(['date', 'ems'])[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()\n\n mdf = mdf.rename(columns={'CI_50': '%s_median' % channel,\n 'CI_2pt5': '%s_95CI_lower' % channel,\n 'CI_97pt5': '%s_95CI_upper' % channel,\n 'CI_25': '%s_50CI_lower' % channel,\n 'CI_75': '%s_50CI_upper' % channel})\n if adf.empty:\n adf = mdf\n else:\n adf = pd.merge(left=adf, right=mdf, on=['date', 'ems'])\n\n if savePlot :\n plot_sim(adf, ems_region, channels=plotchannels)\n\n return adf\n\n\ndef process_and_save(adf,ems_region, SAVE = True) :\n col_names = civis_colnames(reverse=False)\n adf = adf.rename(columns=col_names)\n\n adf.geography_modeled = adf.geography_modeled.str.replace('-', \"\")\n adf.geography_modeled = adf.geography_modeled.str.lower()\n adf.geography_modeled = adf.geography_modeled.str.replace('all', \"illinois\")\n\n adf['scenario_name'] = scenarioName\n\n dfout = adf[\n ['date', 'geography_modeled', 'scenario_name', 'cases_median', 'cases_lower', 'cases_upper', 'cases_new_median',\n 'cases_new_lower', 'cases_new_upper',\n 'deaths_median', 'deaths_lower', 'deaths_upper', 'deaths_det_median', 'deaths_det_lower', 'deaths_det_upper',\n 'hosp_bed_median', 'hosp_bed_lower', 'hosp_bed_upper','hosp_det_bed_median', 'hosp_det_bed_lower', 'hosp_det_bed_upper',\n 'icu_median', 'icu_lower', 'icu_upper', 'icu_det_median', 'icu_det_lower', 'icu_det_upper',\n 'vent_median', 'vent_lower', 'vent_upper', 'recovered_median',\n 'recovered_lower', 'recovered_upper']]\n\n if SAVE :\n filename = \"nu_\" + simdate + \"_\" + ems_region + \".csv\"\n rename_geography_and_save(dfout, filename=filename)\n\n return dfout\n\ndef rename_geography_and_save(df,filename) :\n\n dfout = df.copy()\n if \"geography_modeled\" not in dfout.columns:\n dfout.rename(columns={'ems': 'covid_region'}, inplace=True)\n dfout['covid_region'] = dfout['covid_region'].str.replace('EMS-', '')\n\n if \"geography_modeled\" in dfout.columns:\n dfout['geography_modeled'] = dfout['geography_modeled'].str.replace('ems', 'covidregion_')\n\n dfout.to_csv(os.path.join(sim_output_path, filename), index=False, date_format='%Y-%m-%d')\n\n\nif __name__ == '__main__' :\n\n args = parse_args()\n\n exp_name = args.exp_name # \"20200910_IL_RR_baseline_combined\"\n simdate = exp_name.split(\"_\")[0]\n processStep = args.processStep # 'generate_outputs'\n trajectoriesName = args.trajectoriesName\n\n datapath, projectpath, wdir, exe_dir, git_dir = 
load_box_paths(Location=args.Location)\n\n plot_first_day = date(2020,3,1)\n plot_last_day = date(2021,4,1)\n\n regions = ['All', 'EMS-1', 'EMS-2', 'EMS-3', 'EMS-4', 'EMS-5', 'EMS-6', 'EMS-7', 'EMS-8', 'EMS-9', 'EMS-10','EMS-11']\n exp_suffix = exp_name.split(\"_\")[-1]\n scenarioName = get_scenarioName(exp_suffix)\n\n sim_output_path = os.path.join(wdir, 'simulation_output', exp_name)\n plot_path = os.path.join(sim_output_path, '_plots')\n\n if processStep == 'generate_outputs' :\n dfAll = pd.DataFrame()\n for reg in regions :\n print( f'Start processing {reg}')\n tdf = load_and_plot_data(reg,fname=trajectoriesName , savePlot=True)\n adf = process_and_save(tdf, reg, SAVE=True)\n dfAll = pd.concat([dfAll, adf])\n del tdf\n\n if len(regions) == 12 :\n filename = f'nu_{simdate}.csv'\n rename_geography_and_save(dfAll,filename=filename)\n\n ### Optional\n if processStep == 'combine_outputs' :\n\n for reg in regions :\n print(\"Start processing\" + reg)\n filename = \"nu_\" + simdate + \"_\" + reg + \".csv\"\n adf = pd.read_csv(os.path.join(sim_output_path, filename))\n dfAll = pd.concat([dfAll, adf])\n\n filename = f'nu_{simdate}.csv'\n rename_geography_and_save(dfAll, filename=filename)\n\n"
] |
[
[
"matplotlib.dates.DateFormatter",
"pandas.merge",
"pandas.concat",
"numpy.min",
"pandas.DataFrame",
"numpy.max",
"matplotlib.pyplot.figure"
]
] |
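`load_and_plot_data` above builds its plotting frame by aggregating each channel across simulation runs into median and CI columns via `groupby(...).agg([CI_50, ...])`. The `CI_*` helpers live in the repo's `processing_helpers` module; a sketch assuming they are plain percentile functions (a reasonable reading, given the rename to `95CI_lower`/`95CI_upper`):

import numpy as np
import pandas as pd

# Hypothetical equivalents of the repo's CI_* helpers, assumed to be percentiles
def CI_50(x): return np.percentile(x, 50)
def CI_2pt5(x): return np.percentile(x, 2.5)
def CI_97pt5(x): return np.percentile(x, 97.5)

df = pd.DataFrame({
    "date": pd.to_datetime(["2020-03-01"] * 4 + ["2020-03-02"] * 4),
    "ems": "EMS-1",
    "infected": [10, 12, 9, 11, 14, 15, 13, 16],  # toy trajectories, 4 runs per day
})
mdf = df.groupby(["date", "ems"])["infected"].agg([CI_50, CI_2pt5, CI_97pt5]).reset_index()
mdf = mdf.rename(columns={"CI_50": "infected_median",
                          "CI_2pt5": "infected_95CI_lower",
                          "CI_97pt5": "infected_95CI_upper"})
print(mdf)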
gjoseph92/pangeo-forge-recipes
|
[
"6dda95778bcae551bed1e79d76a20867e237097f"
] |
[
"tests/conftest.py"
] |
[
"\"\"\"\nObjects in this module belong to the following groups, delimited by inline comments:\n - Helper functions:\n Functions used in the creation of fixtures\n - Dataset + path fixtures:\n Create a dataset or a path to dataset(s)\n - FilePattern fixtures:\n Create `pangeo_forge_recipes.patterns.FilePattern` instances\n - Storage fixtures:\n Create temporary storage locations for caching, writing, etc.\n - Execution fixtures:\n Create infrastructure or define steps for executing recipes\nNote:\n Recipe fixtures are defined in their respective test modules, e.g. `test_recipes.py`\n\"\"\"\nimport logging\nimport os\nimport socket\nimport subprocess\nimport time\n\nimport aiohttp\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport xarray as xr\nfrom dask.distributed import Client, LocalCluster\nfrom prefect.executors import DaskExecutor\n\n# need to import this way (rather than use pytest.lazy_fixture) to make it work with dask\nfrom pytest_lazyfixture import lazy_fixture\n\nfrom pangeo_forge_recipes.patterns import (\n ConcatDim,\n FilePattern,\n MergeDim,\n pattern_from_file_sequence,\n)\nfrom pangeo_forge_recipes.storage import CacheFSSpecTarget, FSSpecTarget, MetadataTarget\n\n# Helper functions --------------------------------------------------------------------------------\n\n\n# to use this feature, e.g.\n# $ pytest --redirect-dask-worker-logs-to-stdout=DEBUG\ndef pytest_addoption(parser):\n parser.addoption(\n \"--redirect-dask-worker-logs-to-stdout\", action=\"store\", default=\"NOTSET\",\n )\n\n\ndef split_up_files_by_day(ds, day_param):\n gb = ds.resample(time=day_param)\n _, datasets = zip(*gb)\n fnames = [f\"{n:03d}.nc\" for n in range(len(datasets))]\n return datasets, fnames\n\n\ndef split_up_files_by_variable_and_day(ds, day_param):\n all_dsets = []\n all_fnames = []\n fnames_by_variable = {}\n for varname in ds.data_vars:\n var_dsets, fnames = split_up_files_by_day(ds[[varname]], day_param)\n fnames = [f\"{varname}_{fname}\" for fname in fnames]\n all_dsets += var_dsets\n all_fnames += fnames\n fnames_by_variable[varname] = fnames\n return all_dsets, all_fnames, fnames_by_variable\n\n\ndef make_file_pattern(path_fixture):\n \"\"\"Creates a filepattern from a `path_fixture`\n\n Parameters\n ----------\n path_fixture : callable\n `netcdf_local_paths`, `netcdf_http_paths`, or similar\n \"\"\"\n paths, items_per_file, fnames_by_variable, path_format, kwargs = path_fixture\n\n if not fnames_by_variable:\n file_pattern = pattern_from_file_sequence(\n [str(path) for path in paths], \"time\", items_per_file, **kwargs\n )\n else:\n time_index = list(range(len(paths) // 2))\n\n def format_function(variable, time):\n return path_format.format(variable=variable, time=time)\n\n file_pattern = FilePattern(\n format_function,\n ConcatDim(\"time\", time_index, items_per_file),\n MergeDim(\"variable\", [\"foo\", \"bar\"]),\n **kwargs,\n )\n\n return file_pattern\n\n\ndef make_netcdf_local_paths(daily_xarray_dataset, tmpdir_factory, items_per_file, file_splitter):\n tmp_path = tmpdir_factory.mktemp(\"netcdf_data\")\n file_splitter_tuple = file_splitter(daily_xarray_dataset.copy(), items_per_file)\n\n datasets, fnames = file_splitter_tuple[:2]\n full_paths = [tmp_path.join(fname) for fname in fnames]\n xr.save_mfdataset(datasets, [str(path) for path in full_paths])\n items_per_file = {\"D\": 1, \"2D\": 2}[items_per_file]\n\n fnames_by_variable = file_splitter_tuple[-1] if len(file_splitter_tuple) == 3 else None\n path_format = str(tmp_path) + 
\"/{variable}_{time:03d}.nc\" if fnames_by_variable else None\n\n kwargs = dict(fsspec_open_kwargs={}, query_string_secrets={})\n\n return full_paths, items_per_file, fnames_by_variable, path_format, kwargs\n\n\ndef get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = str(s.getsockname()[1])\n s.close()\n return port\n\n\ndef start_http_server(paths, request, username=None, password=None, required_query_string=None):\n\n first_path = paths[0]\n # assume that all files are in the same directory\n basedir = first_path.dirpath()\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n port = get_open_port()\n command_list = [\n \"python\",\n os.path.join(this_dir, \"http_auth_server.py\"),\n f\"--port={port}\",\n \"--address=127.0.0.1\",\n ]\n if username:\n command_list += [f\"--username={username}\", f\"--password={password}\"]\n if required_query_string:\n command_list += [f\"--required-query-string={required_query_string}\"]\n p = subprocess.Popen(command_list, cwd=basedir)\n url = f\"http://127.0.0.1:{port}\"\n time.sleep(2) # let the server start up\n\n def teardown():\n p.kill()\n\n request.addfinalizer(teardown)\n\n return url\n\n\ndef make_netcdf_http_paths(netcdf_local_paths, request):\n paths, items_per_file, fnames_by_variable, _, kwargs = netcdf_local_paths\n\n url = start_http_server(paths, request, **request.param)\n path_format = url + \"/{variable}_{time:03d}.nc\" if fnames_by_variable else None\n\n fnames = [path.basename for path in paths]\n all_urls = [\"/\".join([url, str(fname)]) for fname in fnames]\n\n if \"username\" in request.param.keys():\n kwargs.update(dict(fsspec_open_kwargs={\"auth\": aiohttp.BasicAuth(\"foo\", \"bar\")}))\n if \"required_query_string\" in request.param.keys():\n kwargs.update(dict(query_string_secrets={\"foo\": \"foo\", \"bar\": \"bar\"}))\n\n return all_urls, items_per_file, fnames_by_variable, path_format, kwargs\n\n\n# Dataset + path fixtures -------------------------------------------------------------------------\n\n\n@pytest.fixture(scope=\"session\")\ndef daily_xarray_dataset():\n \"\"\"Return a synthetic random xarray dataset.\"\"\"\n np.random.seed(1)\n # TODO: change nt to 11 in order to catch the edge case where\n # items_per_input does not evenly divide the length of the sequence dimension\n nt, ny, nx = 10, 18, 36\n time = pd.date_range(start=\"2010-01-01\", periods=nt, freq=\"D\")\n lon = (np.arange(nx) + 0.5) * 360 / nx\n lon_attrs = {\"units\": \"degrees_east\", \"long_name\": \"longitude\"}\n lat = (np.arange(ny) + 0.5) * 180 / ny\n lat_attrs = {\"units\": \"degrees_north\", \"long_name\": \"latitude\"}\n foo = np.random.rand(nt, ny, nx)\n foo_attrs = {\"long_name\": \"Fantastic Foo\"}\n # make sure things work with heterogenous data types\n bar = np.random.randint(0, 10, size=(nt, ny, nx))\n bar_attrs = {\"long_name\": \"Beautiful Bar\"}\n dims = (\"time\", \"lat\", \"lon\")\n ds = xr.Dataset(\n {\"bar\": (dims, bar, bar_attrs), \"foo\": (dims, foo, foo_attrs)},\n coords={\n \"time\": (\"time\", time),\n \"lat\": (\"lat\", lat, lat_attrs),\n \"lon\": (\"lon\", lon, lon_attrs),\n },\n attrs={\"conventions\": \"CF 1.6\"},\n )\n return ds\n\n\n@pytest.fixture(scope=\"session\")\ndef daily_xarray_dataset_with_coordinateless_dimension(daily_xarray_dataset):\n \"\"\"\n A Dataset with a coordinateless dimension.\n\n Reproduces https://github.com/pangeo-forge/pangeo-forge-recipes/issues/214\n \"\"\"\n ds = daily_xarray_dataset.copy()\n del ds[\"lon\"]\n return 
ds\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_paths_sequential_1d(daily_xarray_dataset, tmpdir_factory):\n return make_netcdf_local_paths(daily_xarray_dataset, tmpdir_factory, \"D\", split_up_files_by_day)\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_paths_sequential_2d(daily_xarray_dataset, tmpdir_factory):\n return make_netcdf_local_paths(\n daily_xarray_dataset, tmpdir_factory, \"2D\", split_up_files_by_day,\n )\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n lazy_fixture(\"netcdf_local_paths_sequential_1d\"),\n lazy_fixture(\"netcdf_local_paths_sequential_2d\"),\n ],\n)\ndef netcdf_local_paths_sequential(request):\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_paths_sequential_multivariable_1d(daily_xarray_dataset, tmpdir_factory):\n return make_netcdf_local_paths(\n daily_xarray_dataset, tmpdir_factory, \"D\", split_up_files_by_variable_and_day,\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_paths_sequential_multivariable_2d(daily_xarray_dataset, tmpdir_factory):\n return make_netcdf_local_paths(\n daily_xarray_dataset, tmpdir_factory, \"2D\", split_up_files_by_variable_and_day,\n )\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n lazy_fixture(\"netcdf_local_paths_sequential_multivariable_1d\"),\n lazy_fixture(\"netcdf_local_paths_sequential_multivariable_2d\"),\n ],\n)\ndef netcdf_local_paths_sequential_multivariable(request):\n return request.param\n\n\n@pytest.fixture(scope=\"session\",)\ndef netcdf_local_paths_sequential_multivariable_with_coordinateless_dimension(\n daily_xarray_dataset_with_coordinateless_dimension, tmpdir_factory\n):\n return make_netcdf_local_paths(\n daily_xarray_dataset_with_coordinateless_dimension,\n tmpdir_factory,\n \"D\",\n split_up_files_by_variable_and_day,\n )\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n lazy_fixture(\"netcdf_local_paths_sequential\"),\n lazy_fixture(\"netcdf_local_paths_sequential_multivariable\"),\n ],\n)\ndef netcdf_local_paths(request):\n return request.param\n\n\nhttp_auth_params = [\n dict(username=\"foo\", password=\"bar\"),\n dict(required_query_string=\"foo=foo&bar=bar\"),\n]\n\n\n@pytest.fixture(scope=\"session\", params=[dict()])\ndef netcdf_public_http_paths_sequential_1d(netcdf_local_paths_sequential_1d, request):\n return make_netcdf_http_paths(netcdf_local_paths_sequential_1d, request)\n\n\n@pytest.fixture(scope=\"session\", params=[*http_auth_params])\ndef netcdf_private_http_paths_sequential_1d(netcdf_local_paths_sequential_1d, request):\n return make_netcdf_http_paths(netcdf_local_paths_sequential_1d, request)\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n lazy_fixture(\"netcdf_public_http_paths_sequential_1d\"),\n lazy_fixture(\"netcdf_private_http_paths_sequential_1d\"),\n ],\n)\ndef netcdf_http_paths_sequential_1d(request):\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_paths_sequential_with_coordinateless_dimension(\n daily_xarray_dataset_with_coordinateless_dimension, tmpdir_factory\n):\n return make_netcdf_local_paths(\n daily_xarray_dataset_with_coordinateless_dimension,\n tmpdir_factory,\n \"D\",\n split_up_files_by_day,\n )\n\n\n# FilePattern fixtures ----------------------------------------------------------------------------\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_file_pattern_sequential(netcdf_local_paths_sequential):\n return make_file_pattern(netcdf_local_paths_sequential)\n\n\n@pytest.fixture(scope=\"session\")\ndef 
netcdf_local_file_pattern_sequential_multivariable(\n netcdf_local_paths_sequential_multivariable,\n):\n return make_file_pattern(netcdf_local_paths_sequential_multivariable)\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n lazy_fixture(\"netcdf_local_file_pattern_sequential\"),\n lazy_fixture(\"netcdf_local_file_pattern_sequential_multivariable\"),\n ],\n)\ndef netcdf_local_file_pattern(request):\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_http_file_pattern_sequential_1d(netcdf_http_paths_sequential_1d):\n return make_file_pattern(netcdf_http_paths_sequential_1d)\n\n\n@pytest.fixture(scope=\"session\")\ndef netcdf_local_file_pattern_sequential_with_coordinateless_dimension(\n netcdf_local_paths_sequential_with_coordinateless_dimension,\n):\n \"\"\"\n Filepattern where one of the dimensions doesn't have a coordinate.\n \"\"\"\n return make_file_pattern(netcdf_local_paths_sequential_with_coordinateless_dimension)\n\n\n# Storage fixtures --------------------------------------------------------------------------------\n\n\n@pytest.fixture()\ndef tmp_target(tmpdir_factory):\n fs = fsspec.get_filesystem_class(\"file\")()\n path = str(tmpdir_factory.mktemp(\"target\"))\n return FSSpecTarget(fs, path)\n\n\n@pytest.fixture()\ndef tmp_cache(tmpdir_factory):\n path = str(tmpdir_factory.mktemp(\"cache\"))\n fs = fsspec.get_filesystem_class(\"file\")()\n cache = CacheFSSpecTarget(fs, path)\n return cache\n\n\n@pytest.fixture()\ndef tmp_metadata_target(tmpdir_factory):\n path = str(tmpdir_factory.mktemp(\"cache\"))\n fs = fsspec.get_filesystem_class(\"file\")()\n cache = MetadataTarget(fs, path)\n return cache\n\n\n# Execution fixtures ------------------------------------------------------------------------------\n\n\n@pytest.fixture(scope=\"session\")\ndef dask_cluster(request):\n cluster = LocalCluster(n_workers=2, threads_per_worker=1, silence_logs=False)\n\n client = Client(cluster)\n\n # cluster setup\n\n def set_blosc_threads():\n from numcodecs import blosc\n\n blosc.use_threads = False\n\n log_level_name = request.config.getoption(\"--redirect-dask-worker-logs-to-stdout\")\n level = logging.getLevelName(log_level_name)\n\n def redirect_logs():\n import logging\n\n for log in [\"pangeo_forge_recipes\", \"fsspec\"]:\n logger = logging.getLogger(log)\n formatter = logging.Formatter(\"%(name)s - %(levelname)s - %(message)s\")\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(level)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n client.run(set_blosc_threads)\n client.run(redirect_logs)\n client.close()\n del client\n\n yield cluster\n\n cluster.close()\n\n\n# The fixtures below use the following pattern to only run when the marks are activated\n# Based on https://github.com/pytest-dev/pytest/issues/1368#issuecomment-466339463\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n pytest.param(\"FunctionPipelineExecutor\", marks=pytest.mark.executor_function),\n pytest.param(\"GeneratorPipelineExecutor\", marks=pytest.mark.executor_generator),\n pytest.param(\"DaskPipelineExecutor\", marks=pytest.mark.executor_dask),\n pytest.param(\"PrefectPipelineExecutor\", marks=pytest.mark.executor_prefect),\n pytest.param(\"BeamPipelineExecutor\", marks=pytest.mark.executor_beam),\n ],\n)\ndef Executor(request):\n try:\n import pangeo_forge_recipes.executors as exec_module\n\n return getattr(exec_module, request.param)\n except AttributeError:\n pytest.skip(f\"Couldn't import 
{request.param}\")\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_function)])\ndef execute_recipe_function():\n def execute(recipe):\n return recipe.to_function()()\n\n return execute\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_generator)])\ndef execute_recipe_generator():\n def execute(recipe):\n for f, args, kwargs in recipe.to_generator():\n f(*args, **kwargs)\n\n return execute\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_dask)])\ndef execute_recipe_dask(dask_cluster):\n def execute(recipe):\n with Client(dask_cluster):\n return recipe.to_dask().compute()\n\n return execute\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_prefect)])\ndef execute_recipe_prefect():\n def execute(recipe):\n state = recipe.to_prefect().run()\n if state.is_failed():\n raise ValueError(f\"Prefect flow run failed with message {state.message}\")\n\n return execute\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_prefect_dask)])\ndef execute_recipe_prefect_dask(dask_cluster):\n def execute(recipe):\n flow = recipe.to_prefect()\n executor = DaskExecutor(address=dask_cluster.scheduler_address)\n state = flow.run(executor=executor)\n if state.is_failed():\n raise ValueError(f\"Prefect flow run failed with message {state.message}\")\n\n return execute\n\n\n@pytest.fixture(params=[pytest.param(0, marks=pytest.mark.executor_beam)])\ndef execute_recipe_beam():\n beam = pytest.importorskip(\"apache_beam\")\n\n def execute(recipe):\n pcoll = recipe.to_beam()\n with beam.Pipeline() as p:\n p | pcoll\n\n return execute\n\n\n# now mark all other tests with \"no_executor\"\n# https://stackoverflow.com/questions/39846230/how-to-run-only-unmarked-tests-in-pytest\ndef pytest_collection_modifyitems(items, config):\n for item in items:\n executor_markers = [\n marker for marker in item.iter_markers() if marker.name.startswith(\"executor_\")\n ]\n if len(executor_markers) == 0:\n item.add_marker(\"no_executor\")\n\n\n@pytest.fixture(\n params=[\n lazy_fixture(\"execute_recipe_function\"),\n lazy_fixture(\"execute_recipe_generator\"),\n lazy_fixture(\"execute_recipe_dask\"),\n lazy_fixture(\"execute_recipe_beam\"),\n ],\n)\ndef execute_recipe_no_prefect(request):\n return request.param\n\n\n@pytest.fixture(\n params=[lazy_fixture(\"execute_recipe_prefect\"), lazy_fixture(\"execute_recipe_prefect_dask\")],\n)\ndef execute_recipe_with_prefect(request):\n return request.param\n\n\n@pytest.fixture(\n params=[lazy_fixture(\"execute_recipe_function\"), lazy_fixture(\"execute_recipe_prefect\")],\n)\ndef execute_recipe_no_dask(request):\n return request.param\n\n\n@pytest.fixture(\n params=[lazy_fixture(\"execute_recipe_dask\"), lazy_fixture(\"execute_recipe_prefect_dask\")],\n)\ndef execute_recipe_with_dask(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n lazy_fixture(\"execute_recipe_no_prefect\"),\n lazy_fixture(\"execute_recipe_with_prefect\"),\n ],\n)\ndef execute_recipe(request):\n return request.param\n"
] |
[
[
"numpy.random.seed",
"numpy.arange",
"numpy.random.rand",
"pandas.date_range",
"numpy.random.randint"
]
] |
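One small reusable piece of the conftest above is `get_open_port`, which the HTTP-server fixtures use to pick a free port: bind to port 0 so the kernel assigns an unused one, read it back, and release the socket. Extracted here as a runnable snippet with a type annotation added; note the inherent race, since another process could in principle grab the port between `close()` and the test server's own bind:

import socket

def get_open_port() -> str:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))       # port 0 -> the OS picks an unused port
    s.listen(1)
    port = str(s.getsockname()[1])
    s.close()             # released here; the caller must re-bind it quickly
    return port

print(get_open_port())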
anDoer/PoseTrack21
|
[
"25c8412feca57319404a0c48565f9a264d648bd1"
] |
[
"eval/mot/motmetrics/mot.py"
] |
[
"# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.\n# https://github.com/cheind/py-motmetrics/\n#\n# MIT License\n# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.\n# See LICENSE file for terms.\n\n\"\"\"Accumulate tracking events frame by frame.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom motmetrics.lap import linear_sum_assignment\n\n_INDEX_FIELDS = ['FrameId', 'Event']\n_EVENT_FIELDS = ['Type', 'OId', 'HId', 'D']\n\n\nclass MOTAccumulator(object):\n \"\"\"Manage tracking events.\n\n This class computes per-frame tracking events from a given set of object / hypothesis\n ids and pairwise distances. Indended usage\n\n import motmetrics as mm\n acc = mm.MOTAccumulator()\n acc.update(['a', 'b'], [0, 1, 2], dists, frameid=0)\n ...\n acc.update(['d'], [6,10], other_dists, frameid=76)\n summary = mm.metrics.summarize(acc)\n print(mm.io.render_summary(summary))\n\n Update is called once per frame and takes objects / hypothesis ids and a pairwise distance\n matrix between those (see distances module for support). Per frame max(len(objects), len(hypothesis))\n events are generated. Each event type is one of the following\n - `'MATCH'` a match between a object and hypothesis was found\n - `'SWITCH'` a match between a object and hypothesis was found but differs from previous assignment (hypothesisid != previous)\n - `'MISS'` no match for an object was found\n - `'FP'` no match for an hypothesis was found (spurious detections)\n - `'RAW'` events corresponding to raw input\n - `'TRANSFER'` a match between a object and hypothesis was found but differs from previous assignment (objectid != previous)\n - `'ASCEND'` a match between a object and hypothesis was found but differs from previous assignment (hypothesisid is new)\n - `'MIGRATE'` a match between a object and hypothesis was found but differs from previous assignment (objectid is new)\n\n Events are tracked in a pandas Dataframe. The dataframe is hierarchically indexed by (`FrameId`, `EventId`),\n where `FrameId` is either provided during the call to `update` or auto-incremented when `auto_id` is set\n true during construction of MOTAccumulator. `EventId` is auto-incremented. The dataframe has the following\n columns\n - `Type` one of `('MATCH', 'SWITCH', 'MISS', 'FP', 'RAW')`\n - `OId` object id or np.nan when `'FP'` or `'RAW'` and object is not present\n - `HId` hypothesis id or np.nan when `'MISS'` or `'RAW'` and hypothesis is not present\n - `D` distance or np.nan when `'FP'` or `'MISS'` or `'RAW'` and either object/hypothesis is absent\n\n From the events and associated fields the entire tracking history can be recovered. Once the accumulator\n has been populated with per-frame data use `metrics.summarize` to compute statistics. See `metrics.compute_metrics`\n for a list of metrics computed.\n\n References\n ----------\n 1. Bernardin, Keni, and Rainer Stiefelhagen. \"Evaluating multiple object tracking performance: the CLEAR MOT metrics.\"\n EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.\n 2. Milan, Anton, et al. \"Mot16: A benchmark for multi-object tracking.\" arXiv preprint arXiv:1603.00831 (2016).\n 3. Li, Yuan, Chang Huang, and Ram Nevatia. \"Learning to associate: Hybridboosted multi-target tracker for crowded scene.\"\n Computer Vision and Pattern Recognition, 2009. CVPR 2009. 
IEEE Conference on. IEEE, 2009.\n \"\"\"\n\n def __init__(self, auto_id=False, max_switch_time=float('inf')):\n \"\"\"Create a MOTAccumulator.\n\n Params\n ------\n auto_id : bool, optional\n Whether or not frame indices are auto-incremented or provided upon\n updating. Defaults to false. Not specifying a frame-id when this value\n is true results in an error. Specifying a frame-id when this value is\n false also results in an error.\n\n max_switch_time : scalar, optional\n Allows specifying an upper bound on the timespan an unobserved but\n tracked object is allowed to generate track switch events. Useful if groundtruth\n objects leaving the field of view keep their ID when they reappear,\n but your tracker is not capable of recognizing this (resulting in\n track switch events). The default is that there is no upper bound\n on the timespan. In units of frame timestamps. When using auto_id\n in units of count.\n \"\"\"\n\n # Parameters of the accumulator.\n self.auto_id = auto_id\n self.max_switch_time = max_switch_time\n\n # Accumulator state.\n self._events = None\n self._indices = None\n self.m = None\n self.res_m = None\n self.last_occurrence = None\n self.last_match = None\n self.hypHistory = None\n self.dirty_events = None\n self.cached_events_df = None\n\n self.reset()\n\n def reset(self):\n \"\"\"Reset the accumulator to empty state.\"\"\"\n\n self._events = {field: [] for field in _EVENT_FIELDS}\n self._indices = {field: [] for field in _INDEX_FIELDS}\n self.m = {} # Pairings up to current timestamp\n self.res_m = {} # Result pairings up to now\n self.last_occurrence = {} # Tracks most recent occurance of object\n self.last_match = {} # Tracks most recent match of object\n self.hypHistory = {}\n self.dirty_events = True\n self.cached_events_df = None\n\n def _append_to_indices(self, frameid, eid):\n self._indices['FrameId'].append(frameid)\n self._indices['Event'].append(eid)\n\n def _append_to_events(self, typestr, oid, hid, distance):\n self._events['Type'].append(typestr)\n self._events['OId'].append(oid)\n self._events['HId'].append(hid)\n self._events['D'].append(distance)\n\n def update(self, oids, hids, dists, frameid=None, vf='', ignore_candidates=None):\n \"\"\"Updates the accumulator with frame specific objects/detections.\n\n This method generates events based on the following algorithm [1]:\n 1. Try to carry forward already established tracks. If any paired object / hypothesis\n from previous timestamps are still visible in the current frame, create a 'MATCH'\n event between them.\n 2. For the remaining constellations minimize the total object / hypothesis distance\n error (Kuhn-Munkres algorithm). If a correspondence made contradicts a previous\n match create a 'SWITCH' else a 'MATCH' event.\n 3. Create 'MISS' events for all remaining unassigned objects.\n 4. Create 'FP' events for all remaining unassigned hypotheses.\n\n Params\n ------\n oids : N array\n Array of object ids.\n hids : M array\n Array of hypothesis ids.\n dists: NxM array\n Distance matrix. np.nan values to signal do-not-pair constellations.\n See `distances` module for support methods.\n\n Kwargs\n ------\n frameId : id\n Unique frame id. Optional when MOTAccumulator.auto_id is specified during\n construction.\n vf: file to log details\n Returns\n -------\n frame_events : pd.DataFrame\n Dataframe containing generated events\n\n References\n ----------\n 1. Bernardin, Keni, and Rainer Stiefelhagen. 
\"Evaluating multiple object tracking performance: the CLEAR MOT metrics.\"\n EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.\n \"\"\"\n # pylint: disable=too-many-locals, too-many-statements\n\n self.dirty_events = True\n oids = np.asarray(oids)\n oids_masked = np.zeros_like(oids, dtype=np.bool)\n hids = np.asarray(hids)\n hids_masked = np.zeros_like(hids, dtype=np.bool)\n dists = np.atleast_2d(dists).astype(float).reshape(oids.shape[0], hids.shape[0]).copy()\n\n if frameid is None:\n assert self.auto_id, 'auto-id is not enabled'\n if len(self._indices['FrameId']) > 0:\n frameid = self._indices['FrameId'][-1] + 1\n else:\n frameid = 0\n else:\n assert not self.auto_id, 'Cannot provide frame id when auto-id is enabled'\n\n eid = itertools.count()\n\n # 0. Record raw events\n\n no = len(oids)\n nh = len(hids)\n\n # Add a RAW event simply to ensure the frame is counted.\n self._append_to_indices(frameid, next(eid))\n self._append_to_events('RAW', np.nan, np.nan, np.nan)\n\n # There must be at least one RAW event per object and hypothesis.\n # Record all finite distances as RAW events.\n valid_i, valid_j = np.where(np.isfinite(dists))\n valid_dists = dists[valid_i, valid_j]\n for i, j, dist_ij in zip(valid_i, valid_j, valid_dists):\n self._append_to_indices(frameid, next(eid))\n self._append_to_events('RAW', oids[i], hids[j], dist_ij)\n # Add a RAW event for objects and hypotheses that were present but did\n # not overlap with anything.\n used_i = np.unique(valid_i)\n used_j = np.unique(valid_j)\n unused_i = np.setdiff1d(np.arange(no), used_i)\n unused_j = np.setdiff1d(np.arange(nh), used_j)\n\n ignore_indices = {}\n ignore_events = {}\n\n for oid in oids[unused_i]:\n self._append_to_indices(frameid, next(eid))\n self._append_to_events('RAW', oid, np.nan, np.nan)\n\n # for hid in hids[unused_j]:\n for idx_j in unused_j:\n hid = hids[idx_j]\n\n # if we have ignore candidates, store these events for later\n if len(ignore_candidates) > 0:\n if idx_j in ignore_candidates:\n ignore_indices[hid] = [frameid, next(eid)]\n ignore_events[hid] = ['RAW', np.nan, hid, np.nan]\n continue\n\n self._append_to_indices(frameid, next(eid))\n self._append_to_events('RAW', np.nan, hid, np.nan)\n\n if oids.size * hids.size > 0:\n # 1. Try to re-establish tracks from previous correspondences\n for i in range(oids.shape[0]):\n # No need to check oids_masked[i] here.\n if oids[i] not in self.m:\n continue\n\n hprev = self.m[oids[i]]\n j, = np.where(~hids_masked & (hids == hprev))\n if j.shape[0] == 0:\n continue\n j = j[0]\n\n if np.isfinite(dists[i, j]):\n o = oids[i]\n h = hids[j]\n oids_masked[i] = True\n hids_masked[j] = True\n self.m[oids[i]] = hids[j]\n\n self._append_to_indices(frameid, next(eid))\n self._append_to_events('MATCH', oids[i], hids[j], dists[i, j])\n self.last_match[o] = frameid\n self.hypHistory[h] = frameid\n\n # 2. 
Try to match remaining objects/hypotheses via minimum-cost assignment\n            dists[oids_masked, :] = np.nan\n            dists[:, hids_masked] = np.nan\n\n            rids, cids = linear_sum_assignment(dists)\n\n            for i, j in zip(rids, cids):\n                if not np.isfinite(dists[i, j]):\n                    continue\n\n                o = oids[i]\n                h = hids[j]\n                is_switch = (o in self.m and\n                             self.m[o] != h and\n                             abs(frameid - self.last_occurrence[o]) <= self.max_switch_time)\n                cat1 = 'SWITCH' if is_switch else 'MATCH'\n                if cat1 == 'SWITCH':\n                    if h not in self.hypHistory:\n                        subcat = 'ASCEND'\n                        self._append_to_indices(frameid, next(eid))\n                        self._append_to_events(subcat, oids[i], hids[j], dists[i, j])\n                # ignore the last condition temporarily\n                is_transfer = (h in self.res_m and\n                               self.res_m[h] != o)\n                # is_transfer = (h in self.res_m and\n                #                self.res_m[h] != o and\n                #                abs(frameid - self.last_occurrence[o]) <= self.max_switch_time)\n                cat2 = 'TRANSFER' if is_transfer else 'MATCH'\n                if cat2 == 'TRANSFER':\n                    if o not in self.last_match:\n                        subcat = 'MIGRATE'\n                        self._append_to_indices(frameid, next(eid))\n                        self._append_to_events(subcat, oids[i], hids[j], dists[i, j])\n                    self._append_to_indices(frameid, next(eid))\n                    self._append_to_events(cat2, oids[i], hids[j], dists[i, j])\n                if vf != '' and (cat1 != 'MATCH' or cat2 != 'MATCH'):\n                    if cat1 == 'SWITCH':\n                        vf.write('%s %d %d %d %d %d\\n' % (subcat[:2], o, self.last_match[o], self.m[o], frameid, h))\n                    if cat2 == 'TRANSFER':\n                        vf.write('%s %d %d %d %d %d\\n' % (subcat[:2], h, self.hypHistory[h], self.res_m[h], frameid, o))\n                self.hypHistory[h] = frameid\n                self.last_match[o] = frameid\n                self._append_to_indices(frameid, next(eid))\n                self._append_to_events(cat1, oids[i], hids[j], dists[i, j])\n                oids_masked[i] = True\n                hids_masked[j] = True\n                self.m[o] = h\n                self.res_m[h] = o\n\n        # 3. All remaining objects are missed\n        for o in oids[~oids_masked]:\n            self._append_to_indices(frameid, next(eid))\n            self._append_to_events('MISS', o, np.nan, np.nan)\n            if vf != '':\n                vf.write('FN %d %d\\n' % (frameid, o))\n\n        # 4.2: replay the deferred RAW events for ignore-candidate\n        # hypotheses that ended up matched in this frame\n        masked_hids_indices = np.where(hids_masked)[0]\n        for hid_idx in masked_hids_indices:\n            # for h in hids[hids_masked]:\n            h = hids[hid_idx]\n            if hid_idx in ignore_candidates and h in ignore_indices:\n                self._append_to_indices(*ignore_indices[h])\n                self._append_to_events(*ignore_events[h])\n\n        # 4. All remaining hypotheses are false alarms\n        unmasked_hids_indices = np.where(~hids_masked)[0]\n        for hid_idx in unmasked_hids_indices:\n            h = hids[hid_idx]\n            # for h in hids[~hids_masked]:\n            # skip if this hypothesis is an ignore candidate!\n            if hid_idx in ignore_candidates:\n                continue\n\n            self._append_to_indices(frameid, next(eid))\n            self._append_to_events('FP', np.nan, h, np.nan)\n            if vf != '':\n                vf.write('FP %d %d\\n' % (frameid, h))\n\n\n        # 5. Update occurrence state\n        for o in oids:\n            self.last_occurrence[o] = frameid\n\n        return frameid\n\n    @property\n    def events(self):\n        if self.dirty_events:\n            self.cached_events_df = MOTAccumulator.new_event_dataframe_with_data(self._indices, self._events)\n            self.dirty_events = False\n        return self.cached_events_df\n\n    @property\n    def mot_events(self):\n        df = self.events\n        return df[df.Type != 'RAW']\n\n    @staticmethod\n    def new_event_dataframe():\n        \"\"\"Create a new DataFrame for event tracking.\"\"\"\n        idx = pd.MultiIndex(levels=[[], []], codes=[[], []], names=['FrameId', 'Event'])\n        cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH', 'TRANSFER', 'ASCEND', 'MIGRATE'])\n        df = pd.DataFrame(\n            OrderedDict([\n                ('Type', pd.Series(cats)),  # Type of event. 
One of RAW, FP (false positive), MISS, SWITCH, MATCH, TRANSFER, ASCEND or MIGRATE\n                ('OId', pd.Series(dtype=float)),  # Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways.\n                ('HId', pd.Series(dtype=float)),  # Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways.\n                ('D', pd.Series(dtype=float)),  # Distance or NaN when FP or MISS\n            ]),\n            index=idx\n        )\n        return df\n\n    @staticmethod\n    def new_event_dataframe_with_data(indices, events):\n        \"\"\"Create a new DataFrame filled with data.\n\n        Params\n        ------\n        indices: dict\n            dict of lists with fields 'FrameId' and 'Event'\n        events: dict\n            dict of lists with fields 'Type', 'OId', 'HId', 'D'\n        \"\"\"\n\n        if len(events) == 0:\n            return MOTAccumulator.new_event_dataframe()\n\n        raw_type = pd.Categorical(\n            events['Type'],\n            categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH', 'TRANSFER', 'ASCEND', 'MIGRATE'],\n            ordered=False)\n        series = [\n            pd.Series(raw_type, name='Type'),\n            pd.Series(events['OId'], dtype=float, name='OId'),\n            pd.Series(events['HId'], dtype=float, name='HId'),\n            pd.Series(events['D'], dtype=float, name='D')\n        ]\n\n        idx = pd.MultiIndex.from_arrays(\n            [indices[field] for field in _INDEX_FIELDS],\n            names=_INDEX_FIELDS)\n        df = pd.concat(series, axis=1)\n        df.index = idx\n        return df\n\n    @staticmethod\n    def merge_analysis(anas, infomap):\n        # pylint: disable=missing-function-docstring\n        res = {'hyp': {}, 'obj': {}}\n        mapp = {'hyp': 'hid_map', 'obj': 'oid_map'}\n        for ana, infom in zip(anas, infomap):\n            if ana is None:\n                return None\n            for t in ana.keys():\n                which = mapp[t]\n                if np.nan in infom[which]:\n                    res[t][int(infom[which][np.nan])] = 0\n                if 'nan' in infom[which]:\n                    res[t][int(infom[which]['nan'])] = 0\n                for _id, cnt in ana[t].items():\n                    if _id not in infom[which]:\n                        _id = str(_id)\n                    res[t][int(infom[which][_id])] = cnt\n        return res\n\n    @staticmethod\n    def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True, return_mappings=False):\n        \"\"\"Merge dataframes.\n\n        Params\n        ------\n        dfs : list of pandas.DataFrame or MOTAccumulator\n            A list of event containers to merge\n\n        Kwargs\n        ------\n        update_frame_indices : boolean, optional\n            Ensure that frame indices are unique in the merged container\n        update_oids : boolean, optional\n            Ensure that object ids are unique in the merged container\n        update_hids : boolean, optional\n            Ensure that hypothesis ids are unique in the merged container\n        return_mappings : boolean, optional\n            Whether or not to return mapping information\n\n        Returns\n        -------\n        df : pandas.DataFrame\n            Merged event data frame\n        \"\"\"\n\n        mapping_infos = []\n        new_oid = itertools.count()\n        new_hid = itertools.count()\n\n        r = MOTAccumulator.new_event_dataframe()\n        for df in dfs:\n\n            if isinstance(df, MOTAccumulator):\n                df = df.events\n\n            copy = df.copy()\n            infos = {}\n\n            # Update index\n            if update_frame_indices:\n                # pylint: disable=cell-var-from-loop\n                next_frame_id = max(r.index.get_level_values(0).max() + 1, r.index.get_level_values(0).unique().shape[0])\n                if np.isnan(next_frame_id):\n                    next_frame_id = 0\n                copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1]))\n                infos['frame_offset'] = next_frame_id\n\n            # Update object / hypothesis ids\n            if update_oids:\n                # pylint: disable=cell-var-from-loop\n                oid_map = dict([oid, str(next(new_oid))] for oid in copy['OId'].dropna().unique())\n                copy['OId'] = copy['OId'].map(lambda x: oid_map[x], na_action='ignore')\n                infos['oid_map'] = oid_map\n\n            if update_hids:\n                # pylint: disable=cell-var-from-loop\n                hid_map = dict([hid, 
str(next(new_hid))] for hid in copy['HId'].dropna().unique())\n                copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')\n                infos['hid_map'] = hid_map\n\n            r = pd.concat([r, copy])  # DataFrame.append was removed in pandas 2.0; concat is the supported equivalent\n            mapping_infos.append(infos)\n\n        if return_mappings:\n            return r, mapping_infos\n        else:\n            return r\n"
] |
[
[
"pandas.concat",
"pandas.Series",
"numpy.isfinite",
"pandas.MultiIndex",
"numpy.asarray",
"pandas.Categorical",
"numpy.unique",
"numpy.arange",
"numpy.isnan",
"pandas.MultiIndex.from_arrays",
"numpy.atleast_2d",
"numpy.zeros_like",
"numpy.where"
]
] |
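A minimal usage sketch for the MOTAccumulator defined in the entry above (frame contents, ids and distances are invented; the import path is assumed from the usual py-motmetrics layout; np.nan in the distance matrix marks forbidden pairings):

    import numpy as np
    from motmetrics.mot import MOTAccumulator  # assumed module path for the class above

    acc = MOTAccumulator(auto_id=True)
    # Frame 0: ground-truth objects 1 and 2, one hypothesis 'a'; object 2 may not pair with 'a'.
    acc.update([1, 2], ['a'], [[0.2], [np.nan]])
    # Frame 1: hypothesis 'b' appears; 'a' remains closest to object 1.
    acc.update([1, 2], ['a', 'b'], [[0.1, np.nan], [0.4, 0.3]])
    print(acc.mot_events)  # MATCH / MISS / FP / SWITCH rows, with RAW events filtered out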
otherm/gshp-analysis
|
[
"746070b10a05985c31f06acd5e052ac3a7bf4924"
] |
[
"utilities/csv_to_yaml.py"
] |
[
"import yaml\nimport pandas as pd\nimport model_dict_templates as mdt\n\n\ninputfile = '../temp_files/NWS_stations_2.csv'\n\nstation_data = pd.read_csv(inputfile, header=0)\n\nstation_dict_list = station_data.set_index('model').to_dict('records')\n\nweather_station_list = []\nfor data in station_dict_list:\n if data['nws_id'] not in ['KPSM', 'KPSF', 'KHIE', 'KBED']:\n weather_station_list.append({'WeatherStation': data})\n print(data)\n\nwith open('../temp_files/nws_stations.yaml', 'w') as file:\n yaml.safe_dump(weather_station_list, file, default_style='\"')\n\nfile.close()\n"
] |
[
[
"pandas.read_csv"
]
] |
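A sketch of the CSV-to-YAML transformation performed by the script above, with made-up columns and station ids; note that set_index('model').to_dict('records') drops the 'model' column, because to_dict('records') ignores the index:

    import io
    import pandas as pd
    import yaml

    csv_text = "model,nws_id,lat\nstation_a,KABC,43.1\nstation_b,KPSM,43.0\n"
    df = pd.read_csv(io.StringIO(csv_text), header=0)
    records = df.set_index('model').to_dict('records')  # [{'nws_id': 'KABC', 'lat': 43.1}, ...]
    stations = [{'WeatherStation': r} for r in records if r['nws_id'] not in ['KPSM']]
    print(yaml.safe_dump(stations, default_style='"'))  # one quoted mapping per retained station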
vision7820/BMN-Boundary-Matching-Network
|
[
"bb13368245b5079a6be156cc065abd6599412cbe"
] |
[
"post_processing.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport json\nimport multiprocessing as mp\n\nfrom utils import iou_with_anchors\n\n\ndef load_json(file):\n with open(file) as json_file:\n data = json.load(json_file)\n return data\n\n\ndef getDatasetDict(opt):\n df = pd.read_csv(opt[\"video_info\"])\n json_data = load_json(opt[\"video_anno\"])\n database = json_data\n video_dict = {}\n for i in range(len(df)):\n video_name = df.video.values[i]\n video_info = database[video_name]\n video_new_info = {}\n video_new_info['duration_frame'] = video_info['duration_frame']\n video_new_info['duration_second'] = video_info['duration_second']\n video_new_info[\"feature_frame\"] = video_info['feature_frame']\n video_subset = df.subset.values[i]\n video_new_info['annotations'] = video_info['annotations']\n if video_subset == 'validation':\n video_dict[video_name] = video_new_info\n return video_dict\n\n\ndef soft_nms(df, alpha, t1, t2):\n '''\n df: proposals generated by network;\n alpha: alpha value of Gaussian decaying function;\n t1, t2: threshold for soft nms.\n '''\n df = df.sort_values(by=\"score\", ascending=False)\n tstart = list(df.xmin.values[:])\n tend = list(df.xmax.values[:])\n tscore = list(df.score.values[:])\n\n rstart = []\n rend = []\n rscore = []\n\n while len(tscore) > 1 and len(rscore) < 101:\n max_index = tscore.index(max(tscore))\n tmp_iou_list = iou_with_anchors(\n np.array(tstart),\n np.array(tend), tstart[max_index], tend[max_index])\n for idx in range(0, len(tscore)):\n if idx != max_index:\n tmp_iou = tmp_iou_list[idx]\n tmp_width = tend[max_index] - tstart[max_index]\n if tmp_iou > t1 + (t2 - t1) * tmp_width:\n tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /\n alpha)\n\n rstart.append(tstart[max_index])\n rend.append(tend[max_index])\n rscore.append(tscore[max_index])\n tstart.pop(max_index)\n tend.pop(max_index)\n tscore.pop(max_index)\n\n newDf = pd.DataFrame()\n newDf['score'] = rscore\n newDf['xmin'] = rstart\n newDf['xmax'] = rend\n return newDf\n\n\ndef video_post_process(opt, video_list, video_dict):\n for video_name in video_list:\n df = pd.read_csv(\"./output/BMN_results/\" + video_name + \".csv\")\n\n if len(df) > 1:\n snms_alpha = opt[\"soft_nms_alpha\"]\n snms_t1 = opt[\"soft_nms_low_thres\"]\n snms_t2 = opt[\"soft_nms_high_thres\"]\n df = soft_nms(df, snms_alpha, snms_t1, snms_t2)\n\n df = df.sort_values(by=\"score\", ascending=False)\n video_info = video_dict[video_name]\n video_duration = float(video_info[\"duration_frame\"] // 16 * 16) / video_info[\"duration_frame\"] * video_info[\n \"duration_second\"]\n proposal_list = []\n\n for j in range(min(100, len(df))):\n tmp_proposal = {}\n tmp_proposal[\"score\"] = df.score.values[j]\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\n min(1, df.xmax.values[j]) * video_duration]\n proposal_list.append(tmp_proposal)\n result_dict[video_name[2:]] = proposal_list\n\n\ndef BMN_post_processing(opt):\n video_dict = getDatasetDict(opt)\n video_list = list(video_dict.keys()) # [:100]\n global result_dict\n result_dict = mp.Manager().dict()\n\n num_videos = len(video_list)\n num_videos_per_thread = num_videos // opt[\"post_process_thread\"]\n processes = []\n for tid in range(opt[\"post_process_thread\"] - 1):\n tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread]\n p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))\n p.start()\n processes.append(p)\n tmp_video_list = 
video_list[(opt[\"post_process_thread\"] - 1) * num_videos_per_thread:]\n p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n result_dict = dict(result_dict)\n output_dict = {\"version\": \"VERSION 1.3\", \"results\": result_dict, \"external_data\": {}}\n outfile = open(opt[\"result_file\"], \"w\")\n json.dump(output_dict, outfile)\n outfile.close()\n\n# opt = opts.parse_opt()\n# opt = vars(opt)\n# BSN_post_processing(opt)\n"
] |
[
[
"numpy.square",
"numpy.array",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
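The heart of soft_nms above is its Gaussian decay: a proposal overlapping the currently selected best proposal is down-weighted rather than discarded. A small numeric sketch with arbitrary alpha/iou values:

    import numpy as np

    alpha = 0.4  # decay strength, opt["soft_nms_alpha"] in the entry above
    iou = 0.8    # overlap with the currently selected top-scoring proposal
    score = 0.9
    decayed = score * np.exp(-np.square(iou) / alpha)
    print(round(decayed, 4))  # ~0.1817: heavy overlap -> strong suppression

Because the proposal keeps a (reduced) score, a later iteration of the while-loop can still select it, which is why soft NMS retains near-duplicate temporal boundaries that hard NMS would drop.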
tosseto/robosat.pink
|
[
"81b891a5a21a531b66a9834bed73b0da6baa9867"
] |
[
"robosat_pink/tools/tile.py"
] |
[
"import os\nimport sys\nimport math\nfrom tqdm import tqdm\nimport concurrent.futures as futures\n\nimport numpy as np\n\nimport shutil\nimport mercantile\n\nfrom rasterio import open as rasterio_open\nfrom rasterio.vrt import WarpedVRT\nfrom rasterio.enums import Resampling\nfrom rasterio.warp import transform_bounds, calculate_default_transform\nfrom rasterio.transform import from_bounds\n\nfrom robosat_pink.core import load_config, check_classes, make_palette, web_ui\nfrom robosat_pink.tiles import (\n tile_image_to_file,\n tile_label_to_file,\n tile_from_slippy_map,\n tile_image_from_file,\n tile_label_from_file,\n)\n\n\ndef add_parser(subparser, formatter_class):\n parser = subparser.add_parser(\"tile\", help=\"Tile a raster, or a rasters coverage\", formatter_class=formatter_class)\n\n inp = parser.add_argument_group(\"Inputs\")\n inp.add_argument(\"rasters\", type=str, nargs=\"+\", help=\"path to raster files to tile [required]\")\n\n out = parser.add_argument_group(\"Output\")\n out.add_argument(\"--zoom\", type=int, required=True, help=\"zoom level of tiles [required]\")\n out.add_argument(\"--ts\", type=int, default=512, help=\"tile size in pixels [default: 512]\")\n out.add_argument(\"out\", type=str, help=\"output directory path [required]\")\n\n lab = parser.add_argument_group(\"Labels\")\n lab.add_argument(\"--label\", action=\"store_true\", help=\"if set, generate label tiles\")\n lab.add_argument(\"--config\", type=str, help=\"path to config file [required in label mode]\")\n\n perf = parser.add_argument_group(\"Performances\")\n perf.add_argument(\"--workers\", type=int, help=\"number of workers [default: CPU / 2]\")\n\n ui = parser.add_argument_group(\"Web UI\")\n ui.add_argument(\"--web_ui_base_url\", type=str, help=\"alternate Web UI base URL\")\n ui.add_argument(\"--web_ui_template\", type=str, help=\"alternate Web UI template path\")\n ui.add_argument(\"--no_web_ui\", action=\"store_true\", help=\"desactivate Web UI output\")\n\n parser.set_defaults(func=main)\n\n\ndef is_nodata(image, no_data=0, threshold=5):\n\n if (\n np.all(image[0, :, :] == no_data)\n or np.all(image[-1, :, :] == no_data)\n or np.all(image[:, 0, :] == no_data)\n or np.all(image[:, -1, :] == no_data)\n ):\n return True # pixel border is no_data, on all bands\n\n C, W, H = image.shape\n return np.sum(image[:, :, :] == no_data) > ((threshold * C * 100) / (W * H))\n\n\ndef main(args):\n\n if not args.workers:\n args.workers = max(1, math.floor(os.cpu_count() * 0.5))\n\n if args.label:\n config = load_config(args.config)\n check_classes(config)\n colors = [classe[\"color\"] for classe in config[\"classes\"]]\n palette = make_palette(*colors)\n\n splits_path = os.path.join(os.path.expanduser(args.out), \".splits\")\n tiles_map = {}\n\n print(\"RoboSat.pink - tile on CPU, with {} workers\".format(args.workers))\n\n bands = -1\n for path in args.rasters:\n raster = rasterio_open(path)\n w, s, e, n = transform_bounds(raster.crs, \"EPSG:4326\", *raster.bounds)\n\n if bands != -1:\n assert bands == len(raster.indexes), \"Coverage must be bands consistent\"\n bands = len(raster.indexes)\n\n tiles = [mercantile.Tile(x=x, y=y, z=z) for x, y, z in mercantile.tiles(w, s, e, n, args.zoom)]\n for tile in tiles:\n tile_key = (str(tile.x), str(tile.y), str(tile.z))\n if tile_key not in tiles_map.keys():\n tiles_map[tile_key] = []\n tiles_map[tile_key].append(path)\n\n if args.label:\n ext = \"png\"\n bands = 1\n if not args.label:\n if bands == 1:\n ext = \"png\"\n if bands == 3:\n ext = \"webp\"\n if bands > 3:\n 
ext = \"tiff\"\n\n tiles = []\n progress = tqdm(total=len(tiles_map), ascii=True, unit=\"tile\")\n # Begin to tile plain tiles\n with futures.ThreadPoolExecutor(args.workers) as executor:\n\n def worker(path):\n\n raster = rasterio_open(path)\n w, s, e, n = transform_bounds(raster.crs, \"EPSG:4326\", *raster.bounds)\n transform, _, _ = calculate_default_transform(raster.crs, \"EPSG:3857\", raster.width, raster.height, w, s, e, n)\n tiles = [mercantile.Tile(x=x, y=y, z=z) for x, y, z in mercantile.tiles(w, s, e, n, args.zoom)]\n tiled = []\n\n for tile in tiles:\n\n w, s, e, n = mercantile.xy_bounds(tile)\n\n # inspired by rio-tiler, cf: https://github.com/mapbox/rio-tiler/pull/45\n warp_vrt = WarpedVRT(\n raster,\n crs=\"epsg:3857\",\n resampling=Resampling.bilinear,\n add_alpha=False,\n transform=from_bounds(w, s, e, n, args.ts, args.ts),\n width=math.ceil((e - w) / transform.a),\n height=math.ceil((s - n) / transform.e),\n )\n data = warp_vrt.read(out_shape=(len(raster.indexes), args.ts, args.ts), window=warp_vrt.window(w, s, e, n))\n image = np.moveaxis(data, 0, 2) # C,H,W -> H,W,C\n\n tile_key = (str(tile.x), str(tile.y), str(tile.z))\n if not args.label and len(tiles_map[tile_key]) == 1 and is_nodata(image):\n progress.update()\n continue\n\n if len(tiles_map[tile_key]) > 1:\n out = os.path.join(splits_path, str(tiles_map[tile_key].index(path)))\n else:\n out = args.out\n\n x, y, z = map(int, tile)\n\n if not args.label:\n ret = tile_image_to_file(out, mercantile.Tile(x=x, y=y, z=z), image)\n if args.label:\n ret = tile_label_to_file(out, mercantile.Tile(x=x, y=y, z=z), palette, image)\n\n if not ret:\n sys.exit(\"Error: Unable to write tile {} from raster {}.\".format(str(tile), raster))\n\n if len(tiles_map[tile_key]) == 1:\n progress.update()\n tiled.append(mercantile.Tile(x=x, y=y, z=z))\n\n return tiled\n\n for tiled in executor.map(worker, args.rasters):\n if tiled is not None:\n tiles.extend(tiled)\n\n # Aggregate remaining tiles splits\n with futures.ThreadPoolExecutor(args.workers) as executor:\n\n def worker(tile_key):\n\n if len(tiles_map[tile_key]) == 1:\n return\n\n image = np.zeros((args.ts, args.ts, bands), np.uint8)\n\n x, y, z = map(int, tile_key)\n for i in range(len(tiles_map[tile_key])):\n root = os.path.join(splits_path, str(i))\n _, path = tile_from_slippy_map(root, x, y, z)\n\n if not args.label:\n split = tile_image_from_file(path)\n if args.label:\n split = tile_label_from_file(path)\n split = split.reshape((args.ts, args.ts, 1)) # H,W -> H,W,C\n\n assert image.shape == split.shape\n image[:, :, :] += split[:, :, :]\n\n if not args.label and is_nodata(image):\n progress.update()\n return\n\n tile = mercantile.Tile(x=x, y=y, z=z)\n\n if not args.label:\n ret = tile_image_to_file(args.out, tile, image)\n\n if args.label:\n ret = tile_label_to_file(args.out, tile, palette, image)\n\n if not ret:\n sys.exit(\"Error: Unable to write tile {}.\".format(str(tile_key)))\n\n progress.update()\n return tile\n\n for tiled in executor.map(worker, tiles_map.keys()):\n if tiled is not None:\n tiles.append(tiled)\n\n if splits_path and os.path.isdir(splits_path):\n shutil.rmtree(splits_path) # Delete suffixes dir if any\n\n if not args.no_web_ui:\n template = \"leaflet.html\" if not args.web_ui_template else args.web_ui_template\n base_url = args.web_ui_base_url if args.web_ui_base_url else \"./\"\n web_ui(args.out, base_url, tiles, tiles, ext, template)\n"
] |
[
[
"numpy.all",
"numpy.zeros",
"numpy.sum",
"numpy.moveaxis"
]
] |
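A sketch of the slippy-map tile enumeration that the worker above performs per raster (the bounding box is invented; mercantile.tiles expects WGS84 bounds such as those returned by transform_bounds):

    import mercantile

    w, s, e, n = -70.9, 42.3, -70.7, 42.5  # made-up EPSG:4326 bounds
    tiles = list(mercantile.tiles(w, s, e, n, 12))
    print(len(tiles), tiles[0])            # z12 tiles covering the box
    print(mercantile.xy_bounds(tiles[0]))  # web-mercator bounds used to window the WarpedVRT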
shibing624/textgen
|
[
"0a9d55f1f61d5217b8e06f1f23904e49afa84370",
"0a9d55f1f61d5217b8e06f1f23904e49afa84370"
] |
[
"textgen/question_answering/question_answering_model.py",
"textgen/language_generation/language_generation_model.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: refer https://github.com/ThilinaRajapakse/simpletransformers\n\"\"\"\nimport json\nimport math\nimport os\nimport random\nimport warnings\nfrom dataclasses import asdict\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom tqdm.auto import tqdm, trange\nfrom transformers import (\n AlbertConfig,\n AlbertForQuestionAnswering,\n AlbertTokenizer,\n AutoConfig,\n AutoModelForQuestionAnswering,\n AutoTokenizer,\n BartConfig,\n BartForQuestionAnswering,\n BartTokenizer,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n CamembertConfig,\n CamembertForQuestionAnswering,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n LongformerConfig,\n LongformerForQuestionAnswering,\n LongformerTokenizer,\n MPNetConfig,\n MPNetForQuestionAnswering,\n MPNetTokenizer,\n MobileBertConfig,\n MobileBertForQuestionAnswering,\n MobileBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n SqueezeBertConfig,\n SqueezeBertForQuestionAnswering,\n SqueezeBertTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n)\nfrom transformers.optimization import AdamW, Adafactor\nfrom transformers.optimization import (\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\n\nfrom textgen.config.model_args import QuestionAnsweringArgs\nfrom textgen.config.utils import sweep_config_to_sweep_values\nfrom textgen.custom_models.models import ElectraForQuestionAnswering, XLMRobertaForQuestionAnswering\nfrom textgen.question_answering.question_answering_utils import (\n LazyQuestionAnsweringDataset,\n RawResult,\n RawResultExtended,\n build_examples,\n get_best_predictions,\n get_best_predictions_extended,\n get_examples,\n load_hf_dataset,\n squad_convert_examples_to_features,\n to_list,\n write_predictions,\n write_predictions_extended,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nfrom textgen.utils.log import logger\n\n\nclass QuestionAnsweringModel:\n def __init__(self, model_type, model_name, args=None, use_cuda=True, cuda_device=-1, **kwargs):\n\n \"\"\"\n Initializes a QuestionAnsweringModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, distilbert)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n args (optional): Default args will be used if this parameter is not provided. If provided,\n it should be a dict containing the args that should be changed in the default args'\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. 
Will use the first available GPU by default.\n        \"\"\"  # noqa: ignore flake8\"\n\n        MODEL_CLASSES = {\n            \"albert\": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),\n            \"auto\": (AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer),  # (config, model, tokenizer) order, matching the unpacking below\n            \"bart\": (BartConfig, BartForQuestionAnswering, BartTokenizer),\n            \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n            \"camembert\": (CamembertConfig, CamembertForQuestionAnswering, CamembertTokenizer),\n            \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n            \"electra\": (ElectraConfig, ElectraForQuestionAnswering, ElectraTokenizer),\n            \"longformer\": (LongformerConfig, LongformerForQuestionAnswering, LongformerTokenizer),\n            \"mobilebert\": (MobileBertConfig, MobileBertForQuestionAnswering, MobileBertTokenizer),\n            \"mpnet\": (MPNetConfig, MPNetForQuestionAnswering, MPNetTokenizer),\n            \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n            \"squeezebert\": (SqueezeBertConfig, SqueezeBertForQuestionAnswering, SqueezeBertTokenizer),\n            \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n            \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForQuestionAnswering, XLMRobertaTokenizer),\n            \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n        }\n\n        self.args = self._load_model_args(model_name)\n\n        if isinstance(args, dict):\n            self.args.update_from_dict(args)\n        elif isinstance(args, QuestionAnsweringArgs):\n            self.args = args\n\n        if \"sweep_config\" in kwargs:\n            self.is_sweeping = True\n            sweep_config = kwargs.pop(\"sweep_config\")\n            sweep_values = sweep_config_to_sweep_values(sweep_config)\n            self.args.update_from_dict(sweep_values)\n        else:\n            self.is_sweeping = False\n\n        if self.args.manual_seed:\n            random.seed(self.args.manual_seed)\n            np.random.seed(self.args.manual_seed)\n            torch.manual_seed(self.args.manual_seed)\n            if self.args.n_gpu > 0:\n                torch.cuda.manual_seed_all(self.args.manual_seed)\n\n        if not use_cuda:\n            self.args.fp16 = False\n\n        config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n        self.config = config_class.from_pretrained(model_name, **self.args.config)\n        if not self.args.quantized_model:\n            self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n        else:\n            quantized_weights = torch.load(os.path.join(model_name, \"pytorch_model.bin\"))\n            self.model = model_class.from_pretrained(None, config=self.config, state_dict=quantized_weights)\n\n        if self.args.dynamic_quantize:\n            self.model = torch.quantization.quantize_dynamic(self.model, {torch.nn.Linear}, dtype=torch.qint8)\n        if self.args.quantized_model:\n            self.model.load_state_dict(quantized_weights)\n        if self.args.dynamic_quantize:\n            self.args.quantized_model = True\n\n        if use_cuda:\n            if torch.cuda.is_available():\n                if cuda_device == -1:\n                    self.device = torch.device(\"cuda\")\n                else:\n                    self.device = torch.device(f\"cuda:{cuda_device}\")\n            else:\n                raise ValueError(\n                    \"'use_cuda' set to True when cuda is unavailable.\"\n                    \" Make sure CUDA is available or set use_cuda=False.\"\n                )\n        else:\n            self.device = \"cpu\"\n\n        self.results = {}\n\n        if self.args.fp16:\n            try:\n                from torch.cuda import amp\n            except AttributeError:\n                raise AttributeError(\"fp16 requires Pytorch >= 1.6. 
Please update Pytorch or turn off fp16.\")\n\n if model_type == \"auto\":\n self.tokenizer = tokenizer_class.from_pretrained(model_name, **kwargs)\n else:\n self.tokenizer = tokenizer_class.from_pretrained(\n model_name, do_lower_case=self.args.do_lower_case, **kwargs\n )\n\n if self.args.special_tokens_list:\n self.tokenizer.add_tokens(self.args.special_tokens_list, special_tokens=True)\n self.model.resize_token_embeddings(len(self.tokenizer))\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. Wandb disabled.\")\n self.args.wandb_project = None\n\n def load_and_cache_examples(self, examples, evaluate=False, no_cache=False, output_examples=False):\n \"\"\"\n Converts a list of examples to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args.no_cache\n\n if not no_cache:\n os.makedirs(self.args.cache_dir, exist_ok=True)\n\n examples = get_examples(examples, is_training=not evaluate)\n\n mode = \"dev\" if evaluate else \"train\"\n cached_features_file = os.path.join(\n args.cache_dir, \"cached_{}_{}_{}_{}\".format(mode, args.model_type, args.max_seq_length, len(examples)),\n )\n\n if os.path.exists(cached_features_file) and (\n (not args.reprocess_input_data and not no_cache) or (mode == \"dev\" and args.use_cached_eval_features)\n ):\n features = torch.load(cached_features_file)\n logger.info(f\" Features loaded from cache at {cached_features_file}\")\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)\n\n if mode == \"dev\":\n all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_masks,\n all_token_type_ids,\n all_feature_index,\n all_cls_index,\n all_p_mask,\n )\n else:\n all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)\n all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_masks,\n all_token_type_ids,\n all_start_positions,\n all_end_positions,\n all_cls_index,\n all_p_mask,\n all_is_impossible,\n )\n else:\n logger.info(\" Converting to features started.\")\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n tqdm_enabled=not args.silent,\n threads=args.process_count,\n args=args,\n )\n\n if not no_cache:\n torch.save(features, cached_features_file)\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n def train_model(\n self, train_data, output_dir=False, show_running_loss=True, args=None, eval_data=None, 
verbose=True,\n **kwargs\n ):\n \"\"\"\n Trains the model using 'train_data'\n\n Args:\n train_data: Path to JSON file containing training data OR list of Python dicts in the correct format. The model will be trained on this data.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_data (optional): Path to JSON file containing evaluation data against which evaluation will be performed when evaluate_during_training is enabled.\n Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n Returns:\n global_step: Number of global steps trained\n training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n if self.args.silent:\n show_running_loss = False\n\n if self.args.evaluate_during_training and eval_data is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_data is not specified.\"\n \" Pass eval_data to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n if self.args.use_hf_datasets:\n train_dataset = load_hf_dataset(train_data, self.tokenizer, self.args, is_training=True)\n elif self.args.lazy_loading:\n if isinstance(train_data, str):\n train_dataset = LazyQuestionAnsweringDataset(train_data, self.tokenizer, self.args)\n else:\n raise ValueError(\"Input must be given as a path to a file when using lazy loading\")\n else:\n if isinstance(train_data, str):\n with open(train_data, \"r\", encoding=self.args.encoding) as f:\n train_examples = json.load(f)\n else:\n train_examples = train_data\n\n train_dataset = self.load_and_cache_examples(train_examples)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, training_details = self.train(\n train_dataset, output_dir, show_running_loss=show_running_loss, eval_data=eval_data, **kwargs\n )\n\n self.save_model(model=self.model)\n\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args.model_type, output_dir))\n\n return global_step, training_details\n\n def train(self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. 
Not intended to be used directly.\n \"\"\"\n\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args.tensorboard_dir)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n num_workers=self.args.dataloader_num_workers,\n )\n\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n optimizer_grouped_parameters = []\n custom_parameter_names = set()\n for group in self.args.custom_parameter_groups:\n params = group.pop(\"params\")\n custom_parameter_names.update(params)\n param_group = {**group}\n param_group[\"params\"] = [p for n, p in model.named_parameters() if n in params]\n optimizer_grouped_parameters.append(param_group)\n\n for group in self.args.custom_layer_parameters:\n layer_number = group.pop(\"layer\")\n layer = f\"layer.{layer_number}.\"\n group_d = {**group}\n group_nd = {**group}\n group_nd[\"weight_decay\"] = 0.0\n params_d = []\n params_nd = []\n for n, p in model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n group_d[\"params\"] = params_d\n group_nd[\"params\"] = params_nd\n\n optimizer_grouped_parameters.append(group_d)\n optimizer_grouped_parameters.append(group_nd)\n\n if not self.args.train_custom_parameters_only:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n\n if args.optimizer == \"AdamW\":\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n elif args.optimizer == \"Adafactor\":\n optimizer = Adafactor(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adafactor_eps,\n clip_threshold=args.adafactor_clip_threshold,\n decay_rate=args.adafactor_decay_rate,\n beta1=args.adafactor_beta1,\n weight_decay=args.weight_decay,\n scale_parameter=args.adafactor_scale_parameter,\n relative_step=args.adafactor_relative_step,\n warmup_init=args.adafactor_warmup_init,\n )\n print(\"Using Adafactor for T5\")\n else:\n raise ValueError(\n \"{} is not a valid optimizer class. 
Please use one of ('AdamW', 'Adafactor') instead.\".format(\n args.optimizer\n )\n )\n\n if args.scheduler == \"constant_schedule\":\n scheduler = get_constant_schedule(optimizer)\n\n elif args.scheduler == \"constant_schedule_with_warmup\":\n scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)\n\n elif args.scheduler == \"linear_schedule_with_warmup\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n elif args.scheduler == \"cosine_schedule_with_warmup\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"cosine_with_hard_restarts_schedule_with_warmup\":\n scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"polynomial_decay_schedule_with_warmup\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n lr_end=args.polynomial_decay_schedule_lr_end,\n power=args.polynomial_decay_schedule_power,\n )\n\n else:\n raise ValueError(\"{} is not a valid scheduler.\".format(args.scheduler))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n training_progress_scores = None\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent, mininterval=0)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args.model_name and os.path.exists(args.model_name):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name.split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the current epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(**kwargs)\n\n if args.wandb_project:\n wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)\n wandb.watch(self.model)\n\n if args.fp16:\n from torch.cuda import amp\n\n scaler = amp.GradScaler()\n\n for _ in train_iterator:\n model.train()\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n train_iterator.set_description(f\"Epoch {epoch_number + 1} of {args.num_train_epochs}\")\n batch_iterator = tqdm(\n train_dataloader,\n desc=f\"Running Epoch {epoch_number} of {args.num_train_epochs}\",\n disable=args.silent,\n mininterval=0,\n )\n for step, batch in 
enumerate(batch_iterator):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs = self._get_inputs_dict(batch)\n if args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n else:\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n batch_iterator.set_description(\n f\"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}\"\n )\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n scaler.unscale_(optimizer)\n if args.optimizer == \"AdamW\":\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n if args.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_last_lr()[0], global_step)\n tb_writer.add_scalar(\n \"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step,\n )\n logging_loss = tr_loss\n if args.wandb_project or self.is_sweeping:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_last_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self.save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = self.eval_model(eval_data, verbose=False, **kwargs)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args.save_eval_checkpoints:\n self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir, optimizer, scheduler, 
model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n model.train()\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self.save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and args.evaluate_each_epoch:\n results, _ = self.eval_model(eval_data, verbose=False, **kwargs)\n\n self.save_model(output_dir_current, optimizer, scheduler, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False)\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: 
{args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n return (\n global_step,\n tr_loss / global_step if not self.args.evaluate_during_training else training_progress_scores,\n )\n\n def eval_model(self, eval_data, output_dir=None, verbose=False, verbose_logging=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Path to JSON file containing evaluation data OR list of Python dicts in the correct format. The model will be evaluated on this data.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n verbose_logging: Log info related to feature conversion and writing predictions.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. (correct, similar, incorrect)\n text: A dictionary containing the 3 dictionaries correct_text, similar_text (the predicted answer is a substring of the correct answer or vise versa), incorrect_text.\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n all_predictions, all_nbest_json, scores_diff_json, eval_loss = self.evaluate(\n eval_data, output_dir, verbose_logging=verbose\n )\n\n if isinstance(eval_data, str):\n with open(eval_data, \"r\", encoding=self.args.encoding) as f:\n truth = json.load(f)\n else:\n truth = eval_data\n\n result, texts = self.calculate_results(truth, all_predictions, **kwargs)\n result[\"eval_loss\"] = eval_loss\n\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, texts\n\n def evaluate(self, eval_data, output_dir, verbose_logging=False):\n \"\"\"\n Evaluates the model on eval_data.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n tokenizer = self.tokenizer\n model = self.model\n args = self.args\n\n if isinstance(eval_data, str):\n with open(eval_data, \"r\", encoding=self.args.encoding) as f:\n eval_examples = json.load(f)\n else:\n eval_examples = eval_data\n\n eval_dataset, examples, features = self.load_and_cache_examples(\n eval_examples, evaluate=True, output_examples=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n all_results = []\n for batch in tqdm(eval_dataloader, disable=args.silent, desc=\"Running Evaluation\"):\n batch = tuple(t.to(self.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if self.args.model_type in [\n \"xlm\",\n \"roberta\",\n \"distilbert\",\n \"camembert\",\n \"electra\",\n \"xlmroberta\",\n \"bart\",\n ]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n if self.args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n eval_loss += outputs[0].mean().item()\n else:\n outputs = model(**inputs)\n eval_loss += outputs[0].mean().item()\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=to_list(outputs[0][i]),\n start_top_index=to_list(outputs[1][i]),\n end_top_log_probs=to_list(outputs[2][i]),\n end_top_index=to_list(outputs[3][i]),\n cls_logits=to_list(outputs[4][i]),\n )\n else:\n result = RawResult(\n unique_id=unique_id,\n start_logits=to_list(outputs[0][i]),\n end_logits=to_list(outputs[1][i]),\n )\n all_results.append(result)\n\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n\n prefix = \"test\"\n os.makedirs(output_dir, exist_ok=True)\n\n output_prediction_file = os.path.join(output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_{}.json\".format(prefix))\n output_null_log_odds_file = os.path.join(output_dir, \"null_odds_{}.json\".format(prefix))\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n (all_predictions, all_nbest_json, scores_diff_json,) = write_predictions_extended(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n eval_data,\n model.config.start_n_top,\n model.config.end_n_top,\n True,\n tokenizer,\n verbose_logging,\n )\n else:\n all_predictions, all_nbest_json, scores_diff_json = write_predictions(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n False,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n verbose_logging,\n True,\n args.null_score_diff_threshold,\n )\n\n return all_predictions, all_nbest_json, scores_diff_json, eval_loss\n\n def predict(self, to_predict, n_best_size=None):\n \"\"\"\n Performs predictions on a 
list of python dicts containing contexts and qas.\n\n Args:\n to_predict: A python list of python dicts containing contexts and questions to be sent to the model for prediction.\n E.g: predict([\n {\n 'context': \"Some context as a demo\",\n 'qas': [\n {'id': '0', 'question': 'What is the context here?'},\n {'id': '1', 'question': 'What is this for?'}\n ]\n }\n ])\n n_best_size (Optional): Number of predictions to return. args.n_best_size will be used if not specified.\n\n Returns:\n list: A python list of dicts containing the predicted answer/answers, and id for each question in to_predict.\n list: A python list of dicts containing the predicted probability/probabilities, and id for each question in to_predict.\n \"\"\" # noqa: ignore flake8\"\n tokenizer = self.tokenizer\n device = self.device\n model = self.model\n args = self.args\n\n if not n_best_size:\n n_best_size = args.n_best_size\n\n self._move_model_to_device()\n\n eval_examples = build_examples(to_predict)\n eval_dataset, examples, features = self.load_and_cache_examples(\n eval_examples, evaluate=True, output_examples=True, no_cache=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n all_results = []\n for batch in tqdm(eval_dataloader, disable=args.silent, desc=\"Running Prediction\"):\n batch = tuple(t.to(self.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if self.args.model_type in [\n \"xlm\",\n \"roberta\",\n \"distilbert\",\n \"camembert\",\n \"electra\",\n \"xlmroberta\",\n \"bart\",\n ]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n if self.args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=to_list(outputs[0][i]),\n start_top_index=to_list(outputs[1][i]),\n end_top_log_probs=to_list(outputs[2][i]),\n end_top_index=to_list(outputs[3][i]),\n cls_logits=to_list(outputs[4][i]),\n )\n else:\n result = RawResult(\n unique_id=unique_id,\n start_logits=to_list(outputs[0][i]),\n end_logits=to_list(outputs[1][i]),\n )\n all_results.append(result)\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n answers = get_best_predictions_extended(\n examples,\n features,\n all_results,\n n_best_size,\n args.max_answer_length,\n model.config.start_n_top,\n model.config.end_n_top,\n True,\n tokenizer,\n args.null_score_diff_threshold,\n )\n else:\n answers = get_best_predictions(\n examples, features, all_results, n_best_size, args.max_answer_length, False, False, True, False,\n )\n\n answer_list = [{\"id\": answer[\"id\"], \"answer\": answer[\"answer\"][:-1]} for answer in answers]\n probability_list = [{\"id\": answer[\"id\"], \"probability\": answer[\"probability\"][:-1]} for answer in answers]\n\n return answer_list, probability_list\n\n def calculate_results(self, truth, predictions, 
**kwargs):\n truth_dict = {}\n questions_dict = {}\n for item in truth:\n for answer in item[\"qas\"]:\n if answer[\"answers\"]:\n truth_dict[answer[\"id\"]] = answer[\"answers\"][0][\"text\"]\n else:\n truth_dict[answer[\"id\"]] = \"\"\n questions_dict[answer[\"id\"]] = answer[\"question\"]\n\n correct = 0\n incorrect = 0\n similar = 0\n correct_text = {}\n incorrect_text = {}\n similar_text = {}\n predicted_answers = []\n true_answers = []\n\n for q_id, answer in truth_dict.items():\n predicted_answers.append(predictions[q_id])\n true_answers.append(answer)\n if predictions[q_id].strip() == answer.strip():\n correct += 1\n correct_text[q_id] = answer\n elif predictions[q_id].strip() in answer.strip() or answer.strip() in predictions[q_id].strip():\n similar += 1\n similar_text[q_id] = {\n \"truth\": answer,\n \"predicted\": predictions[q_id],\n \"question\": questions_dict[q_id],\n }\n else:\n incorrect += 1\n incorrect_text[q_id] = {\n \"truth\": answer,\n \"predicted\": predictions[q_id],\n \"question\": questions_dict[q_id],\n }\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(true_answers, predicted_answers)\n\n result = {\"correct\": correct, \"similar\": similar, \"incorrect\": incorrect, **extra_metrics}\n\n texts = {\n \"correct_text\": correct_text,\n \"similar_text\": similar_text,\n \"incorrect_text\": incorrect_text,\n }\n\n return result, texts\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _get_inputs_dict(self, batch):\n if self.args.use_hf_datasets:\n inputs = {key: value.to(self.device) for key, value in batch.items()}\n\n if self.args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\", \"electra\", \"xlmroberta\", \"bart\"]:\n del inputs[\"token_type_ids\"]\n if self.args.model_type not in [\"xlnet\", \"xlm\"]:\n del inputs[\"cls_index\"]\n del inputs[\"p_mask\"]\n\n return inputs\n else:\n batch = tuple(t.to(self.device) for t in batch)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n\n if self.args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\", \"electra\", \"xlmroberta\", \"bart\"]:\n del inputs[\"token_type_ids\"]\n\n if self.args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n\n return inputs\n\n def _create_training_progress_scores(self, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n training_progress_scores = {\n \"global_step\": [],\n \"correct\": [],\n \"similar\": [],\n \"incorrect\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):\n if not output_dir:\n output_dir = self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n\n if model and not self.args.no_save:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler and self.args.save_optimizer_and_scheduler:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, 
\"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n self.save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = QuestionAnsweringArgs()\n args.load(input_dir)\n return args\n\n def get_named_parameters(self):\n return [n for n, p in self.model.named_parameters()]\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: refer https://github.com/ThilinaRajapakse/simpletransformers\n\"\"\"\n\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom transformers import (\n CTRLConfig,\n CTRLLMHeadModel,\n CTRLTokenizer,\n GPT2Config,\n GPT2LMHeadModel,\n GPT2Tokenizer,\n OpenAIGPTConfig,\n OpenAIGPTLMHeadModel,\n OpenAIGPTTokenizer,\n TransfoXLConfig,\n TransfoXLLMHeadModel,\n TransfoXLTokenizer,\n XLMConfig,\n XLMTokenizer,\n XLMWithLMHeadModel,\n XLNetConfig,\n XLNetLMHeadModel,\n XLNetTokenizer,\n)\n\nfrom textgen.config.model_args import LanguageGenerationArgs\nfrom textgen.config.utils import sweep_config_to_sweep_values\nfrom textgen.language_generation.language_generation_utils import PREPROCESSING_FUNCTIONS\nfrom textgen.utils.log import logger\n\nMAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop\n\n\nclass LanguageGenerationModel:\n def __init__(\n self, model_type, model_name, args=None, use_cuda=True, cuda_device=-1, **kwargs,\n ):\n\n \"\"\"\n Initializes a LanguageGenerationModel model.\n\n Args:\n model_type: The type of model (gpt2, ctrl, openai-gpt, xlnet, transfo-xl, xlm)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\n\n MODEL_CLASSES = {\n \"gpt2\": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),\n \"ctrl\": (CTRLConfig, CTRLLMHeadModel, CTRLTokenizer),\n \"openai-gpt\": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n \"xlnet\": (XLNetConfig, XLNetLMHeadModel, XLNetTokenizer),\n \"transfo-xl\": (TransfoXLConfig, TransfoXLLMHeadModel, TransfoXLTokenizer),\n \"xlm\": (XLMConfig, XLMWithLMHeadModel, XLMTokenizer),\n }\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, LanguageGenerationArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n self.is_sweeping = True\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = sweep_config_to_sweep_values(sweep_config)\n self.args.update_from_dict(sweep_values)\n else:\n self.is_sweeping = False\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \"Make sure CUDA is available or set `use_cuda=False`.\"\n )\n else:\n self.device = \"cpu\"\n\n if self.args.special_tokens_list:\n self.tokenizer.add_tokens(self.args.special_tokens_list, special_tokens=True)\n self.model.resize_token_embeddings(len(self.tokenizer))\n\n self.args.model_name = 
model_name\n self.args.model_type = model_type\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n\n if self.args.tokenizer_name:\n self.tokenizer = tokenizer_class.from_pretrained(self.args.tokenizer_name, cache_dir=self.args.cache_dir)\n else:\n self.tokenizer = tokenizer_class.from_pretrained(model_name, cache_dir=self.args.cache_dir, **kwargs)\n self.args.tokenizer_name = model_name\n\n if self.args.config_name:\n self.config = config_class.from_pretrained(self.args.config_name, cache_dir=self.args.cache_dir)\n else:\n self.config = config_class.from_pretrained(model_name, cache_dir=self.args.cache_dir, **kwargs)\n\n self.model = model_class.from_pretrained(\n model_name, config=self.config, cache_dir=self.args.cache_dir, **kwargs,\n )\n\n self.model.to(self.device)\n\n def generate(self, prompt=None, args=None, verbose=True):\n\n \"\"\"\n Generate text using a LanguageGenerationModel\n\n Args:\n prompt (optional): A prompt text for the model. If given, will override args.prompt\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n verbose (optional): If verbose, generated text will be logged to the console.\n Returns:\n generated_sequences: Sequences of text generated by the model.\n \"\"\" # noqa: ignore flake8\"\n\n model = self.model\n tokenizer = self.tokenizer\n device = self.device\n\n if args:\n self.args.update_from_dict(args)\n\n if prompt:\n self.args.prompt = prompt\n elif not self.args.prompt:\n self.args.prompt = input(\"Model prompt >>> \")\n\n prompt_text = self.args.prompt\n args = self.args\n\n # Different models need different input formatting and/or extra arguments\n requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys()\n if requires_preprocessing:\n prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)\n preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)\n encoded_prompt = tokenizer.encode(\n preprocessed_prompt_text,\n add_special_tokens=False,\n return_tensors=\"pt\",\n )\n else:\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors=\"pt\")\n encoded_prompt = encoded_prompt.to(device)\n\n output_sequences = model.generate(\n input_ids=encoded_prompt,\n max_length=args.max_length + len(encoded_prompt[0]),\n temperature=args.temperature,\n top_k=args.top_k,\n top_p=args.top_p,\n repetition_penalty=args.repetition_penalty,\n do_sample=args.do_sample,\n num_return_sequences=args.num_return_sequences,\n )\n\n # Remove the batch dimension when returning multiple sequences\n if len(output_sequences.shape) > 2:\n output_sequences.squeeze_()\n\n generated_sequences = []\n\n for generated_sequence_idx, generated_sequence in enumerate(output_sequences):\n if verbose:\n logger.info(\"=== GENERATED SEQUENCE {} ===\".format(generated_sequence_idx + 1))\n generated_sequence = generated_sequence.tolist()\n\n # Decode text\n text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)\n\n # Remove all text after the stop token\n text = text[: text.find(args.stop_token) if args.stop_token else None]\n\n # Add the prompt at the beginning of the sequence. 
Remove the excess text that was used for pre-processing\n total_sequence = (\n prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]\n )\n\n generated_sequences.append(total_sequence)\n if verbose:\n logger.info(total_sequence)\n\n return generated_sequences\n\n def save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = LanguageGenerationArgs()\n args.load(input_dir)\n return args\n"
] |
[
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"pandas.DataFrame",
"torch.tensor",
"torch.cuda.amp.autocast",
"torch.cuda.amp.GradScaler",
"torch.quantization.quantize_dynamic",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device",
"torch.nn.DataParallel",
"torch.save"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device"
]
] |
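
The two evaluation loops in the simpletransformers-style QA code above share one PyTorch recipe: a SequentialSampler-backed DataLoader, model.eval() plus torch.no_grad(), and torch.cuda.amp.autocast on the fp16 path. Below is a minimal runnable sketch of that recipe; the linear model and random tensors are hypothetical stand-ins for the QA model and features.

import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset

model = torch.nn.Linear(8, 2)                        # toy stand-in for the QA model
dataset = TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))

# deterministic order, as in the evaluate()/predict() methods above
eval_dataloader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=8)

model.eval()
eval_loss, nb_eval_steps = 0.0, 0
use_fp16 = torch.cuda.is_available()                 # autocast only does real fp16 on GPU
with torch.no_grad():
    for features, labels in eval_dataloader:
        with torch.cuda.amp.autocast(enabled=use_fp16):
            logits = model(features)
            loss = torch.nn.functional.cross_entropy(logits, labels)
        eval_loss += loss.item()                     # accumulate, then average over steps
        nb_eval_steps += 1
print("eval loss:", eval_loss / nb_eval_steps)
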
mcdobe100/arkouda
|
[
"499ecb502da214ee71e9923fda9487c65ec4d616"
] |
[
"tests/operator_tests.py"
] |
[
"import numpy as np\nimport warnings\nfrom itertools import product\nfrom base_test import ArkoudaTest\nfrom context import arkouda as ak\nSIZE = 10\nverbose = ArkoudaTest.verbose\n\ndef run_tests(verbose):\n # ignore numpy warnings like divide by 0\n np.seterr(all='ignore')\n global pdarrays\n pdarrays = {'int64': ak.arange(0, SIZE, 1),\n 'uint64': ak.array(np.arange(0, SIZE, 1, dtype=np.uint64)),\n 'float64': ak.linspace(0, 2, SIZE),\n 'bool': (ak.arange(0, SIZE, 1) % 2) == 0}\n global ndarrays\n ndarrays = {'int64': np.arange(0, SIZE, 1),\n 'uint64': np.arange(0, SIZE, 1, dtype=np.uint64),\n 'float64': np.linspace(0, 2, SIZE),\n 'bool': (np.arange(0, SIZE, 1) % 2) == 0}\n global scalars\n #scalars = {k: v[SIZE//2] for k, v in ndarrays.items()}\n scalars = {'int64': 5,\n 'uint64': np.uint64(5),\n 'float64': 3.14159,\n 'bool': True}\n dtypes = pdarrays.keys()\n if verbose:\n print(\"Operators: \", ak.pdarray.BinOps)\n print(\"Dtypes: \", dtypes)\n print(\"pdarrays: \")\n for k, v in pdarrays.items():\n print(k, \": \", v)\n print(\"ndarrays: \")\n for k, v in ndarrays.items():\n print(k, \": \", v)\n print(\"scalars: \")\n for k, v in scalars.items():\n print(k, \": \", v)\n\n def do_op(lt, rt, ls, rs, isarkouda, oper):\n evalstr = ''\n if ls:\n evalstr += 'scalars[\"{}\"]'.format(lt)\n else:\n evalstr += '{}[\"{}\"]'.format(('ndarrays', 'pdarrays')[isarkouda], lt)\n evalstr += ' {} '.format(oper)\n if rs:\n evalstr += 'scalars[\"{}\"]'.format(rt)\n else:\n evalstr += '{}[\"{}\"]'.format(('ndarrays', 'pdarrays')[isarkouda], rt)\n #print(evalstr)\n res = eval(evalstr)\n return res\n\n results = {'neither_implement': [], # (expression, ak_error)\n 'arkouda_minus_numpy': [], # (expression, ak_result, error_on_exec?)\n 'numpy_minus_arkouda': [], # (expression, ak_result, error_on_exec?)\n 'both_implement': []} # (expression, ak_result, error_on_exec?, dtype_mismatch?, value_mismatch?)\n tests = 0\n for ltype, rtype, op in product(dtypes, dtypes, ak.pdarray.BinOps):\n if op in (\"<<<\", \">>>\"):\n continue\n for lscalar, rscalar in ((False, False), (False, True), (True, False)):\n tests += 1\n expression = \"{}({}) {} {}({})\".format(ltype, ('array', 'scalar')[lscalar], op, rtype, ('array', 'scalar')[rscalar])\n try:\n npres = do_op(ltype, rtype, lscalar, rscalar, False, op)\n except TypeError: # numpy doesn't implement operation\n try:\n akres = do_op(ltype, rtype, lscalar, rscalar, True, op)\n except RuntimeError as e:\n if 'not implemented' or 'unrecognized type' in str(e): # neither numpy nor arkouda implement\n results['neither_implement'].append((expression, str(e)))\n else: # arkouda implements with error, np does not implement\n results['arkouda_minus_numpy'].append((expression, str(e), True))\n continue\n # arkouda implements but not numpy\n results['arkouda_minus_numpy'].append((expression, str(akres), False))\n continue\n try:\n akres = do_op(ltype, rtype, lscalar, rscalar, True, op)\n except RuntimeError as e:\n if 'not implemented' or 'unrecognized type' in str(e): # numpy implements but not arkouda\n results['numpy_minus_arkouda'].append((expression, str(e), True))\n else: # both implement, but arkouda errors\n results['both_implement'].append((expression, str(e), True, False, False))\n continue\n # both numpy and arkouda execute without error\n try:\n akrestype = akres.dtype\n except Exception as e:\n warnings.warn(\"Cannot detect return dtype of ak result: {} (np result: {})\".format(akres, npres))\n results['both_implement'].append((expression, str(akres), False, True, 
False))\n                continue\n\n            if akrestype != npres.dtype:\n                restypes = \"{}(np) vs. {}(ak)\".format(npres.dtype, akrestype)\n                #warnings.warn(\"dtype mismatch: {} = {}\".format(expression, restypes))\n                results['both_implement'].append((expression, restypes, False, True, False))\n                continue\n            try:\n                akasnp = akres.to_ndarray()\n            except Exception as e:\n                warnings.warn(\"Could not convert to ndarray: {}\".format(akres))\n                results['both_implement'].append((expression, str(akres), True, False, False))\n                continue\n            if not np.allclose(akasnp, npres, equal_nan=True):\n                res = \"np: {}\\nak: {}\".format(npres, akasnp)\n                # warnings.warn(\"result mismatch: {} =\\n{}\".format(expression, res))\n                results['both_implement'].append((expression, res, False, False, True))\n                continue\n            # Finally, both numpy and arkouda agree on result\n            results['both_implement'].append((expression, \"\", False, False, False))\n\n    print(\"# ops not implemented by numpy or arkouda: {}\".format(len(results['neither_implement'])))\n    if verbose:\n        for expression, err in results['neither_implement']:\n            print(expression)\n    print(\"# ops implemented by numpy but not arkouda: {}\".format(len(results['numpy_minus_arkouda'])))\n    if verbose:\n        for expression, err, flag in results['numpy_minus_arkouda']:\n            print(expression)\n    print(\"# ops implemented by arkouda but not numpy: {}\".format(len(results['arkouda_minus_numpy'])))\n    if verbose:\n        for expression, res, flag in results['arkouda_minus_numpy']:\n            print(expression, \" -> \", res)\n    nboth = len(results['both_implement'])\n    print(\"# ops implemented by both: {}\".format(nboth))\n    matches = 0\n    execerrors = []\n    dtypeerrors = []\n    valueerrors = []\n    for (expression, res, ex, dt, val) in results['both_implement']:\n        matches += not any((ex, dt, val))\n        if ex: execerrors.append((expression, res))\n        if dt: dtypeerrors.append((expression, res))\n        if val: valueerrors.append((expression, res))\n    print(\" Matching results: {} / {}\".format(matches, nboth))\n    print(\" Arkouda execution errors: {} / {}\".format(len(execerrors), nboth))\n    if verbose: print('\\n'.join(map(': '.join, execerrors)))\n    print(\" Dtype mismatches: {} / {}\".format(len(dtypeerrors), nboth))\n    if verbose: print('\\n'.join(map(': '.join, dtypeerrors)))\n    print(\" Value mismatches: {} / {}\".format(len(valueerrors), nboth))\n    if verbose: print('\\n'.join(map(': '.join, valueerrors)))\n    return matches == nboth\n\n'''\nEncapsulates test cases that invoke the run_tests method.\n'''\nclass OperatorsTest(ArkoudaTest):\n\n    def testPdArrayAddInt(self):\n        aArray = ak.ones(100)\n        addArray = aArray + 1\n        self.assertIsInstance(addArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(2),addArray[0])\n\n        addArray = 1 + aArray\n        self.assertIsInstance(addArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(2), addArray[0])\n\n    def testPdArrayAddNumpyInt(self):\n        aArray = ak.ones(100)\n        addArray = aArray + np.int64(1)\n        self.assertIsInstance(addArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(2), addArray[0])\n\n        addArray = np.int64(1) + aArray\n        self.assertIsInstance(addArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(2), addArray[0])\n\n    def testPdArraySubtractInt(self):\n        aArray = ak.ones(100)\n        subArray = aArray - 2\n        self.assertIsInstance(subArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(-1), subArray[0])\n\n        subArray = 2 - aArray\n        self.assertIsInstance(subArray, ak.pdarrayclass.pdarray)\n        self.assertEqual(np.float64(1), subArray[0])\n\n    def testPdArraySubtractNumpyInt(self):\n        aArray = 
ak.ones(100)\n subArray = aArray - np.int64(2)\n self.assertIsInstance(subArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(-1), subArray[0])\n\n subArray = np.int64(2) - aArray\n self.assertIsInstance(subArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(1), subArray[0])\n\n def testPdArrayMultInt(self):\n aArray = ak.ones(100)\n mArray = aArray*5\n self.assertIsInstance(mArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), mArray[0])\n\n mArray = 5*aArray\n self.assertIsInstance(mArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), mArray[0])\n\n def testPdArrayMultNumpyInt(self):\n aArray = ak.ones(100)\n mArray = aArray*np.int64(5)\n self.assertIsInstance(mArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), mArray[0])\n\n mArray = np.int64(5)*aArray\n self.assertIsInstance(mArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), mArray[0])\n\n def testPdArrayDivideInt(self):\n aArray = ak.ones(100)\n dArray = aArray*15/3\n self.assertIsInstance(dArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), dArray[0])\n\n dArray = 15*aArray/3\n self.assertIsInstance(dArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), dArray[0])\n\n def testPdArrayDivideNumpyInt(self):\n aArray = ak.ones(100)\n dArray = aArray*np.int64(15)/3\n self.assertIsInstance(dArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), dArray[0])\n\n dArray = np.int64(15)*aArray/3\n self.assertIsInstance(dArray, ak.pdarrayclass.pdarray)\n self.assertEqual(np.float64(5), dArray[0])\n \n def testPdArrayConcatenation(self):\n onesOne = ak.randint(0, 100, 100)\n onesTwo = ak.randint(0, 100, 100)\n \n result = ak.concatenate([onesOne,onesTwo])\n self.assertEqual(200, len(result))\n self.assertEqual(np.int64,result.dtype)\n\n def testConcatenate(self):\n pdaOne = ak.arange(1,4)\n pdaTwo = ak.arange(4,7) \n \n self.assertTrue((ak.array([1,2,3,4,5,6])\n == ak.concatenate([pdaOne,pdaTwo])).all())\n self.assertTrue((ak.array([4,5,6,1,2,3])\n == ak.concatenate([pdaTwo,pdaOne])).all())\n \n pdaOne = ak.linspace(start=1,stop=3,length=3)\n pdaTwo = ak.linspace(start=4,stop=6,length=3) \n \n self.assertTrue((ak.array([1,2,3,4,5,6])\n == ak.concatenate([pdaOne,pdaTwo])).all())\n self.assertTrue((ak.array([4,5,6,1,2,3])\n == ak.concatenate([pdaTwo,pdaOne])).all())\n\n pdaOne = ak.array([True,False,True])\n pdaTwo = ak.array([False,True,True])\n \n self.assertTrue((ak.array([True, False, True, False, True, True]) == \n ak.concatenate([pdaOne,pdaTwo])).all())\n\n def test_concatenate_type_preservation(self):\n # Test that concatenate preserves special pdarray types (IPv4, Datetime, BitVector, ...)\n from arkouda.util import concatenate as akuconcat\n pda_one = ak.arange(1, 4)\n pda_two = ak.arange(4, 7)\n pda_concat = ak.concatenate([pda_one, pda_two])\n\n # IPv4 test\n ipv4_one = ak.IPv4(pda_one)\n ipv4_two = ak.IPv4(pda_two)\n ipv4_concat = ak.concatenate([ipv4_one, ipv4_two])\n self.assertEqual(type(ipv4_concat), ak.IPv4)\n self.assertListEqual(ak.IPv4(pda_concat).to_ndarray().tolist(), ipv4_concat.to_ndarray().tolist())\n # test single and empty\n self.assertEqual(type(ak.concatenate([ipv4_one])), ak.IPv4)\n self.assertListEqual(ak.IPv4(pda_one).to_ndarray().tolist(), ak.concatenate([ipv4_one]).to_ndarray().tolist())\n self.assertEqual(type(ak.concatenate([ak.IPv4(ak.array([], dtype=ak.int64))])), ak.IPv4)\n\n # Datetime test\n datetime_one = ak.Datetime(pda_one)\n datetime_two = ak.Datetime(pda_two)\n datetime_concat = 
ak.concatenate([datetime_one, datetime_two])\n self.assertEqual(type(datetime_concat), ak.Datetime)\n self.assertListEqual(ak.Datetime(pda_concat).to_ndarray().tolist(), datetime_concat.to_ndarray().tolist())\n # test single and empty\n self.assertEqual(type(ak.concatenate([datetime_one])), ak.Datetime)\n self.assertListEqual(ak.Datetime(pda_one).to_ndarray().tolist(), ak.concatenate([datetime_one]).to_ndarray().tolist())\n self.assertEqual(type(ak.concatenate([ak.Datetime(ak.array([], dtype=ak.int64))])), ak.Datetime)\n\n # Timedelta test\n timedelta_one = ak.Timedelta(pda_one)\n timedelta_two = ak.Timedelta(pda_two)\n timedelta_concat = ak.concatenate([timedelta_one, timedelta_two])\n self.assertEqual(type(timedelta_concat), ak.Timedelta)\n self.assertListEqual(ak.Timedelta(pda_concat).to_ndarray().tolist(), timedelta_concat.to_ndarray().tolist())\n # test single and empty\n self.assertEqual(type(ak.concatenate([timedelta_one])), ak.Timedelta)\n self.assertListEqual(ak.Timedelta(pda_one).to_ndarray().tolist(), ak.concatenate([timedelta_one]).to_ndarray().tolist())\n self.assertEqual(type(ak.concatenate([ak.Timedelta(ak.array([], dtype=ak.int64))])), ak.Timedelta)\n\n # BitVector test\n bitvector_one = ak.BitVector(pda_one)\n bitvector_two = ak.BitVector(pda_two)\n bitvector_concat = ak.concatenate([bitvector_one, bitvector_two])\n self.assertEqual(type(bitvector_concat), ak.BitVector)\n self.assertListEqual(ak.BitVector(pda_concat).to_ndarray().tolist(), bitvector_concat.to_ndarray().tolist())\n # test single and empty\n self.assertEqual(type(ak.concatenate([bitvector_one])), ak.BitVector)\n self.assertListEqual(ak.BitVector(pda_one).to_ndarray().tolist(), ak.concatenate([bitvector_one]).to_ndarray().tolist())\n self.assertEqual(type(ak.concatenate([ak.BitVector(ak.array([], dtype=ak.int64))])), ak.BitVector)\n\n # Test failure with mixed types\n with self.assertRaises(TypeError):\n ak.concatenate(datetime_one, bitvector_two)\n\n # verify ak.util.concatenate still works\n ipv4_akuconcat = akuconcat([ipv4_one, ipv4_two])\n self.assertEqual(type(ipv4_akuconcat), ak.IPv4)\n self.assertListEqual(ak.IPv4(pda_concat).to_ndarray().tolist(), ipv4_akuconcat.to_ndarray().tolist())\n\n datetime_akuconcat = akuconcat([datetime_one, datetime_two])\n self.assertEqual(type(datetime_akuconcat), ak.Datetime)\n self.assertListEqual(ak.Datetime(pda_concat).to_ndarray().tolist(), datetime_akuconcat.to_ndarray().tolist())\n\n timedelta_akuconcat = akuconcat([timedelta_one, timedelta_two])\n self.assertEqual(type(timedelta_akuconcat), ak.Timedelta)\n self.assertListEqual(ak.Timedelta(pda_concat).to_ndarray().tolist(), timedelta_akuconcat.to_ndarray().tolist())\n\n bitvector_akuconcat = akuconcat([bitvector_one, bitvector_two])\n self.assertEqual(type(bitvector_akuconcat), ak.BitVector)\n self.assertListEqual(ak.BitVector(pda_concat).to_ndarray().tolist(), bitvector_akuconcat.to_ndarray().tolist())\n\n def testAllOperators(self):\n run_tests(verbose)\n \n def testErrorHandling(self):\n # Test NotImplmentedError that prevents pddarray iteration \n with self.assertRaises(NotImplementedError):\n iter(ak.ones(100))\n \n # Test NotImplmentedError that prevents Strings iteration \n with self.assertRaises(NotImplementedError):\n iter(ak.array(['String {}'.format(i) for i in range(0,10)]))\n \n # Test ak,histogram against unsupported dtype\n #with self.assertRaises(ValueError) as cm:\n # ak.histogram((ak.randint(0, 1, 100, dtype=ak.bool)))\n \n with self.assertRaises(RuntimeError) as cm:\n 
ak.concatenate([ak.array([True]),ak.array([True])]).is_sorted()\n\n        with self.assertRaises(TypeError):\n            ak.ones(100).any([0])\n\n        with self.assertRaises(AttributeError) as cm:\n            ak.unique(list(range(0,10)))\n\n        with self.assertRaises(ValueError) as cm:\n            ak.concatenate([ak.ones(100),ak.array([True])])\n\n    def test_str_repr(self):\n        \"\"\"\n        Test 3 different types: int, float, bool with lengths under/over threshold\n        Do this for both __str__() and __repr__()\n        \"\"\"\n        ak.client.pdarrayIterThresh = 5\n        # Test __str__()\n        self.assertEqual(\"[1 2 3]\", ak.array([1, 2, 3]).__str__())\n        self.assertEqual(\"[1 2 3 ... 17 18 19]\", ak.arange(1, 20).__str__())\n        self.assertEqual(\"[1.100000e+00 2.300000e+00 5.000000e+00]\", ak.array([1.1, 2.3, 5]).__str__())\n        self.assertEqual(\"[0.000000e+00 5.263158e-01 1.052632e+00 ... 8.947368e+00 9.473684e+00 1.000000e+01]\",\n                         ak.linspace(0, 10, 20).__str__())\n        self.assertEqual(\"[False False False]\", ak.isnan(ak.array([1.1, 2.3, 5])).__str__())\n        self.assertEqual(\"[False False False ... False False False]\", ak.isnan(ak.linspace(0, 10, 20)).__str__())\n\n        # Test __repr__()\n        self.assertEqual(\"array([1 2 3])\", ak.array([1, 2, 3]).__repr__())\n        self.assertEqual(\"array([1 2 3 ... 17 18 19])\", ak.arange(1, 20).__repr__())\n        self.assertEqual(\"array([1.1000000000000001 2.2999999999999998 5])\", ak.array([1.1, 2.3, 5]).__repr__())\n        self.assertEqual(\"array([0 0.52631578947368418 1.0526315789473684 ... 8.9473684210526319 9.473684210526315 10])\",\n                         ak.linspace(0, 10, 20).__repr__())\n        self.assertEqual(\"array([False False False])\", ak.isnan(ak.array([1.1, 2.3, 5])).__repr__())\n        self.assertEqual(\"array([False False False ... False False False])\", ak.isnan(ak.linspace(0, 10, 20)).__repr__())\n        ak.client.pdarrayIterThresh = ak.client.pdarrayIterThreshDefVal # Don't forget to set this back for other tests.\n\n\n\nif __name__ == '__main__':\n    '''\n    Enables invocation of operator tests outside of pytest test harness\n    '''\n    import sys\n    if len(sys.argv) not in (3, 4):\n        print(f\"Usage: {sys.argv[0]} <server_name> <port> [<verbose>=(0|1)]\")\n        sys.exit(1)\n    verbose = False\n    if len(sys.argv) == 4 and sys.argv[3] == \"1\":\n        verbose = True\n    ak.connect(server=sys.argv[1], port=int(sys.argv[2]))\n    success = run_tests(verbose)\n    ak.disconnect()\n    sys.exit((1, 0)[success])\n"
] |
[
[
"numpy.allclose",
"numpy.linspace",
"numpy.arange",
"numpy.seterr",
"numpy.int64",
"numpy.uint64",
"numpy.float64"
]
] |
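
run_tests above sweeps every (left dtype, right dtype, operator, scalar/array) combination, eval()-ing the generated expression against both numpy and arkouda and bucketing each outcome. Here is a self-contained sketch of that expression-matrix idea, numpy-only so it runs without an arkouda server; the dtype table and operator list are trimmed stand-ins.

import numpy as np
from itertools import product

# numpy-only stand-ins for the pdarrays/ndarrays tables above
arrays = {'int64': np.arange(5),
          'float64': np.linspace(0, 2, 5),
          'bool': np.arange(5) % 2 == 0}
ops = ['+', '*', '==', '//']

for ltype, rtype, op in product(arrays, arrays, ops):
    expression = 'arrays["{}"] {} arrays["{}"]'.format(ltype, op, rtype)
    try:
        res = eval(expression)
        print(expression, '->', res.dtype)
    except TypeError as e:          # this backend does not implement the combination
        print(expression, '-> not implemented:', e)
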
hjyjh/wechat_articles_spider
|
[
"38da6cde565f8d6e09a5822b25b0dcfb1a8cb239"
] |
[
"test/test_GetUrls.py"
] |
[
"# coding: utf-8\nimport json\nimport os\nimport random\nimport time\nfrom pprint import pprint\n\nimport pandas as pd\nfrom wechatarticles import ArticlesInfo\nfrom wechatarticles.utils import get_history_urls, verify_url\n\n# 快速获取大量文章urls(利用历史文章获取链接)\n\n\ndef save_xlsx(fj, lst):\n df = pd.DataFrame(lst, columns=[\"url\", \"title\", \"date\", \"read_num\", \"like_num\"])\n df.to_excel(fj + \".xlsx\", encoding=\"utf-8\")\n\n\ndef demo(lst):\n # 抓取示例,供参考,不保证有效\n fj = \"公众号名称\"\n item_lst = []\n for i, line in enumerate(lst, 0):\n print(\"index:\", i)\n # item = json.loads('{' + line + '}', strict=False)\n item = line\n timestamp = item[\"comm_msg_info\"][\"datetime\"]\n ymd = time.localtime(timestamp)\n date = \"{}-{}-{}\".format(ymd.tm_year, ymd.tm_mon, ymd.tm_mday)\n\n infos = item[\"app_msg_ext_info\"]\n url_title_lst = [[infos[\"content_url\"], infos[\"title\"]]]\n if \"multi_app_msg_item_list\" in infos.keys():\n url_title_lst += [\n [info[\"content_url\"], info[\"title\"]]\n for info in infos[\"multi_app_msg_item_list\"]\n ]\n\n for url, title in url_title_lst:\n try:\n if not verify_url(url):\n continue\n # 获取文章阅读数在看点赞数\n read_num, like_num, old_like_num = ai.read_like_nums(url)\n print(read_num, like_num)\n item_lst.append([url, title, date, read_num, like_num])\n time.sleep(random.randint(5, 10))\n except Exception as e:\n print(e)\n flag = 1\n break\n finally:\n save_xlsx(fj, item_lst)\n\n if flag == 1:\n break\n\n save_xlsx(fj, item_lst)\n\n\nif __name__ == \"__main__\":\n # 需要抓取公众号的__biz参数\n biz = \"\"\n # 个人微信号登陆后获取的uin\n uin = \"\"\n # 个人微信号登陆后获取的key,隔段时间更新\n key = \"\"\n\n lst = get_history_urls(\n biz, uin, key, lst=[], start_timestamp=0, start_count=0, end_count=10\n )\n print(\"抓取到的文章链接\")\n print(lst)\n\n # 个人微信号登陆后获取的token\n appmsg_token = \"\"\n # 个人微信号登陆后获取的cookie\n cookie = \"\"\n # 获取点赞数、阅读数、评论信息\n ai = ArticlesInfo(appmsg_token, cookie)\n\n # url:微信文章链接. lst[0][\"app_msg_ext_info\"][\"content_url\"]\n read_num, like_num, old_like_num = ai.read_like_nums(url)\n item = ai.comments(url)\n print(\"阅读:{}; 在看: {}; 点赞: {}\".format(read_num, like_num, old_like_num))\n print(\"评论信息\")\n pprint(item)\n"
] |
[
[
"pandas.DataFrame"
]
] |
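
demo() above rewrites the spreadsheet inside a finally clause on every article, so an exception mid-scrape still leaves everything collected so far on disk. The same checkpointing pattern, reduced to a runnable sketch (the loop body, filename, and row values are placeholders; to_csv is used here to avoid the openpyxl dependency of to_excel):

import pandas as pd

item_lst = []
try:
    for i in range(3):                        # stands in for the article loop above
        item_lst.append(["http://example.com/%d" % i, "title %d" % i,
                         "2024-1-1", 100 + i, 10 + i])
        # time.sleep(random.randint(5, 10))   # polite delay, as in the original
finally:
    # rewrite the whole checkpoint each time; a crash loses at most one row
    df = pd.DataFrame(item_lst, columns=["url", "title", "date", "read_num", "like_num"])
    df.to_csv("articles.csv", index=False, encoding="utf-8")
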
crochat/exareme
|
[
"94c0ab79b0096a2bac65b13bdf975ff6bbebfea2",
"94c0ab79b0096a2bac65b13bdf975ff6bbebfea2"
] |
[
"Exareme-Docker/src/mip-algorithms/LOGISTIC_REGRESSION/init/1/global.py",
"Exareme-Docker/src/mip-algorithms/LOGISTIC_REGRESSION/step/1/local.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nfrom os import path\nfrom argparse import ArgumentParser\nimport numpy as np\n\nsys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/utils/')\nsys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) +\n '/LOGISTIC_REGRESSION/')\n\nfrom algorithm_utils import StateData, ExaremeError\nfrom log_regr_lib import LogRegrInit_Loc2Glob_TD, LogRegrIter_Glob2Loc_TD\n\n\ndef logregr_global_init(global_in):\n n_obs, n_cols, y_val_dict, schema_X, schema_Y = global_in.get_data()\n\n if n_obs == 0:\n raise ExaremeError('The selected variables contain 0 datapoints.')\n\n # Init vars\n ll = - 2 * n_obs * np.log(2)\n coeff = np.zeros(n_cols)\n iter = 0\n\n # Pack state and results\n global_state = StateData(n_obs=n_obs, n_cols=n_cols, ll=ll, coeff=coeff, iter=iter,\n y_val_dict=y_val_dict, schema_X=schema_X, schema_Y=schema_Y)\n global_out = LogRegrIter_Glob2Loc_TD(coeff)\n\n return global_state, global_out\n\n\ndef main():\n # Parse arguments\n parser = ArgumentParser()\n parser.add_argument('-cur_state_pkl', required=True,\n help='Path to the pickle file holding the current state.')\n parser.add_argument('-local_step_dbs', required=True,\n help='Path to db holding local step results.')\n args, unknown = parser.parse_known_args()\n fname_cur_state = path.abspath(args.cur_state_pkl)\n local_dbs = path.abspath(args.local_step_dbs)\n\n # Load local nodes output\n local_out = LogRegrInit_Loc2Glob_TD.load(local_dbs)\n # Run algorithm global step\n global_state, global_out = logregr_global_init(global_in=local_out)\n # Save global state\n global_state.save(fname=fname_cur_state)\n # Return the algorithm's output\n global_out.transfer()\n\n\nif __name__ == '__main__':\n main()\n",
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nfrom os import path\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom scipy.special import expit\n\nsys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/utils/')\nsys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) +\n '/LOGISTIC_REGRESSION/')\n\nfrom algorithm_utils import StateData\nfrom log_regr_lib import LogRegrIter_Loc2Glob_TD, LogRegrIter_Glob2Loc_TD\n\n\ndef logregr_local_iter(local_state, local_in):\n # Unpack local state\n X, Y = local_state['X'], local_state['Y']\n # Unpack local input\n coeff = local_in.get_data()\n\n # Auxiliary quantities\n z = np.dot(X, coeff)\n s = expit(z)\n d = np.multiply(s, (1 - s))\n D = np.diag(d)\n # Hessian\n hess = np.dot(\n np.transpose(X),\n np.dot(D, X)\n )\n # Gradient\n grad = np.dot(\n np.transpose(X),\n np.dot(\n D,\n z + np.divide(Y - s, d)\n )\n )\n # Log-likelihood\n ls1, ls2 = np.log(s), np.log(1 - s)\n ll = np.dot(Y, ls1) + np.dot(1 - Y, ls2)\n\n # Pack state and results\n local_state = StateData(X=X, Y=Y)\n local_out = LogRegrIter_Loc2Glob_TD(ll, grad, hess)\n return local_state, local_out\n\n\ndef main():\n # Parse arguments\n parser = ArgumentParser()\n parser.add_argument('-cur_state_pkl', required=True,\n help='Path to the pickle file holding the current state.')\n parser.add_argument('-prev_state_pkl', required=True,\n help='Path to the pickle file holding the previous state.')\n parser.add_argument('-global_step_db', required=True,\n help='Path to db holding global step results.')\n args, unknown = parser.parse_known_args()\n # raise ValueError(args)\n fname_cur_state = path.abspath(args.cur_state_pkl)\n fname_prev_state = path.abspath(args.prev_state_pkl)\n global_db = path.abspath(args.global_step_db)\n\n # Load local state\n local_state = StateData.load(fname_prev_state).data\n # Load global node output\n global_out = LogRegrIter_Glob2Loc_TD.load(global_db)\n # Run algorithm local iteration step\n local_state, local_out = logregr_local_iter(local_state=local_state, local_in=global_out)\n # Save local state\n local_state.save(fname=fname_cur_state)\n # Return\n local_out.transfer()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.log",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.dot",
"numpy.log",
"scipy.special.expit",
"numpy.multiply",
"numpy.transpose",
"numpy.divide"
]
] |
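
The two Exareme scripts above split Newton-Raphson (IRLS) for logistic regression across nodes: each local step computes s = expit(X @ coeff), d = s * (1 - s), the Hessian X^T D X, and the working-response gradient X^T D (z + (y - s)/d); the global step then solves hess @ coeff_new = grad for the next coefficients. A single-machine sketch of that iteration on toy data follows (in the federated setting, the per-node hess and grad would be summed before the solve).

import numpy as np
from scipy.special import expit

# toy design matrix with an intercept column and labels from a known model
rng = np.random.default_rng(0)
X = np.c_[np.ones(100), rng.normal(size=(100, 2))]
y = (rng.random(100) < expit(X @ np.array([0.5, 1.0, -1.0]))).astype(float)

coeff = np.zeros(X.shape[1])                 # same init as logregr_global_init above
for it in range(10):
    z = X @ coeff
    s = expit(z)
    d = s * (1 - s)
    hess = X.T @ (d[:, None] * X)            # X^T D X without materializing np.diag(d)
    grad = X.T @ (d * (z + (y - s) / d))     # X^T D (z + (y - s)/d)
    coeff = np.linalg.solve(hess, grad)      # global step: solve for the new coefficients
print(coeff)
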
onsiteiq/OpenSfM
|
[
"77335de7fe5ae30a2af6b23ff4488a8ef18738d1"
] |
[
"opensfm/features.py"
] |
[
"\"\"\"Tools to extract features.\"\"\"\n\nimport time\nimport logging\nimport numpy as np\nimport sys\nimport cv2\n\nfrom opensfm import context\nfrom opensfm import csfm\n\nlogger = logging.getLogger(__name__)\n\n\ndef resized_image(image, config):\n \"\"\"Resize image to feature_process_size.\"\"\"\n max_size = config['feature_process_size']\n h, w, _ = image.shape\n size = max(w, h)\n if 0 < max_size < size:\n dsize = w * max_size // size, h * max_size // size\n return cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_AREA)\n else:\n return image\n\n\ndef root_feature(desc, l2_normalization=False):\n if l2_normalization:\n s2 = np.linalg.norm(desc, axis=1)\n desc = (desc.T / s2).T\n s = np.sum(desc, 1)\n desc = np.sqrt(desc.T / s).T\n return desc\n\n\ndef root_feature_surf(desc, l2_normalization=False, partial=False):\n \"\"\"\n Experimental square root mapping of surf-like feature, only work for 64-dim surf now\n \"\"\"\n if desc.shape[1] == 64:\n if l2_normalization:\n s2 = np.linalg.norm(desc, axis=1)\n desc = (desc.T/s2).T\n if partial:\n ii = np.array([i for i in range(64) if (i % 4 == 2 or i % 4 == 3)])\n else:\n ii = np.arange(64)\n desc_sub = np.abs(desc[:, ii])\n desc_sub_sign = np.sign(desc[:, ii])\n # s_sub = np.sum(desc_sub, 1) # This partial normalization gives slightly better results for AKAZE surf\n s_sub = np.sum(np.abs(desc), 1)\n desc_sub = np.sqrt(desc_sub.T / s_sub).T\n desc[:, ii] = desc_sub*desc_sub_sign\n return desc\n\n\ndef normalized_image_coordinates(pixel_coords, width, height):\n size = max(width, height)\n p = np.empty((len(pixel_coords), 2))\n p[:, 0] = (pixel_coords[:, 0] + 0.5 - width / 2.0) / size\n p[:, 1] = (pixel_coords[:, 1] + 0.5 - height / 2.0) / size\n return p\n\n\ndef denormalized_image_coordinates(norm_coords, width, height):\n size = max(width, height)\n p = np.empty((len(norm_coords), 2))\n p[:, 0] = norm_coords[:, 0] * size - 0.5 + width / 2.0\n p[:, 1] = norm_coords[:, 1] * size - 0.5 + height / 2.0\n return p\n\n\ndef mask_and_normalize_features(points, desc, colors, width, height, mask=None):\n \"\"\"Remove features outside the mask and normalize image coordinates.\"\"\"\n\n if mask is not None:\n ids = np.array([_in_mask(point, width, height, mask) for point in points])\n points = points[ids]\n desc = desc[ids]\n colors = colors[ids]\n\n points[:, :2] = normalized_image_coordinates(points[:, :2], width, height)\n points[:, 2:3] /= max(width, height)\n return points, desc, colors\n\n\ndef _in_mask(point, width, height, mask):\n \"\"\"Check if a point is inside a binary mask.\"\"\"\n u = mask.shape[1] * (point[0] + 0.5) / width\n v = mask.shape[0] * (point[1] + 0.5) / height\n return mask[int(v), int(u)][0] != 0\n\n\ndef extract_features_sift(image, config):\n sift_edge_threshold = config['sift_edge_threshold']\n sift_peak_threshold = float(config['sift_peak_threshold'])\n if context.OPENCV3:\n try:\n detector = cv2.xfeatures2d.SIFT_create(\n edgeThreshold=sift_edge_threshold,\n contrastThreshold=sift_peak_threshold)\n except AttributeError as ae:\n if \"no attribute 'xfeatures2d'\" in ae.message:\n logger.error('OpenCV Contrib modules are required to extract SIFT features')\n raise\n descriptor = detector\n else:\n detector = cv2.FeatureDetector_create('SIFT')\n descriptor = cv2.DescriptorExtractor_create('SIFT')\n detector.setDouble('edgeThreshold', sift_edge_threshold)\n while True:\n logger.debug('Computing sift with threshold {0}'.format(sift_peak_threshold))\n t = time.time()\n if context.OPENCV3:\n detector = 
cv2.xfeatures2d.SIFT_create(\n                edgeThreshold=sift_edge_threshold,\n                contrastThreshold=sift_peak_threshold)\n        else:\n            detector.setDouble(\"contrastThreshold\", sift_peak_threshold)\n        points = detector.detect(image)\n        logger.debug('Found {0} points in {1}s'.format(len(points), time.time() - t))\n        if len(points) < config['feature_min_frames'] and sift_peak_threshold > 0.0001:\n            sift_peak_threshold = (sift_peak_threshold * 2) / 3\n            logger.debug('reducing threshold')\n        else:\n            logger.debug('done')\n            break\n    points, desc = descriptor.compute(image, points)\n    if config['feature_root']:\n        desc = root_feature(desc)\n    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])\n    return points, desc\n\n\ndef extract_features_surf(image, config):\n    surf_hessian_threshold = config['surf_hessian_threshold']\n    if context.OPENCV3:\n        try:\n            detector = cv2.xfeatures2d.SURF_create()\n        except AttributeError as ae:\n            if \"no attribute 'xfeatures2d'\" in str(ae):\n                logger.error('OpenCV Contrib modules are required to extract SURF features')\n            raise\n        descriptor = detector\n        detector.setHessianThreshold(surf_hessian_threshold)\n        detector.setNOctaves(config['surf_n_octaves'])\n        detector.setNOctaveLayers(config['surf_n_octavelayers'])\n        detector.setUpright(config['surf_upright'])\n    else:\n        detector = cv2.FeatureDetector_create('SURF')\n        descriptor = cv2.DescriptorExtractor_create('SURF')\n        detector.setDouble('hessianThreshold', surf_hessian_threshold)\n        detector.setDouble('nOctaves', config['surf_n_octaves'])\n        detector.setDouble('nOctaveLayers', config['surf_n_octavelayers'])\n        detector.setInt('upright', config['surf_upright'])\n\n    while True:\n        logger.debug('Computing surf with threshold {0}'.format(surf_hessian_threshold))\n        t = time.time()\n        if context.OPENCV3:\n            detector.setHessianThreshold(surf_hessian_threshold)\n        else:\n            detector.setDouble(\"hessianThreshold\", surf_hessian_threshold)  # default: 0.04\n        points = detector.detect(image)\n        logger.debug('Found {0} points in {1}s'.format(len(points), time.time() - t))\n        if len(points) < config['feature_min_frames'] and surf_hessian_threshold > 0.0001:\n            surf_hessian_threshold = (surf_hessian_threshold * 2) / 3\n            logger.debug('reducing threshold')\n        else:\n            logger.debug('done')\n            break\n\n    points, desc = descriptor.compute(image, points)\n    if config['feature_root']:\n        desc = root_feature_surf(desc, partial=True)\n    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])\n    return points, desc\n\n\ndef akaze_descriptor_type(name):\n    d = csfm.AkazeDescriptorType.__dict__\n    if name in d:\n        return d[name]\n    else:\n        logger.debug('Wrong akaze descriptor type')\n        return d['MSURF']\n\n\ndef extract_features_akaze(image, config):\n    options = csfm.AKAZEOptions()\n    options.omax = config['akaze_omax']\n    akaze_descriptor_name = config['akaze_descriptor']\n    options.descriptor = akaze_descriptor_type(akaze_descriptor_name)\n    options.descriptor_size = config['akaze_descriptor_size']\n    options.descriptor_channels = config['akaze_descriptor_channels']\n    options.dthreshold = config['akaze_dthreshold']\n    options.kcontrast_percentile = config['akaze_kcontrast_percentile']\n    options.use_isotropic_diffusion = config['akaze_use_isotropic_diffusion']\n    options.target_num_features = config['feature_min_frames']\n    options.use_adaptive_suppression = config['feature_use_adaptive_suppression']\n\n    logger.debug('Computing AKAZE with threshold {0}'.format(options.dthreshold))\n    t = time.time()\n    points, desc = csfm.akaze(image, options)\n    logger.debug('Found {0} 
points in {1}s'.format(len(points), time.time() - t))\n\n if config['feature_root']:\n if akaze_descriptor_name in [\"SURF_UPRIGHT\", \"MSURF_UPRIGHT\"]:\n desc = root_feature_surf(desc, partial=True)\n elif akaze_descriptor_name in [\"SURF\", \"MSURF\"]:\n desc = root_feature_surf(desc, partial=False)\n points = points.astype(float)\n return points, desc\n\n\ndef extract_features_hahog(image, config):\n t = time.time()\n points, desc = csfm.hahog(image.astype(np.float32) / 255, # VlFeat expects pixel values between 0, 1\n peak_threshold=config['hahog_peak_threshold'],\n edge_threshold=config['hahog_edge_threshold'],\n target_num_features=config['feature_min_frames'],\n use_adaptive_suppression=config['feature_use_adaptive_suppression'])\n\n if config['feature_root']:\n desc = np.sqrt(desc)\n uchar_scaling = 362 # x * 512 < 256 => sqrt(x) * 362 < 256\n else:\n uchar_scaling = 512\n\n if config['hahog_normalize_to_uchar']:\n desc = (uchar_scaling * desc).clip(0, 255).round()\n\n logger.debug('Found {0} points in {1}s'.format(len(points), time.time() - t))\n return points, desc\n\n\ndef extract_features_orb(image, config):\n if context.OPENCV3:\n detector = cv2.ORB_create(nfeatures=int(config['feature_min_frames']))\n descriptor = detector\n else:\n detector = cv2.FeatureDetector_create('ORB')\n descriptor = cv2.DescriptorExtractor_create('ORB')\n detector.setDouble('nFeatures', config['feature_min_frames'])\n\n logger.debug('Computing ORB')\n t = time.time()\n points = detector.detect(image)\n\n points, desc = descriptor.compute(image, points)\n points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])\n\n logger.debug('Found {0} points in {1}s'.format(len(points), time.time() - t))\n return points, desc\n\n\ndef extract_features(color_image, config, mask=None):\n \"\"\"Detect features in an image.\n\n The type of feature detected is determined by the ``feature_type``\n config option.\n\n The coordinates of the detected points are returned in normalized\n image coordinates.\n\n Returns:\n tuple:\n - points: ``x``, ``y``, ``size`` and ``angle`` for each feature\n - descriptors: the descriptor of each feature\n - colors: the color of the center of each feature\n \"\"\"\n assert len(color_image.shape) == 3\n color_image = resized_image(color_image, config)\n image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)\n\n feature_type = config['feature_type'].upper()\n if feature_type == 'SIFT':\n points, desc = extract_features_sift(image, config)\n elif feature_type == 'SURF':\n points, desc = extract_features_surf(image, config)\n elif feature_type == 'AKAZE':\n points, desc = extract_features_akaze(image, config)\n elif feature_type == 'HAHOG':\n points, desc = extract_features_hahog(image, config)\n elif feature_type == 'ORB':\n points, desc = extract_features_orb(image, config)\n else:\n raise ValueError('Unknown feature type '\n '(must be SURF, SIFT, AKAZE, HAHOG or ORB)')\n\n xs = points[:, 0].round().astype(int)\n ys = points[:, 1].round().astype(int)\n colors = color_image[ys, xs]\n\n return mask_and_normalize_features(points, desc, colors,\n image.shape[1], image.shape[0], mask)\n\n\ndef build_flann_index(features, config):\n FLANN_INDEX_LINEAR = 0\n FLANN_INDEX_KDTREE = 1\n FLANN_INDEX_KMEANS = 2\n FLANN_INDEX_COMPOSITE = 3\n FLANN_INDEX_KDTREE_SINGLE = 4\n FLANN_INDEX_HIERARCHICAL = 5\n FLANN_INDEX_LSH = 6\n\n if features.dtype.type is np.float32:\n FLANN_INDEX_METHOD = FLANN_INDEX_KMEANS\n else:\n FLANN_INDEX_METHOD = FLANN_INDEX_LSH\n\n flann_params = 
dict(algorithm=FLANN_INDEX_METHOD,\n branching=config['flann_branching'],\n iterations=config['flann_iterations'])\n\n return context.flann_Index(features, flann_params)\n\n\nFEATURES_VERSION = 1\nFEATURES_HEADER = 'OPENSFM_FEATURES_VERSION'\n\n\ndef load_features(filepath, config):\n \"\"\" Load features from filename \"\"\"\n s = np.load(filepath)\n version = _features_file_version(s)\n return getattr(sys.modules[__name__], '_load_features_v%d' % version)(s, config)\n\n\ndef _features_file_version(obj):\n \"\"\" Retrieve features file version. Return 0 if none \"\"\"\n if FEATURES_HEADER in obj:\n return obj[FEATURES_HEADER]\n else:\n return 0\n\n\ndef _load_features_v0(s, config):\n \"\"\" Base version of features file\n\n Scale (desc[2]) set to reprojection_error_sd by default (legacy behaviour)\n \"\"\"\n feature_type = config['feature_type']\n if feature_type == 'HAHOG' and config['hahog_normalize_to_uchar']:\n descriptors = s['descriptors'].astype(np.float32)\n else:\n descriptors = s['descriptors']\n points = s['points']\n points[:, 2:3] = config['reprojection_error_sd']\n return points, descriptors, s['colors'].astype(float)\n\n\ndef _load_features_v1(s, config):\n \"\"\" Version 1 of features file\n\n Scale is not properly set higher in the pipeline, default is gone.\n \"\"\"\n feature_type = config['feature_type']\n if feature_type == 'HAHOG' and config['hahog_normalize_to_uchar']:\n descriptors = s['descriptors'].astype(np.float32)\n else:\n descriptors = s['descriptors']\n return s['points'], descriptors, s['colors'].astype(float)\n\n\ndef save_features(filepath, points, desc, colors, config):\n feature_type = config['feature_type']\n if ((feature_type == 'AKAZE' and config['akaze_descriptor'] in ['MLDB_UPRIGHT', 'MLDB'])\n or (feature_type == 'HAHOG' and config['hahog_normalize_to_uchar'])\n or (feature_type == 'ORB')):\n feature_data_type = np.uint8\n else:\n feature_data_type = np.float32\n np.savez_compressed(filepath,\n points=points.astype(np.float32),\n descriptors=desc.astype(feature_data_type),\n colors=colors,\n OPENSFM_FEATURES_VERSION=FEATURES_VERSION)\n"
] |
[
[
"numpy.sqrt",
"numpy.abs",
"numpy.arange",
"numpy.linalg.norm",
"numpy.sign",
"numpy.load",
"numpy.array",
"numpy.sum"
]
] |
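
normalized_image_coordinates and denormalized_image_coordinates above define OpenSfM's coordinate convention: pixel coordinates are shifted to the pixel center, translated to the image center, and divided by max(width, height), so the longer image axis spans roughly [-0.5, 0.5] at any resolution. A round-trip check of that convention with toy 640x480 points:

import numpy as np

def normalized(p, w, h):
    size = max(w, h)
    return np.c_[(p[:, 0] + 0.5 - w / 2.0) / size, (p[:, 1] + 0.5 - h / 2.0) / size]

def denormalized(q, w, h):
    size = max(w, h)
    return np.c_[q[:, 0] * size - 0.5 + w / 2.0, q[:, 1] * size - 0.5 + h / 2.0]

pix = np.array([[0.0, 0.0], [639.0, 479.0], [320.0, 240.0]])
norm = normalized(pix, 640, 480)
assert np.allclose(denormalized(norm, 640, 480), pix)   # exact round trip
print(norm)   # image corners map to roughly +/-0.5 along the long axis
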
hungcao0402/PaddleOCR-Vietnamese
|
[
"fae0985c902a7a6e541b08c9c708a55593a89b64"
] |
[
"tools/infer/predict_system.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\nimport subprocess\n\n__dir__ = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(__dir__)\nsys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))\n\nos.environ[\"FLAGS_allocator_strategy\"] = 'auto_growth'\n\nimport cv2\nimport copy\nimport numpy as np\nimport time\nimport logging\nfrom PIL import Image\nimport tools.infer.utility as utility\nimport tools.infer.predict_rec as predict_rec\nimport tools.infer.predict_det as predict_det\nfrom ppocr.utils.utility import get_image_file_list\nfrom ppocr.utils.logging import get_logger\nfrom tools.infer.utility import draw_ocr_box_txt, get_rotate_crop_image\n\nlogger = get_logger()\n\ndef check_and_read_gif(img_path):\n if os.path.basename(img_path)[-3:] in ['gif', 'GIF']:\n gif = cv2.VideoCapture(img_path)\n ret, frame = gif.read()\n if not ret:\n logger = logging.getLogger('ppocr')\n logger.info(\"Cannot read {}. This gif image maybe corrupted.\")\n return None, False\n if len(frame.shape) == 2 or frame.shape[-1] == 1:\n frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)\n imgvalue = frame[:, :, ::-1]\n return imgvalue, True\n return None, False\n\nclass TextSystem(object):\n def __init__(self, args):\n if not args.show_log:\n logger.setLevel(logging.INFO)\n\n self.text_detector = predict_det.TextDetector(args)\n self.text_recognizer = predict_rec.TextRecognizer(args)\n self.use_angle_cls = args.use_angle_cls\n self.drop_score = args.drop_score\n\n def print_draw_crop_rec_res(self, img_crop_list, rec_res):\n bbox_num = len(img_crop_list)\n for bno in range(bbox_num):\n cv2.imwrite(\"./output/img_crop_%d.jpg\" % bno, img_crop_list[bno])\n logger.info(bno, rec_res[bno])\n\n def __call__(self, img, cls=True):\n ori_im = img.copy()\n dt_boxes, elapse = self.text_detector(img)\n\n logger.debug(\"dt_boxes num : {}, elapse : {}\".format(\n len(dt_boxes), elapse))\n if dt_boxes is None:\n return None, None\n img_crop_list = []\n\n dt_boxes = sorted_boxes(dt_boxes)\n\n for bno in range(len(dt_boxes)):\n tmp_box = copy.deepcopy(dt_boxes[bno])\n img_crop = get_rotate_crop_image(ori_im, tmp_box)\n img_crop_list.append(img_crop)\n if self.use_angle_cls and cls:\n img_crop_list, angle_list, elapse = self.text_classifier(\n img_crop_list)\n logger.debug(\"cls num : {}, elapse : {}\".format(\n len(img_crop_list), elapse))\n\n rec_res, elapse = self.text_recognizer(img_crop_list)\n logger.debug(\"rec_res num : {}, elapse : {}\".format(\n len(rec_res), elapse))\n # self.print_draw_crop_rec_res(img_crop_list, rec_res)\n filter_boxes, filter_rec_res = [], []\n for box, rec_reuslt in zip(dt_boxes, rec_res):\n text, score = rec_reuslt\n if score >= self.drop_score:\n filter_boxes.append(box)\n filter_rec_res.append(rec_reuslt)\n return filter_boxes, filter_rec_res\n\n\ndef sorted_boxes(dt_boxes):\n \"\"\"\n Sort text boxes in order from top to bottom, left to right\n args:\n dt_boxes(array):detected text boxes with 
shape [4, 2]\n return:\n sorted boxes(array) with shape [4, 2]\n \"\"\"\n num_boxes = dt_boxes.shape[0]\n sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))\n _boxes = list(sorted_boxes)\n\n for i in range(num_boxes - 1):\n if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \\\n (_boxes[i + 1][0][0] < _boxes[i][0][0]):\n tmp = _boxes[i]\n _boxes[i] = _boxes[i + 1]\n _boxes[i + 1] = tmp\n return _boxes\n\n\ndef main(args):\n image_file_list = get_image_file_list(args.image_dir)\n image_file_list = image_file_list[args.process_id::args.total_process_num]\n text_sys = TextSystem(args)\n is_visualize = True\n font_path = args.vis_font_path\n drop_score = args.drop_score\n save_results = []\n args.warmup=True\n # warm up 10 times\n if args.warmup:\n img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)\n for i in range(10):\n res = text_sys(img)\n\n total_time = 0\n cpu_mem, gpu_mem, gpu_util = 0, 0, 0\n _st = time.time()\n count = 0\n for idx, image_file in enumerate(image_file_list):\n\n img, flag = check_and_read_gif(image_file)\n if not flag:\n img = cv2.imread(image_file)\n if img is None:\n logger.info(\"error in loading image:{}\".format(image_file))\n continue\n starttime = time.time()\n dt_boxes, rec_res = text_sys(img)\n elapse = time.time() - starttime\n total_time += elapse\n \n logger.info(\n str(idx) + \" Predict time of %s: %.3fs\" % (image_file, elapse)) \n \n #save_pred = str(json.dumps(np.array(dt_boxes).astype(np.int32).tolist())) + \"\\n\"\n #save_results.append(save_pred)\n txts = [rec_res[i][0] for i in range(len(rec_res))]\n # save_pred = str(json.dumps(np.array(dt_boxes).astype(np.int32).tolist())) + \"\\n\"\n save_pred = np.array(dt_boxes).astype(np.int32).tolist()\n os.makedirs('submission_output/', exist_ok=True)\n label_text=open('submission_output/'+os.path.basename(image_file)+'.txt', \"w\")\n for i in range(len(save_pred)):\n label = str(save_pred[i][0][0])+','+str(save_pred[i][0][1])+','+str(save_pred[i][1][0])+','+str(save_pred[i][1][1])+','+str(save_pred[i][2][0])+','+str(save_pred[i][2][1])+','+str(save_pred[i][3][0])+','+str(save_pred[i][3][1])+','+txts[i]+\"\\n\"\n label_text.write(label)\n label_text.close()\n if is_visualize:\n image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n boxes = dt_boxes\n txts = [rec_res[i][0] for i in range(len(rec_res))]\n scores = [rec_res[i][1] for i in range(len(rec_res))]\n\n draw_img = draw_ocr_box_txt(\n image,\n boxes,\n txts,\n scores,\n drop_score=drop_score,\n font_path=font_path)\n draw_img_save_dir = './inference_results'\n os.makedirs(draw_img_save_dir, exist_ok=True)\n if flag:\n image_file = image_file[:-3] + \"png\"\n cv2.imwrite(\n os.path.join(draw_img_save_dir, os.path.basename(image_file)),\n draw_img[:, :, ::-1])\n logger.debug(\"The visualized image saved in {}\".format(\n os.path.join(draw_img_save_dir, os.path.basename(image_file))))\n logger.info(\"The predict total time is {}\".format(time.time() - _st))\n logger.info(\"\\nThe predict total time is {}\".format(total_time))\n\n\nif __name__ == \"__main__\":\n args = utility.parse_args()\n if args.use_mp:\n p_list = []\n total_process_num = args.total_process_num\n for process_id in range(total_process_num):\n cmd = [sys.executable, \"-u\"] + sys.argv + [\n \"--process_id={}\".format(process_id),\n \"--use_mp={}\".format(False)\n ]\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stdout)\n p_list.append(p)\n for p in p_list:\n p.wait()\n else:\n main(args)\n"
] |
[
[
"numpy.random.uniform",
"numpy.array"
]
] |
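
sorted_boxes above imposes reading order on detected text boxes: a primary sort on the top-left corner's (y, x), then one pass that swaps adjacent boxes whose top edges are within 10 px, so boxes on the same text line come out left to right. A standalone sketch with three toy quadrilaterals:

import numpy as np

def sorted_boxes(dt_boxes):
    boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))  # sort by top y, then left x
    for i in range(len(boxes) - 1):
        # same text line (tops within 10 px) but out of left-to-right order: swap
        if abs(boxes[i + 1][0][1] - boxes[i][0][1]) < 10 and boxes[i + 1][0][0] < boxes[i][0][0]:
            boxes[i], boxes[i + 1] = boxes[i + 1], boxes[i]
    return boxes

# two boxes on one text line (y ~ 100) plus one lower box
dt_boxes = np.array([[[200, 102], [260, 102], [260, 120], [200, 120]],
                     [[40, 100], [120, 100], [120, 118], [40, 118]],
                     [[60, 300], [150, 300], [150, 320], [60, 320]]], dtype=float)
for b in sorted_boxes(dt_boxes):
    print(b[0])   # top-left corners in reading order: (40,100), (200,102), (60,300)
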
cod3licious/simec
|
[
"45392475debc37883a79eddb0e90f2669ce0a44e"
] |
[
"utils_datasets.py"
] |
[
"from __future__ import unicode_literals, division, print_function, absolute_import\nfrom builtins import range\nimport numpy as np\nfrom sklearn.datasets import make_circles, make_blobs, make_swiss_roll, make_s_curve\nfrom sklearn.utils import check_random_state\n\n\ndef make_3_circles(n_samples, random_state=1):\n random_state = check_random_state(random_state)\n X = np.ones((3 * n_samples, 3))\n Y_plot = np.ones((3 * n_samples, 1))\n X[:n_samples, :2], _ = make_circles(n_samples=n_samples, noise=0.05, factor=.01, random_state=random_state)\n X[:n_samples, 2] *= -1\n Y_plot[:n_samples, 0] = 1\n X[n_samples:2 * n_samples, :2], _ = make_circles(n_samples=n_samples,\n noise=0.05, factor=.01, random_state=random_state)\n X[n_samples:2 * n_samples, 2] = 0\n Y_plot[n_samples:2 * n_samples, 0] = 2\n X[2 * n_samples:, :2], _ = make_circles(n_samples=n_samples, noise=0.05, factor=.01, random_state=random_state)\n Y_plot[2 * n_samples:, 0] = 3\n # shuffle examples\n idx = random_state.permutation(list(range(3 * n_samples)))\n X, Y_plot = X[idx, :], Y_plot[idx, :]\n # cut to actual size\n X, Y_plot = X[:n_samples, :], Y_plot[:n_samples, :]\n return X, Y_plot\n\n\ndef make_sphere(n_samples, random_state=1):\n # Create our sphere.\n random_state = check_random_state(random_state)\n p = random_state.rand(int(n_samples * 1.5)) * (2 * np.pi - 0.5)\n t = random_state.rand(int(n_samples * 1.5)) * np.pi\n\n # Sever the poles from the sphere.\n indices = ((t < (np.pi - (np.pi / 10))) & (t > ((np.pi / 10))))\n colors = p[indices]\n x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \\\n np.sin(t[indices]) * np.sin(p[indices]), \\\n np.cos(t[indices])\n sphere_data = np.array([x, y, z]).T\n return sphere_data[:n_samples, :], colors[:n_samples]\n\n\ndef make_broken_swiss_roll(n_samples, random_state=1):\n # get original swiss roll\n X, Y_plot = make_swiss_roll(2 * n_samples, random_state=random_state)\n # cut off a part\n X, Y_plot = X[X[:, 0] > -5, :], Y_plot[X[:, 0] > -5]\n # get desired number of samples\n X, Y_plot = X[:n_samples, :], Y_plot[:n_samples]\n return X, Y_plot\n\n\ndef make_peaks(n_samples, random_state=1):\n # get randomly sampled 2d grid\n random_state = check_random_state(random_state)\n X = 10. * random_state.rand(n_samples, 3)\n # have as 3rd dimension some peaks\n X[X[:, 0] <= 5, 2] = np.cos(0.9 * (X[X[:, 0] <= 5, 1] - 2))\n X[X[:, 0] > 5, 2] = np.cos(0.5 * (X[X[:, 0] > 5, 1] - 5))\n # 3rd dim is also the color\n Y_plot = X[:, 2]\n return X, Y_plot\n\n\ndef load_dataset(dataset, n_samples, random_state=1, n_features=3):\n # wrapper function to load one of the 3d datasets\n if dataset == 's_curve':\n return make_s_curve(n_samples, random_state=random_state)\n elif dataset == 'swiss_roll':\n return make_swiss_roll(n_samples, random_state=random_state)\n elif dataset == 'broken_swiss_roll':\n return make_broken_swiss_roll(n_samples, random_state=random_state)\n elif dataset == 'sphere':\n return make_sphere(n_samples, random_state=random_state)\n elif dataset == '3_circles':\n return make_3_circles(n_samples, random_state=random_state)\n elif dataset == 'peaks':\n return make_peaks(n_samples, random_state=random_state)\n elif dataset == 'blobs':\n return make_blobs(n_samples, n_features=n_features, centers=3, random_state=random_state)\n else:\n print(\"unknown dataset\")\n"
] |
[
[
"sklearn.datasets.make_s_curve",
"sklearn.datasets.make_swiss_roll",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"sklearn.datasets.make_circles",
"numpy.array",
"sklearn.utils.check_random_state",
"sklearn.datasets.make_blobs"
]
] |
chao9527/PaddleBox
|
[
"1cb68eb8227096c3d269e7f6ba5075f7157add86"
] |
[
"python/paddle/fluid/dataset.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is definition of dataset class, which is high performance IO.\"\"\"\n\nfrom paddle.fluid.proto import data_feed_pb2\nfrom google.protobuf import text_format\nfrom . import core\nfrom ..utils import deprecated\n__all__ = ['DatasetFactory', 'InMemoryDataset', 'QueueDataset']\n\n\nclass DatasetFactory(object):\n \"\"\"\n DatasetFactory is a factory which create dataset by its name,\n you can create \"QueueDataset\" or \"InMemoryDataset\", or \"FileInstantDataset\",\n the default is \"QueueDataset\".\n\n Example:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n\n \"\"\"\n\n def __init__(self):\n \"\"\" Init. \"\"\"\n pass\n\n def create_dataset(self, datafeed_class=\"QueueDataset\"):\n \"\"\"\n Create \"QueueDataset\" or \"InMemoryDataset\", or \"FileInstantDataset\",\n the default is \"QueueDataset\".\n\n Args:\n datafeed_class(str): datafeed class name, QueueDataset or InMemoryDataset.\n Default is QueueDataset.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n\n \"\"\"\n try:\n dataset = globals()[datafeed_class]()\n return dataset\n except:\n raise ValueError(\"datafeed class %s does not exist\" %\n datafeed_class)\n\n\nclass DatasetBase(object):\n \"\"\" Base dataset class. \"\"\"\n\n def __init__(self):\n \"\"\" Init. \"\"\"\n # define class name here\n # to decide whether we need create in memory instance\n self.proto_desc = data_feed_pb2.DataFeedDesc()\n self.proto_desc.pipe_command = \"cat\"\n self.dataset = core.Dataset(\"MultiSlotDataset\")\n self.thread_num = 1\n self.filelist = []\n\n def set_pipe_command(self, pipe_command):\n \"\"\"\n Set pipe command of current dataset\n A pipe command is a UNIX pipeline command that can be used only\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_pipe_command(\"python my_script.py\")\n\n Args:\n pipe_command(str): pipe command\n\n \"\"\"\n self.proto_desc.pipe_command = pipe_command\n\n def set_rank_offset(self, rank_offset):\n \"\"\"\n Set rank_offset for merge_pv. It set the message of Pv.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_rank_offset(\"rank_offset\")\n\n Args:\n rank_offset(str): rank_offset's name\n\n \"\"\"\n self.proto_desc.rank_offset = rank_offset\n\n def set_fea_eval(self, record_candidate_size, fea_eval=True):\n \"\"\"\n set fea eval mode for slots shuffle to debug the importance level of\n slots(features), fea_eval need to be set True for slots shuffle.\n \n Args:\n record_candidate_size(int): size of instances candidate to shuffle \n one slot\n fea_eval(bool): whether enable fea eval mode to enable slots shuffle.\n default is True.\n \n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_fea_eval(1000000, True)\n\n \"\"\"\n if fea_eval:\n self.dataset.set_fea_eval(fea_eval, record_candidate_size)\n self.fea_eval = fea_eval\n\n def slots_shuffle(self, slots):\n \"\"\"\n Slots Shuffle \n Slots Shuffle is a shuffle method in slots level, which is usually used \n in sparse feature with large scale of instances. To compare the metric, i.e.\n auc while doing slots shuffle on one or several slots with baseline to \n evaluate the importance level of slots(features).\n \n Args:\n slots(list[string]): the set of slots(string) to do slots shuffle.\n\n Examples:\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_merge_by_lineid()\n #suppose there is a slot 0\n dataset.slots_shuffle(['0'])\n \"\"\"\n if self.fea_eval:\n slots_set = set(slots)\n self.dataset.slots_shuffle(slots_set)\n\n def set_batch_size(self, batch_size):\n \"\"\"\n Set batch size. Will be effective during training\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_batch_size(128)\n\n Args:\n batch_size(int): batch size\n\n \"\"\"\n self.proto_desc.batch_size = batch_size\n\n def set_pv_batch_size(self, pv_batch_size):\n \"\"\"\n Set pv batch size. It will be effective during enable_pv_merge\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_pv_batch(128)\n Args:\n pv_batch_size(int): pv batch size\n\n \"\"\"\n self.proto_desc.pv_batch_size = pv_batch_size\n\n def set_sample_rate(self, sample_rate):\n \"\"\"\n Set sample_rate, it is the sample rate of readers.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_sample_rate(0.8)\n\n Args:\n sample_rate(float): sample rate\n \"\"\"\n self.proto_desc.sample_rate = sample_rate\n\n def set_thread(self, thread_num):\n \"\"\"\n Set thread num, it is the num of readers.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_thread(12)\n\n Args:\n thread_num(int): thread num\n \"\"\"\n self.dataset.set_thread_num(thread_num)\n self.thread_num = thread_num\n\n def set_filelist(self, filelist):\n \"\"\"\n Set file list in current worker.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_filelist(['a.txt', 'b.txt'])\n\n Args:\n filelist(list): file list\n \"\"\"\n self.dataset.set_filelist(filelist)\n self.filelist = filelist\n\n def set_input_type(self, input_type):\n self.proto_desc.input_type = input_type\n\n def set_use_var(self, var_list):\n \"\"\"\n Set Variables which you will use.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([data, label])\n\n Args:\n var_list(list): variable list\n \"\"\"\n multi_slot = self.proto_desc.multi_slot_desc\n for var in var_list:\n slot_var = multi_slot.slots.add()\n slot_var.is_used = True\n slot_var.name = var.name\n if var.lod_level == 0:\n slot_var.is_dense = True\n slot_var.shape.extend(var.shape)\n if var.dtype == core.VarDesc.VarType.FP32:\n slot_var.type = \"float\"\n elif var.dtype == core.VarDesc.VarType.INT64:\n slot_var.type = \"uint64\"\n else:\n raise ValueError(\n \"Currently, fluid.dataset only supports dtype=float32 and dtype=int64\"\n )\n\n def set_hdfs_config(self, fs_name, fs_ugi):\n \"\"\"\n Set hdfs config: fs name ad ugi\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_hdfs_config(\"my_fs_name\", \"my_fs_ugi\")\n\n Args:\n fs_name(str): fs name\n fs_ugi(str): fs ugi\n \"\"\"\n self.dataset.set_hdfs_config(fs_name, fs_ugi)\n\n def set_download_cmd(self, download_cmd):\n \"\"\"\n Set customized download cmd: download_cmd\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_download_cmd(\"./read_from_afs\")\n\n Args:\n download_cmd(str): customized download command\n \"\"\"\n self.dataset.set_download_cmd(download_cmd)\n\n def _prepare_to_run(self):\n \"\"\"\n Set data_feed_desc before load or shuffle,\n user no need to call this function.\n \"\"\"\n if self.thread_num > len(self.filelist):\n self.thread_num = len(self.filelist)\n self.dataset.set_thread_num(self.thread_num)\n self.dataset.set_data_feed_desc(self.desc())\n self.dataset.create_readers()\n\n def _finish_to_run(self):\n self.dataset.destroy_readers()\n\n def desc(self):\n \"\"\"\n Returns a protobuf message for this DataFeedDesc\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset()\n print(dataset.desc())\n\n Returns:\n A string message\n \"\"\"\n return text_format.MessageToString(self.proto_desc)\n\n def _dynamic_adjust_before_train(self, thread_num):\n pass\n\n def _dynamic_adjust_after_train(self):\n pass\n\n\nclass InMemoryDataset(DatasetBase):\n \"\"\"\n InMemoryDataset, it will load data into memory\n and shuffle data before training.\n This class should be created by DatasetFactory\n\n Example:\n dataset = paddle.fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n \"\"\"\n\n @deprecated(since=\"2.0.0\", update_to=\"paddle.distributed.InMemoryDataset\")\n def __init__(self):\n \"\"\" Init. 
\"\"\"\n super(InMemoryDataset, self).__init__()\n self.proto_desc.name = \"MultiSlotInMemoryDataFeed\"\n self.fleet_send_batch_size = None\n self.is_user_set_queue_num = False\n self.queue_num = None\n self.parse_ins_id = False\n self.parse_content = False\n self.parse_logkey = False\n self.merge_by_sid = True\n self.enable_pv_merge = False\n self.merge_by_lineid = False\n self.fleet_send_sleep_seconds = None\n self.trainer_num = -1\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_feed_type\")\n def set_feed_type(self, data_feed_type):\n \"\"\"\n Set data_feed_desc\n \"\"\"\n self.proto_desc.name = data_feed_type\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._prepare_to_run\")\n def _prepare_to_run(self):\n \"\"\"\n Set data_feed_desc before load or shuffle,\n user no need to call this function.\n \"\"\"\n if self.thread_num <= 0:\n self.thread_num = 1\n self.dataset.set_thread_num(self.thread_num)\n if self.queue_num is None:\n self.queue_num = self.thread_num\n self.dataset.set_queue_num(self.queue_num)\n self.dataset.set_parse_ins_id(self.parse_ins_id)\n self.dataset.set_parse_content(self.parse_content)\n self.dataset.set_parse_logkey(self.parse_logkey)\n self.dataset.set_merge_by_sid(self.merge_by_sid)\n self.dataset.set_enable_pv_merge(self.enable_pv_merge)\n self.dataset.set_data_feed_desc(self.desc())\n self.dataset.create_channel()\n self.dataset.create_readers()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._dynamic_adjust_before_train\"\n )\n def _dynamic_adjust_before_train(self, thread_num):\n if not self.is_user_set_queue_num:\n self.dataset.dynamic_adjust_channel_num(thread_num, False)\n self.dataset.dynamic_adjust_readers_num(thread_num)\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._dynamic_adjust_after_train\"\n )\n def _dynamic_adjust_after_train(self):\n if not self.is_user_set_queue_num:\n self.dataset.dynamic_adjust_channel_num(self.thread_num, False)\n self.dataset.dynamic_adjust_readers_num(self.thread_num)\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_queue_num\")\n def set_queue_num(self, queue_num):\n \"\"\"\n Set Dataset output queue num, training threads get data from queues\n\n Args:\n queue_num(int): dataset output queue num\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_queue_num(12)\n\n \"\"\"\n self.is_user_set_queue_num = True\n self.queue_num = queue_num\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_parse_ins_id\")\n def set_parse_ins_id(self, parse_ins_id):\n \"\"\"\n Set id Dataset need to parse insid\n\n Args:\n parse_ins_id(bool): if parse ins_id or not\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_parse_ins_id(True)\n\n \"\"\"\n self.parse_ins_id = parse_ins_id\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_parse_content\")\n def set_parse_content(self, parse_content):\n \"\"\"\n Set if Dataset need to parse content\n\n Args:\n parse_content(bool): if parse content or not\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_parse_content(True)\n\n \"\"\"\n self.parse_content = parse_content\n\n def set_parse_logkey(self, parse_logkey):\n \"\"\"\n Set if Dataset need to parse logkey\n\n Args:\n parse_content(bool): if parse logkey or not\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_parse_logkey(True)\n\n \"\"\"\n self.parse_logkey = parse_logkey\n\n def _set_trainer_num(self, trainer_num):\n \"\"\"\n Set trainer num\n\n Args:\n trainer_num(int): trainer num\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset._set_trainer_num(1)\n\n \"\"\"\n self.trainer_num = trainer_num\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_merge_by_sid\")\n def set_merge_by_sid(self, merge_by_sid):\n \"\"\"\n Set if Dataset need to merge sid. If not, one ins means one Pv.\n\n Args:\n merge_by_sid(bool): if merge sid or not\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_merge_by_sid(True)\n\n \"\"\"\n self.merge_by_sid = merge_by_sid\n\n def set_enable_pv_merge(self, enable_pv_merge):\n \"\"\"\n Set if Dataset need to merge pv.\n\n Args:\n enable_pv_merge(bool): if enable_pv_merge or not\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_enable_pv_merge(True)\n\n \"\"\"\n self.enable_pv_merge = enable_pv_merge\n\n def preprocess_instance(self):\n \"\"\"\n Merge pv instance and convey it from input_channel to input_pv_channel. \n It will be effective when enable_pv_merge_ is True.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.preprocess_instance()\n\n \"\"\"\n self.dataset.preprocess_instance()\n\n def set_current_phase(self, current_phase):\n \"\"\"\n Set current phase in train. It is useful for untest.\n current_phase : 1 for join, 0 for update.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.set_current_phase(1)\n\n \"\"\"\n self.dataset.set_current_phase(current_phase)\n\n def postprocess_instance(self):\n \"\"\"\n Divide pv instance and convey it to input_channel.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.preprocess_instance()\n exe.train_from_dataset(dataset)\n dataset.postprocess_instance()\n\n \"\"\"\n self.dataset.postprocess_instance()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_fleet_send_batch_size\"\n )\n def set_fleet_send_batch_size(self, fleet_send_batch_size=1024):\n \"\"\"\n Set fleet send batch size, default is 1024\n\n Args:\n fleet_send_batch_size(int): fleet send batch size\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_fleet_send_batch_size(800)\n\n \"\"\"\n self.fleet_send_batch_size = fleet_send_batch_size\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_fleet_send_sleep_seconds\"\n )\n def set_fleet_send_sleep_seconds(self, fleet_send_sleep_seconds=0):\n \"\"\"\n Set fleet send sleep time, default is 0\n\n Args:\n fleet_send_sleep_seconds(int): fleet send sleep time\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_fleet_send_sleep_seconds(2)\n\n \"\"\"\n self.fleet_send_sleep_seconds = fleet_send_sleep_seconds\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_merge_by_lineid\")\n def set_merge_by_lineid(self, merge_size=2):\n \"\"\"\n Set merge by line id, instances of same line id will be merged after\n shuffle, you should parse line id in data generator.\n\n Args:\n merge_size(int): ins size to merge. default is 2.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_merge_by_lineid()\n\n \"\"\"\n self.dataset.set_merge_by_lineid(merge_size)\n self.merge_by_lineid = True\n self.parse_ins_id = True\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._set_generate_unique_feasigns\"\n )\n def set_generate_unique_feasigns(self, generate_uni_feasigns, shard_num):\n self.dataset.set_generate_unique_feasigns(generate_uni_feasigns)\n self.gen_uni_feasigns = generate_uni_feasigns\n self.local_shard_num = shard_num\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset._generate_local_tables_unlock\"\n )\n def generate_local_tables_unlock(self, table_id, fea_dim, read_thread_num,\n consume_thread_num, shard_num):\n self.dataset.generate_local_tables_unlock(\n table_id, fea_dim, read_thread_num, consume_thread_num, shard_num)\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.load_into_memory\")\n def load_into_memory(self):\n \"\"\"\n Load data into memory\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n \"\"\"\n self._prepare_to_run()\n self.dataset.load_into_memory()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.preload_into_memory\")\n def preload_into_memory(self, thread_num=None):\n \"\"\"\n Load data into memory in async mode\n\n Args:\n thread_num(int): preload thread num\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.preload_into_memory()\n dataset.wait_preload_done()\n \"\"\"\n self._prepare_to_run()\n if thread_num is None:\n thread_num = self.thread_num\n self.dataset.set_preload_thread_num(thread_num)\n self.dataset.create_preload_readers()\n self.dataset.preload_into_memory()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.wait_preload_done\")\n def wait_preload_done(self):\n \"\"\"\n Wait preload_into_memory done\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.preload_into_memory()\n dataset.wait_preload_done()\n \"\"\"\n self.dataset.wait_preload_done()\n self.dataset.destroy_preload_readers()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.local_shuffle\")\n def local_shuffle(self):\n \"\"\"\n Local shuffle\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.local_shuffle()\n \"\"\"\n self.dataset.local_shuffle()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.global_shuffle\")\n def global_shuffle(self, fleet=None, thread_num=12):\n \"\"\"\n Global shuffle.\n Global shuffle can be used only in distributed mode. i.e. multiple\n processes on single machine or multiple machines training together.\n If you run in distributed mode, you should pass fleet instead of None.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.global_shuffle(fleet)\n\n Args:\n fleet(Fleet): fleet singleton. Default None.\n thread_num(int): shuffle thread num. 
Default is 12.\n\n \"\"\"\n if fleet is not None:\n fleet._role_maker.barrier_worker()\n if self.trainer_num == -1:\n self.trainer_num = fleet.worker_num()\n if self.fleet_send_batch_size is None:\n self.fleet_send_batch_size = 1024\n if self.fleet_send_sleep_seconds is None:\n self.fleet_send_sleep_seconds = 0\n self.dataset.register_client2client_msg_handler()\n self.dataset.set_trainer_num(self.trainer_num)\n self.dataset.set_fleet_send_batch_size(self.fleet_send_batch_size)\n self.dataset.set_fleet_send_sleep_seconds(self.fleet_send_sleep_seconds)\n if fleet is not None:\n fleet._role_maker.barrier_worker()\n self.dataset.global_shuffle(thread_num)\n if fleet is not None:\n fleet._role_maker.barrier_worker()\n if self.merge_by_lineid:\n self.dataset.merge_by_lineid()\n if fleet is not None:\n fleet._role_maker.barrier_worker()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.release_memory\")\n def release_memory(self):\n \"\"\"\n :api_attr: Static Graph\n \n Release InMemoryDataset memory data, when data will not be used again.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.global_shuffle(fleet)\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe.train_from_dataset(fluid.default_main_program(), dataset)\n dataset.release_memory()\n\n \"\"\"\n self.dataset.release_memory()\n\n def get_pv_data_size(self):\n \"\"\"\n Get memory data size of Pv, user can call this function to know the pv num\n of ins in all workers after load into memory.\n\n Note:\n This function may cause bad performance, because it has barrier\n\n Returns:\n The size of memory pv data.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n print dataset.get_pv_data_size()\n\n \"\"\"\n return self.dataset.get_pv_data_size()\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.get_memory_data_size\")\n def get_memory_data_size(self, fleet=None):\n \"\"\"\n Get memory data size, user can call this function to know the num\n of ins in all workers after load into memory.\n\n Note:\n This function may cause bad performance, because it has barrier\n\n Args:\n fleet(Fleet): Fleet Object.\n\n Returns:\n The size of memory data.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n print dataset.get_memory_data_size(fleet)\n\n \"\"\"\n import numpy as np\n local_data_size = self.dataset.get_memory_data_size()\n local_data_size = np.array([local_data_size])\n if fleet is not None:\n global_data_size = local_data_size * 0\n fleet._role_maker.all_reduce_worker(local_data_size,\n global_data_size)\n return global_data_size[0]\n return local_data_size[0]\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.InMemoryDataset.get_shuffle_data_size\")\n def get_shuffle_data_size(self, fleet=None):\n \"\"\"\n Get shuffle data size, user can call this function to know the num\n of ins in all workers after local/global shuffle.\n\n Note:\n This function may cause bad performance to local shuffle,\n because it has barrier. It does not affect global shuffle.\n\n Args:\n fleet(Fleet): Fleet Object.\n\n Returns:\n The size of shuffle data.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n dataset.global_shuffle(fleet)\n print dataset.get_shuffle_data_size(fleet)\n\n \"\"\"\n import numpy as np\n local_data_size = self.dataset.get_shuffle_data_size()\n local_data_size = np.array([local_data_size])\n if fleet is not None:\n global_data_size = local_data_size * 0\n fleet._role_maker.all_reduce_worker(local_data_size,\n global_data_size)\n return global_data_size[0]\n return local_data_size[0]\n\n\nclass QueueDataset(DatasetBase):\n \"\"\"\n QueueDataset, it will process data streamly.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"QueueDataset\")\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize QueueDataset\n This class should be created by DatasetFactory\n \"\"\"\n super(QueueDataset, self).__init__()\n self.proto_desc.name = \"MultiSlotDataFeed\"\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.distributed.QueueDataset._prepare_to_run\")\n def _prepare_to_run(self):\n \"\"\"\n Set data_feed_desc/thread num/filelist before run,\n user no need to call this function.\n \"\"\"\n if self.thread_num > len(self.filelist):\n self.thread_num = len(self.filelist)\n if self.thread_num == 0:\n self.thread_num = 1\n self.dataset.set_thread_num(self.thread_num)\n self.dataset.set_filelist(self.filelist)\n self.dataset.set_data_feed_desc(self.desc())\n self.dataset.create_readers()\n\n def local_shuffle(self):\n \"\"\"\n Local shuffle data.\n\n Local shuffle is not supported in QueueDataset\n NotImplementedError will be raised\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"QueueDataset\")\n dataset.local_shuffle()\n\n Raises:\n NotImplementedError: QueueDataset does not support local shuffle\n\n \"\"\"\n raise NotImplementedError(\n \"QueueDataset does not support local shuffle, \"\n \"please use InMemoryDataset for local_shuffle\")\n\n def global_shuffle(self, fleet=None):\n \"\"\"\n Global shuffle data.\n\n Global shuffle is not supported in QueueDataset\n NotImplementedError will be raised\n\n Args:\n fleet(Fleet): fleet singleton. Default None.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet\n dataset = fluid.DatasetFactory().create_dataset(\"QueueDataset\")\n dataset.global_shuffle(fleet)\n\n Raises:\n NotImplementedError: QueueDataset does not support global shuffle\n\n \"\"\"\n raise NotImplementedError(\n \"QueueDataset does not support global shuffle, \"\n \"please use InMemoryDataset for global_shuffle\")\n\n\nclass FileInstantDataset(DatasetBase):\n \"\"\"\n FileInstantDataset, it will process data streamly.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory.create_dataset(\"FileInstantDataset\")\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize FileInstantDataset\n This class should be created by DatasetFactory\n \"\"\"\n super(FileInstantDataset, self).__init__()\n self.proto_desc.name = \"MultiSlotFileInstantDataFeed\"\n\n def local_shuffle(self):\n \"\"\"\n Local shuffle\n FileInstantDataset does not support local shuffle\n \"\"\"\n raise NotImplementedError(\n \"FileInstantDataset does not support local shuffle, \"\n \"please use InMemoryDataset for local_shuffle\")\n\n def global_shuffle(self, fleet=None):\n \"\"\"\n Global shuffle\n FileInstantDataset does not support global shuffle\n \"\"\"\n raise NotImplementedError(\n \"FileInstantDataset does not support global shuffle, \"\n \"please use InMemoryDataset for global_shuffle\")\n\n\nclass BoxPSDataset(InMemoryDataset):\n \"\"\"\n BoxPSDataset: derived from InMemoryDataset.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize BoxPSDataset\n This class should be created by DatasetFactory\n \"\"\"\n super(BoxPSDataset, self).__init__()\n self.boxps = core.BoxPS(self.dataset)\n self.proto_desc.name = \"PaddleBoxDataFeed\"\n\n def set_date(self, date):\n \"\"\"\n Workaround for date\n \"\"\"\n year = int(date[:4])\n month = int(date[4:6])\n day = int(date[6:])\n self.boxps.set_date(year, month, day)\n\n def begin_pass(self):\n \"\"\"\n Begin Pass\n Notify BoxPS to load sparse parameters of next pass to GPU Memory \n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n dataset.begin_pass()\n \"\"\"\n self.boxps.begin_pass()\n\n def end_pass(self, need_save_delta):\n \"\"\"\n End Pass\n Notify BoxPS that current pass ended \n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n dataset.end_pass(True)\n \"\"\"\n self.boxps.end_pass(need_save_delta)\n\n def wait_preload_done(self):\n \"\"\"\n Wait async preload done\n Wait Until Feed Pass Done\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.preload_into_memory()\n dataset.wait_preload_done()\n \"\"\"\n self.boxps.wait_feed_pass_done()\n\n def load_into_memory(self):\n \"\"\"\n Load next pass into memory and notify boxps to fetch its emb from SSD\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n\t \"\"\"\n self._prepare_to_run()\n self.boxps.load_into_memory()\n\n def preload_into_memory(self):\n \"\"\"\n Begin async preload next pass while current pass may be training\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.preload_into_memory()\n \"\"\"\n self._prepare_to_run()\n self.boxps.preload_into_memory()\n\n def _dynamic_adjust_before_train(self, thread_num):\n if not self.is_user_set_queue_num:\n self.dataset.dynamic_adjust_channel_num(thread_num, True)\n self.dataset.dynamic_adjust_readers_num(thread_num)\n\n def _dynamic_adjust_after_train(self):\n pass\n\n def slots_shuffle(self, slots):\n \"\"\"\n Slots Shuffle \n Slots Shuffle is a shuffle method in slots level, which is usually used \n in sparse feature with large scale of instances. To compare the metric, i.e.\n auc while doing slots shuffle on one or several slots with baseline to \n evaluate the importance level of slots(features).\n \n Args:\n slots(list[string]): the set of slots(string) to do slots shuffle.\n\n Examples:\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InMemoryDataset\")\n dataset.set_merge_by_lineid()\n #suppose there is a slot 0\n dataset.slots_shuffle(['0'])\n \"\"\"\n slots_set = set(slots)\n self.boxps.slots_shuffle(slots_set)\n\n\nclass PadBoxSlotDataset(BoxPSDataset):\n \"\"\"\n PadBoxSlotDataset: derived from InMemoryDataset.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize BoxPSDataset\n This class should be created by DatasetFactory\n \"\"\"\n # define class name here\n # to decide whether we need create in memory instance\n self.proto_desc = data_feed_pb2.DataFeedDesc()\n self.proto_desc.pipe_command = \"cat\"\n self.dataset = core.Dataset(\"PadBoxSlotDataset\")\n self.thread_num = 1\n self.filelist = []\n self.boxps = core.BoxPS(self.dataset)\n self.proto_desc.name = \"SlotPaddleBoxDataFeed\"\n self.fleet_send_batch_size = None\n self.is_user_set_queue_num = False\n self.queue_num = None\n self.parse_ins_id = False\n self.parse_content = False\n self.parse_logkey = False\n self.merge_by_sid = True\n self.enable_pv_merge = False\n self.merge_by_lineid = False\n self.fleet_send_sleep_seconds = None\n\n def load_into_memory(self):\n \"\"\"\n Load next pass into memory and notify boxps to fetch its emb from SSD\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"BoxPSDataset\")\n filelist = [\"a.txt\", \"b.txt\"]\n dataset.set_filelist(filelist)\n dataset.load_into_memory()\n \"\"\"\n self._prepare_to_run()\n self.boxps.read_ins_into_memory()\n\n def disable_polling(self, disable=False):\n \"\"\"\n disable file polling\n \"\"\"\n self.dataset.disable_polling(disable)\n\n def disable_shuffle(self, disable=False):\n \"\"\"\n disable data shuffle\n \"\"\"\n self.dataset.disable_shuffle(disable)\n\n def preload_into_disk(self, path, file_num):\n \"\"\"\n prepare load data to disk\n \"\"\"\n self._prepare_to_run()\n self.dataset.preload_into_disk(path, file_num)\n\n def wait_load_disk_done(self):\n \"\"\"\n wait disk file load done\n \"\"\"\n self.dataset.wait_load_disk_done()\n\n def load_into_disk(self, path, file_num):\n \"\"\"\n sync load ins to disk\n \"\"\"\n self.preload_into_disk(path, file_num)\n self.wait_load_disk_done()\n\n def set_archivefile(self, archive=False):\n \"\"\"\n is load archive file\n \"\"\"\n self.dataset.set_archivefile(archive)\n\n\nclass InputTableDataset(PadBoxSlotDataset):\n def __init__(self):\n \"\"\"\n InputTableDataset: derived from PadBoxSlotDataset.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n dataset = fluid.DatasetFactory().create_dataset(\"InputTableDataset\")\n \"\"\"\n self.proto_desc = data_feed_pb2.DataFeedDesc()\n self.proto_desc.pipe_command = \"cat\"\n self.dataset = core.Dataset(\"InputTableDataset\")\n self.thread_num = 1\n self.filelist = []\n self.boxps = core.BoxPS(self.dataset)\n self.proto_desc.name = \"InputTableDataFeed\"\n self.fleet_send_batch_size = None\n self.is_user_set_queue_num = False\n self.queue_num = None\n self.parse_ins_id = False\n self.parse_content = False\n self.parse_logkey = False\n self.merge_by_sid = True\n self.enable_pv_merge = False\n self.merge_by_lineid = False\n self.fleet_send_sleep_seconds = None\n\n def set_index_parser(self, index_parser):\n \"\"\" set index parser\n \"\"\"\n self.proto_desc.index_parser = index_parser\n\n def set_index_filelist(self, filelist):\n \"\"\" set index filelist\n \"\"\"\n self.dataset.set_index_filelist(filelist)\n\n def set_feed_type(self, data_feed_type):\n \"\"\"\n Set data_feed_desc\n \"\"\"\n assert data_feed_type == 'InputTableDataFeed', 'InputTableDataset must use InputTableDataFeed'\n self.proto_desc.name = data_feed_type\n"
] |
[
[
"numpy.array"
]
] |
Vishal-V/Hack-Submission
|
[
"677d8d6a19ae63d3aa2ddd74e9ce8ae7a06b71df"
] |
[
"Flask/cancer_classification.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np \nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nfrom sklearn.datasets import load_breast_cancer\n\n\n# In[3]:\n\n\ndata = load_breast_cancer()\n\n\n# In[4]:\n\n\ndata\n\n\n# In[5]:\n\n\ndata.keys()\n\n\n# In[6]:\n\n\nprint(data['feature_names'])\n\n\n# In[7]:\n\n\ndf_data = pd.DataFrame(np.c_[data['data'], data['target']], columns=np.append(data['feature_names'], ['target']))\n\n\n# In[8]:\n\n\ndf_data.head()\n\n\n# In[9]:\n\n\ndf_data.tail()\n\n\n# In[10]:\n\n\n# Exploratory data analysis\n\nsns.pairplot(df_data, vars=['mean radius', 'mean texture', 'mean area', 'worst area'], hue='target')\n\n\n# In[11]:\n\n\nsns.countplot(df_data['target'])\n\n\n# In[12]:\n\n\nsns.scatterplot(x='mean area', y='mean smoothness', hue='target', data = df_data)\n\n\n# In[13]:\n\n\n# Creating a correlation heatmap\n\nplt.figure(figsize=(20,10))\nsns.heatmap(df_data.corr(), annot=True)\n\n\n# In[ ]:\n\n\n\n\n\n# In[15]:\n\n\n# Split the training sets\n\nfrom sklearn.model_selection import train_test_split\n\nX = df_data.drop(['target'], axis=1)\nY = df_data['target']\n\n\n# In[16]:\n\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)\n\n\n# In[17]:\n\n\n# Normalization\n\nx_min = X_train.min()\nrange_x = (X_train - x_min).max()\nX_train_scaled = (X_train - x_min)/range_x\n\ntest_min = X_test.min()\nrange_test = (X_test - test_min).max()\nX_test_scaled = (X_test - test_min)/range_test\n\n\n# In[18]:\n\n\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nfrom time import time\n\nmodel = SVC()\nt = time()\nmodel.fit(X_train_scaled, Y_train)\nprint(f'Time taken = {time() - t} ms')\n\n\n# In[19]:\n\n\npred = model.predict(X_test_scaled)\nacc = accuracy_score(pred, Y_test)\nprint(acc)\n\n\n# In[20]:\n\n\ncm = confusion_matrix(Y_test, pred)\n# plt.figure(figsize=(20,10))\nsns.heatmap(cm, annot=True)\n\n\n# In[21]:\n\n\nprint(classification_report(Y_test, pred))\n\n\n# In[22]:\n\n\nfrom sklearn.model_selection import GridSearchCV\n\nparams = {'C':[0.1,1,10,100], 'kernel':['rbf'], 'gamma':[1,0.1,0.01,0.001]}\n\ngrid = GridSearchCV(SVC(), params, refit=True, verbose = 4)\n\n\n# In[23]:\n\n\ngrid.fit(X_train_scaled, Y_train)\n\n\n# In[24]:\n\n\ngrid.best_params_\n\n\n# In[25]:\n\n\ngrid_pred = grid.predict(X_test_scaled)\nacc = accuracy_score(Y_test, grid_pred)\nconf = confusion_matrix(Y_test, grid_pred)\nsns.heatmap(conf, annot=True)\n\n\n# In[26]:\n\n\nprint(classification_report(Y_test, grid_pred))\n\n\n# In[27]:\n\n\nmodel1=SVC(C=1, gamma=1, kernel='rbf')\nmodel1.fit(X_train_scaled, Y_train)\nprediction=model1.predict(X_test_scaled)\naccuracy = accuracy_score(Y_test, prediction)\nprint(accuracy)\nprint(classification_report(Y_test, prediction))\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"sklearn.datasets.load_breast_cancer",
"matplotlib.pyplot.figure",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.confusion_matrix",
"numpy.append",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report",
"sklearn.metrics.accuracy_score"
]
] |
aimagelab/RefiNet
|
[
"aedd551df78b265bff4de9610df077473d07166c"
] |
[
"src/Datasets/transforms.py"
] |
[
"import numpy as np\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, inputs):\n for t in self.transforms:\n inputs = t(inputs)\n\n return inputs\n\n\nclass GaussianNoise(object):\n def __init__(self, image_size, sigma=1, check: bool = True):\n self.check = check\n self.image_size = image_size\n self.sigma = sigma\n\n def __call__(self, kpt):\n done = False\n while done is False:\n noise = np.random.normal(0, self.sigma, (kpt.shape[0], kpt.shape[1]))\n kpt = kpt + noise\n if self.check:\n if kpt[kpt[:, 0] < 0].sum() > 0 or kpt[kpt[:, 1] < 0].sum() > 0:\n done = False\n elif kpt[kpt[:, 0] > self.image_size[1]].sum() > 0 or kpt[kpt[:, 1] > self.image_size[0]].sum() > 0:\n done = False\n else:\n done = True\n done = True\n return kpt\n"
] |
[
[
"numpy.random.normal"
]
] |
ds-wook/Final-NeuralFBProphet
|
[
"40caac12a1805da6a061452ea5571b48d6f2bb8f"
] |
[
"src/Final-NeuralFBProphet/two_optim.py"
] |
[
"import argparse\n\nimport joblib\nimport optuna\nfrom neuralprophet import NeuralProphet\nfrom optuna import Trial\nfrom optuna.samplers import TPESampler\nfrom sklearn.metrics import mean_squared_error\n\nfrom data.dataset import two_seconds_dataset\n\nparse = argparse.ArgumentParser(\"Optimize\")\nparse.add_argument(\"--path\", type=str, default=\"../../input/\")\nparse.add_argument(\"--trials\", type=int, default=360)\nparse.add_argument(\"--params\", type=str, default=\"two_second_params.pkl\")\nargs = parse.parse_args()\n\ndf, train, valid = two_seconds_dataset(args.path)\n\n\ndef objective(trial: Trial) -> float:\n params = {\n \"epochs\": trial.suggest_categorical(\"epochs\", [50, 100, 200, 300, 400, 500]),\n \"batch_size\": 64,\n \"num_hidden_layers\": trial.suggest_int(\"num_hidden_layers\", 0, 5),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-3, 0.1),\n \"changepoints_range\": trial.suggest_discrete_uniform(\n \"changepoints_range\", 0.8, 0.95, 0.001\n ),\n \"n_changepoints\": trial.suggest_int(\"n_changepoints\", 20, 35),\n \"seasonality_mode\": \"additive\",\n \"yearly_seasonality\": False,\n \"weekly_seasonality\": True,\n \"daily_seasonality\": True,\n \"loss_func\": \"MSE\",\n }\n # fit_model\n m = NeuralProphet(**params)\n m.fit(train, freq=\"1D\")\n future = m.make_future_dataframe(\n train, periods=len(valid), n_historic_predictions=True\n )\n\n forecast = m.predict(future)\n valid_forecast = forecast[forecast.y.isna()]\n val_rmse = mean_squared_error(valid_forecast.yhat1, valid, squared=False)\n\n return val_rmse\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(\n study_name=\"two second hyperparameter\",\n direction=\"minimize\",\n sampler=TPESampler(seed=42),\n )\n study.optimize(objective, n_trials=args.trials)\n prophet_params = study.best_params\n prophet_params[\"epochs\"] = 30\n prophet_params[\"batch_size\"] = 64\n prophet_params[\"loss_func\"] = \"MSE\"\n prophet_params[\"weekly_seasonality\"] = True\n prophet_params[\"daily_seasonality\"] = True\n prophet_params[\"yearly_seasonality\"] = False\n joblib.dump(prophet_params, \"../../parameters/\" + args.params)\n"
] |
[
[
"sklearn.metrics.mean_squared_error"
]
] |
MICCoMpy/PyCCE
|
[
"b24a311f54d04ce452ef4b75f52a61a35d502563"
] |
[
"pycce/utilities.py"
] |
[
"import numpy as np\r\nfrom pycce.sm import _smc\r\nfrom numba import jit\r\nfrom numba.typed import List\r\nimport warnings\r\n\r\ndef rotmatrix(initial_vector, final_vector):\r\n r\"\"\"\r\n Generate 3D rotation matrix which applied on initial vector will produce vector, aligned with final vector.\r\n\r\n Examples:\r\n\r\n >>> R = rotmatrix([0,0,1], [1,1,1])\r\n >>> R @ np.array([0,0,1])\r\n array([0.577, 0.577, 0.577])\r\n\r\n Args:\r\n initial_vector (ndarray with shape(3, )): Initial vector.\r\n final_vector (ndarray with shape (3, )): Final vector.\r\n\r\n Returns:\r\n ndarray with shape (3, 3): Rotation matrix.\r\n \"\"\"\r\n\r\n iv = np.asarray(initial_vector)\r\n fv = np.asarray(final_vector)\r\n a = iv / np.linalg.norm(iv)\r\n b = fv / np.linalg.norm(fv) # Final vector\r\n\r\n c = a @ b # Cosine between vectors\r\n # if they're antiparallel\r\n if c == -1.:\r\n raise ValueError('Vectors are antiparallel')\r\n\r\n v = np.cross(a, b)\r\n screw_v = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\r\n r = np.eye(3) + screw_v + np.dot(screw_v, screw_v) / (1 + c)\r\n\r\n return r\r\n\r\n\r\ndef expand(matrix, i, dim):\r\n \"\"\"\r\n Expand matrix M from it's own dimensions to the total Hilbert space.\r\n\r\n Args:\r\n matrix (ndarray with shape (dim[i], dim[i])): Inital matrix.\r\n i (int): Index of the spin dimensions in ``dim`` parameter.\r\n dim (ndarray): Array pf dimensions of all spins present in the cluster.\r\n\r\n Returns:\r\n ndarray with shape (prod(dim), prod(dim)): Expanded matrix.\r\n \"\"\"\r\n dbefore = np.asarray(dim[:i]).prod()\r\n dafter = np.asarray(dim[i + 1:]).prod()\r\n\r\n expanded_matrix = np.kron(np.kron(np.eye(dbefore, dtype=np.complex128), matrix),\r\n np.eye(dafter, dtype=np.complex128))\r\n\r\n return expanded_matrix\r\n\r\n\r\ndef dimensions_spinvectors(nspin, central_spin=None):\r\n \"\"\"\r\n Generate two arrays, containing dimensions of the spins in the cluster and the vectors with spin matrices.\r\n\r\n Args:\r\n nspin (BathArray with shape (n,)): Array of the n spins within cluster.\r\n central_spin (float, optional): If provided, include dimensions of the central spin with the total spin s.\r\n\r\n Returns:\r\n tuple: *tuple* containing:\r\n\r\n * **ndarray with shape (n,)**: Array with dimensions for each spin.\r\n\r\n * **list**: List with vectors of spin matrices for each spin in the cluster\r\n (Including central spin if ``central_spin`` is not None). 
Each with shape (3, N, N) where\r\n ``N = prod(dimensions)``.\r\n \"\"\"\r\n\r\n ntype = nspin.types\r\n\r\n spins = [ntype[n].s for n in nspin['N']]\r\n dimensions = [_smc[s].dim for s in spins]\r\n\r\n if central_spin is not None:\r\n dimensions += [_smc[central_spin].dim]\r\n spins += [central_spin]\r\n\r\n dimensions = np.asarray(dimensions, dtype=np.int32)\r\n\r\n vectors = []\r\n\r\n for j, s in enumerate(spins):\r\n vectors.append(spinvec(s, j, dimensions))\r\n\r\n vectors = np.asarray(vectors)\r\n\r\n return dimensions, vectors\r\n\r\n\r\ndef spinvec(s, j, dimensions):\r\n \"\"\"\r\n Generate spin vector for the particle, containing 3 spin matrices in the total basis of the system.\r\n\r\n Args:\r\n s (float): Spin of the particle.\r\n j (j): Particle index in ``dimensions`` array.\r\n dimensions (ndarray): Array with dimensions of all spins in the cluster.\r\n\r\n Returns:\r\n ndarray with shape (3, prod(dimensions), prod(dimensions)):\r\n Vector of spin matrices for the given spin in the cluster.\r\n \"\"\"\r\n vec = np.array([expand(_smc[s].x, j, dimensions),\r\n expand(_smc[s].y, j, dimensions),\r\n expand(_smc[s].z, j, dimensions)],\r\n dtype=np.complex128)\r\n return vec\r\n\r\n\r\ndef generate_projections(state_a, state_b=None):\r\n r\"\"\"\r\n Generate vector with the spin projections of the given spin states:\r\n\r\n .. math::\r\n\r\n [\\bra{a}\\hat{S}_x\\ket{b}, \\bra{a}\\hat{S}_y\\ket{b}, \\bra{a}\\hat{S}_z\\ket{b}],\r\n\r\n where :math:`\\ket{a}` and :math:`\\ket{b}` are the given spin states.\r\n\r\n Args:\r\n state_a (ndarray): state `a` of the central spin in :math:`\\hat{S}_z` basis.\r\n state_b (ndarray): state `b` of the central spin in :math:`\\hat{S}_z` basis.\r\n\r\n Returns:\r\n ndarray with shape (3,): :math:`[\\braket{\\hat{S}_x}, \\braket{\\hat{S}_y}, \\braket{\\hat{S}_z}]` projections.\r\n \"\"\"\r\n if state_b is None:\r\n state_b = state_a\r\n\r\n spin = (state_a.size - 1) / 2\r\n sm = _smc[spin]\r\n\r\n projections = np.array([state_a.conj() @ sm.x @ state_b,\r\n state_a.conj() @ sm.y @ state_b,\r\n state_a.conj() @ sm.z @ state_b],\r\n dtype=np.complex128)\r\n return projections\r\n\r\n\r\ndef zfs_tensor(D, E=0):\r\n \"\"\"\r\n Generate (3, 3) ZFS tensor from observable parameters D and E.\r\n\r\n Args:\r\n D (float or ndarray with shape (3, 3)): Longitudinal splitting (D) in ZFS **OR** total ZFS tensor.\r\n E (float): Transverse splitting (E) in ZFS.\r\n\r\n Returns:\r\n ndarray with shape (3, 3): Total ZFS tensor.\r\n \"\"\"\r\n if isinstance(D, (np.floating, float, int)):\r\n\r\n tensor = np.zeros((3, 3), dtype=np.float64)\r\n tensor[2, 2] = 2 / 3 * D\r\n tensor[1, 1] = -D / 3 - E\r\n tensor[0, 0] = -D / 3 + E\r\n else:\r\n tensor = D\r\n return tensor\r\n\r\n\r\ndef project_bath_states(states):\r\n r\"\"\"\r\n Generate projections of bath states on :math:`S_z` axis from any type of states input.\r\n Args:\r\n states (array-like): Array of bath spin states.\r\n\r\n Returns:\r\n ndarray: Array of :math:`S_z` projections of the bath states\r\n \"\"\"\r\n\r\n ndstates = np.asarray(states)\r\n\r\n if len(ndstates.shape) > 1:\r\n\r\n spin = (ndstates.shape[1] - 1) / 2\r\n projected_bath_state = np.empty((ndstates.shape[0], 3))\r\n\r\n projected_bath_state[:, 0] = np.trace(np.matmul(ndstates, _smc[spin].x), axis1=1, axis2=2)\r\n projected_bath_state[:, 1] = np.trace(np.matmul(ndstates, _smc[spin].y), axis1=1, axis2=2)\r\n projected_bath_state[:, 2] = np.trace(np.matmul(ndstates, _smc[spin].z), axis1=1, axis2=2)\r\n\r\n elif ndstates.dtype == object:\r\n 
with warnings.catch_warnings(record=True) as w:\r\n projected_bath_state = _loop_trace(list(states))\r\n\r\n else:\r\n projected_bath_state = ndstates\r\n\r\n if len(projected_bath_state.shape) > 1 and not np.any(projected_bath_state[:, :2]):\r\n projected_bath_state = projected_bath_state[:, 2]\r\n\r\n return projected_bath_state\r\n\r\n\r\n@jit(nopython=True)\r\ndef _loop_trace(states):\r\n proj_states = np.empty((len(states), 3), dtype=np.complex128)\r\n dims = List()\r\n\r\n sx = List()\r\n sy = List()\r\n sz = List()\r\n\r\n for j, dm in enumerate(states):\r\n dm = dm.astype(np.complex128)\r\n dim = dm.shape[0]\r\n try:\r\n ind = dims.index(dim)\r\n except:\r\n sxnew, synew, sznew = _gen_sm(dim)\r\n\r\n sx.append(sxnew)\r\n sy.append(synew)\r\n sz.append(sznew)\r\n dims.append(dim)\r\n\r\n ind = -1\r\n\r\n xproj = np.trace(dm @ sx[ind])\r\n yproj = np.trace(dm @ sy[ind])\r\n zproj = np.trace(dm @ sz[ind])\r\n\r\n proj_states[j, 0] = xproj\r\n proj_states[j, 1] = yproj\r\n proj_states[j, 2] = zproj\r\n\r\n return proj_states\r\n\r\n\r\n@jit(nopython=True)\r\ndef _gen_sm(dim):\r\n \"\"\"\r\n Numba-friendly spin matrix.\r\n Args:\r\n dim (int): dimensions of the spin marix.\r\n\r\n Returns:\r\n ndarray:\r\n \"\"\"\r\n s = (dim - 1) / 2\r\n projections = np.linspace(s, -s, dim).astype(np.complex128)\r\n plus = np.zeros((dim, dim), dtype=np.complex128)\r\n\r\n for i in range(dim - 1):\r\n plus[i, i + 1] += np.sqrt(s * (s + 1) -\r\n projections[i] * projections[i + 1])\r\n\r\n minus = plus.conj().T\r\n x = 1 / 2. * (plus + minus)\r\n y = 1 / 2j * (plus - minus)\r\n z = np.diag(projections[::-1])\r\n return x, y, z\r\n\r\n\r\ndef partial_inner_product(avec, total, dimensions, index=-1):\r\n r\"\"\"\r\n Returns partial inner product :math:`\\ket{b}=\\bra{a}\\ket{\\psi}`, where :math:`\\ket{a}` provided by\r\n ``avec`` contains degrees of freedom to be \"traced out\" and :math:`\\ket{\\psi}` provided by ``total``\r\n is the total statevector.\r\n\r\n Args:\r\n avec (ndarray with shape (a,)):\r\n total (ndarray with shape (a*b,)):\r\n dimensions (ndarray with shape (n,)):\r\n index ():\r\n\r\n Returns:\r\n\r\n \"\"\"\r\n if len(total.shape) == 1:\r\n matrix = np.moveaxis(total.reshape(dimensions), index, -1)\r\n matrix = matrix.reshape([np.prod(np.delete(dimensions, index)), dimensions[index]])\r\n else:\r\n total = total.reshape(total.shape[0], *dimensions)\r\n matrix = np.moveaxis(total, index, -1)\r\n matrix = matrix.reshape([total.shape[0], np.prod(np.delete(dimensions, index)), dimensions[index]])\r\n return avec @ matrix\r\n\r\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.eye",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.delete",
"numpy.any",
"numpy.moveaxis",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.trace",
"numpy.empty"
]
] |
zhuohaoyu/TextBox
|
[
"bc44c51902fa9b3392c022b4cba5ff8757355869"
] |
[
"textbox/model/Seq2Seq/rnnencdec.py"
] |
[
"# @Time : 2020/11/14\n# @Author : Junyi Li\n# @Email : lijunyi@ruc.edu.cn\n\n# UPDATE:\n# @Time : 2020/12/25\n# @Author : Tianyi Tang\n# @Email : steventang@ruc.edu.cn\n\nr\"\"\"\nRNNEncDec\n################################################\nReference:\n Sutskever et al. \"Sequence to Sequence Learning with Neural Networks\" in NIPS 2014.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom textbox.model.abstract_generator import Seq2SeqGenerator\nfrom textbox.module.Encoder.rnn_encoder import BasicRNNEncoder\nfrom textbox.module.Decoder.rnn_decoder import BasicRNNDecoder, AttentionalRNNDecoder\nfrom textbox.model.init import xavier_normal_initialization\nfrom textbox.module.strategy import topk_sampling, greedy_search, Beam_Search_Hypothesis\n\n\nclass RNNEncDec(Seq2SeqGenerator):\n r\"\"\"RNN-based Encoder-Decoder architecture is a basic framework for Seq2Seq text generation.\n \"\"\"\n\n def __init__(self, config, dataset):\n super(RNNEncDec, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n self.hidden_size = config['hidden_size']\n self.num_enc_layers = config['num_enc_layers']\n self.num_dec_layers = config['num_dec_layers']\n self.rnn_type = config['rnn_type']\n self.bidirectional = config['bidirectional']\n self.dropout_ratio = config['dropout_ratio']\n self.attention_type = config['attention_type']\n self.alignment_method = config['alignment_method']\n self.context_size = config['context_size']\n self.strategy = config['decoding_strategy']\n\n if (self.strategy not in ['topk_sampling', 'greedy_search', 'beam_search']):\n raise NotImplementedError(\"{} decoding strategy not implemented\".format(self.strategy))\n if (self.strategy == 'beam_search'):\n self.beam_size = config['beam_size']\n\n self.padding_token_idx = dataset.padding_token_idx\n self.sos_token_idx = dataset.sos_token_idx\n self.eos_token_idx = dataset.eos_token_idx\n\n # define layers and loss\n self.source_token_embedder = nn.Embedding(\n self.source_vocab_size, self.embedding_size, padding_idx=self.padding_token_idx\n )\n\n if config['share_vocab']:\n self.target_token_embedder = self.source_token_embedder\n else:\n self.target_token_embedder = nn.Embedding(\n self.target_vocab_size, self.embedding_size, padding_idx=self.padding_token_idx\n )\n\n self.encoder = BasicRNNEncoder(\n self.embedding_size, self.hidden_size, self.num_enc_layers, self.rnn_type, self.dropout_ratio,\n self.bidirectional\n )\n\n if self.attention_type is not None:\n self.decoder = AttentionalRNNDecoder(\n self.embedding_size, self.hidden_size, self.context_size, self.num_dec_layers, self.rnn_type,\n self.dropout_ratio, self.attention_type, self.alignment_method\n )\n else:\n self.decoder = BasicRNNDecoder(\n self.embedding_size, self.hidden_size, self.num_dec_layers, self.rnn_type, self.dropout_ratio\n )\n\n self.dropout = nn.Dropout(self.dropout_ratio)\n self.vocab_linear = nn.Linear(self.hidden_size, self.target_vocab_size)\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')\n\n self.max_target_length = config['max_target_length']\n\n # parameters initialization\n self.apply(xavier_normal_initialization)\n\n def generate(self, batch_data, eval_data):\n generate_corpus = []\n idx2token = eval_data.target_idx2token\n\n source_text = batch_data['source_idx']\n source_length = batch_data['source_length']\n source_embeddings = self.source_token_embedder(source_text)\n encoder_outputs, encoder_states = 
self.encoder(source_embeddings, source_length)\n\n if self.bidirectional:\n encoder_outputs = encoder_outputs[:, :, self.hidden_size:] + encoder_outputs[:, :, :self.hidden_size]\n if (self.rnn_type == 'lstm'):\n encoder_states = (encoder_states[0][::2], encoder_states[1][::2])\n else:\n encoder_states = encoder_states[::2]\n\n encoder_masks = torch.ne(source_text, self.padding_token_idx)\n for bid in range(source_text.size(0)):\n decoder_states = encoder_states[:, bid, :].unsqueeze(1)\n encoder_output = encoder_outputs[bid, :, :].unsqueeze(0)\n encoder_mask = encoder_masks[bid, :].unsqueeze(0)\n generate_tokens = []\n input_seq = torch.LongTensor([[self.sos_token_idx]]).to(self.device)\n\n if (self.strategy == 'beam_search'):\n hypothesis = Beam_Search_Hypothesis(\n self.beam_size, self.sos_token_idx, self.eos_token_idx, self.device, idx2token\n )\n\n for gen_idx in range(self.max_target_length):\n decoder_input = self.target_token_embedder(input_seq)\n if self.attention_type is not None:\n decoder_outputs, decoder_states, _ = self.decoder(\n decoder_input, decoder_states, encoder_output, encoder_mask\n )\n else:\n decoder_outputs, decoder_states = self.decoder(decoder_input, decoder_states)\n\n token_logits = self.vocab_linear(decoder_outputs)\n if (self.strategy == 'topk_sampling'):\n token_idx = topk_sampling(token_logits).item()\n elif (self.strategy == 'greedy_search'):\n token_idx = greedy_search(token_logits).item()\n elif (self.strategy == 'beam_search'):\n if self.attention_type is not None:\n input_seq, decoder_states, encoder_output, encoder_mask = \\\n hypothesis.step(gen_idx, token_logits, decoder_states, encoder_output, encoder_mask)\n else:\n input_seq, decoder_states = hypothesis.step(gen_idx, token_logits, decoder_states)\n\n if (self.strategy in ['topk_sampling', 'greedy_search']):\n if token_idx == self.eos_token_idx:\n break\n else:\n generate_tokens.append(idx2token[token_idx])\n input_seq = torch.LongTensor([[token_idx]]).to(self.device)\n elif (self.strategy == 'beam_search'):\n if (hypothesis.stop()):\n break\n\n if (self.strategy == 'beam_search'):\n generate_tokens = hypothesis.generate()\n\n generate_corpus.append(generate_tokens)\n\n return generate_corpus\n\n def forward(self, corpus, epoch_idx=0):\n source_text = corpus['source_idx']\n source_length = corpus['source_length']\n\n input_text = corpus['target_idx'][:, :-1]\n target_text = corpus['target_idx'][:, 1:]\n\n source_embeddings = self.dropout(self.source_token_embedder(source_text))\n input_embeddings = self.dropout(self.target_token_embedder(input_text))\n encoder_outputs, encoder_states = self.encoder(source_embeddings, source_length)\n\n if self.bidirectional:\n encoder_outputs = encoder_outputs[:, :, self.hidden_size:] + encoder_outputs[:, :, :self.hidden_size]\n if (self.rnn_type == 'lstm'):\n encoder_states = (encoder_states[0][::2], encoder_states[1][::2])\n else:\n encoder_states = encoder_states[::2]\n\n encoder_masks = torch.ne(source_text, self.padding_token_idx)\n\n if self.attention_type is not None:\n decoder_outputs, decoder_states, _ = self.decoder(\n input_embeddings, encoder_states, encoder_outputs, encoder_masks\n )\n else:\n decoder_outputs, decoder_states = self.decoder(input_embeddings, encoder_states)\n\n token_logits = self.vocab_linear(decoder_outputs)\n\n loss = self.loss(token_logits.view(-1, token_logits.size(-1)), target_text.contiguous().view(-1))\n loss = loss.reshape_as(target_text)\n\n length = corpus['target_length'] - 1\n loss = loss.sum(dim=1) / length.float()\n loss 
= loss.mean()\n return loss\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.LongTensor",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.ne"
]
] |
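Annotation: the forward() in the record above ends with a length-normalized sequence loss: per-token cross entropy with reduction='none', reshaped back to (batch, seq_len), summed over time, and divided by each target length. A minimal standalone sketch of just that step, with toy shapes assumed (not the record's actual model):

import torch
import torch.nn as nn

vocab_size, batch, seq_len = 10, 2, 5
token_logits = torch.randn(batch, seq_len, vocab_size)          # toy decoder output
target_text = torch.randint(0, vocab_size, (batch, seq_len))    # toy shifted targets
target_length = torch.tensor([5, 3])                            # toy true lengths

loss_fn = nn.CrossEntropyLoss(reduction='none')
loss = loss_fn(token_logits.view(-1, vocab_size), target_text.contiguous().view(-1))
loss = loss.reshape_as(target_text)              # back to (batch, seq_len)
loss = loss.sum(dim=1) / target_length.float()   # normalize by sequence length
loss = loss.mean()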
varun-affinsys/Monocular-Depth-Estimation-with-Transfer-Learning-pretrained-MobileNetV2
|
[
"9b20c5b3d7a9f90e1dc6f40e17ee31d9b3dee684"
] |
[
"UtilityTest.py"
] |
[
"import os\nimport glob\nimport time\nfrom PIL import Image\nimport numpy as np\nimport PIL\nimport random\nimport torch\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\ndef _is_pil_image(img):\n return isinstance(img, Image.Image)\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\nclass DepthDataset(Dataset):\n def __init__(self, root_dir, transform=None):\n \n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(os.listdir(self.root_dir))\n\n def __getitem__(self, idx):\n \n img_name = os.path.join(self.root_dir,os.listdir(self.root_dir)[idx])\n image = (Image.open(img_name))\n\n sample1={'image': image}\n\n if self.transform: sample1 = self.transform({'image': image})\n return sample1\n \n \n\nclass ToTensor(object):\n def __init__(self,is_test=False):\n self.is_test = is_test\n\n def __call__(self, sample):\n image= sample['image']\n \n image = image.resize((640, 480))\n image = self.to_tensor(image)\n\n return {'image': image}\n\n def to_tensor(self, pic):\n pic = np.array(pic)\n if not (_is_numpy_image(pic) or _is_pil_image(pic)):\n raise TypeError( 'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n \n if isinstance(pic, np.ndarray):\n if pic.ndim==2:\n pic=pic[..., np.newaxis]\n \n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n\n return img.float().div(255)"
] |
[
[
"numpy.array"
]
] |
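Annotation: a minimal sketch of the HWC-uint8 to CHW-float conversion performed by ToTensor.to_tensor in the record above, using a synthetic array in place of a real PIL image:

import numpy as np
import torch

pic = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # toy image
if pic.ndim == 2:                                  # grayscale gets a channel axis
    pic = pic[..., np.newaxis]
img = torch.from_numpy(pic.transpose((2, 0, 1)))   # HWC -> CHW
img = img.float().div(255)                         # scale to [0, 1]
print(img.shape, float(img.max()) <= 1.0)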
noogel/xyzDeepLearning
|
[
"5cfb477b317202b4d09850ae9602bc99cfc9f90c"
] |
[
"machine_learning/show_pic_location.py"
] |
[
"#! /usr/bin/python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: abc\n@file: euclidean_distance.py\n@date: 2016-12-09\n@desc: 差分矩阵求和/均值\n\"\"\"\n__author__ = \"abc\"\n\nimport cv2\nimport numpy as np\n\n\ndef show_pic_location(img, findimg):\n \"\"\"\n show_pic_location\n :param img:\n :param findimg:\n :return:\n \"\"\"\n w = img.shape[1]\n h = img.shape[0]\n fw = findimg.shape[1]\n fh = findimg.shape[0]\n findpt = None\n for now_h in xrange(h - fh):\n for now_w in xrange(w - fw):\n comp_tz = img[now_h:now_h + fh, now_w: now_w + fw, :] - findimg\n # 求和\n # if np.sum(comp_tz) < 1:\n # findpt = now_w, now_h\n # 均值\n if abs(np.mean(comp_tz)) < 20:\n findpt = now_w, now_h\n if findpt is not None:\n cv2.rectangle(img, findpt, (findpt[0] + fw, findpt[1] + fh), (255, 0, 0))\n return img\n\n\ndef add_noise(img):\n \"\"\"\n add_noise\n :param img:\n :return:\n \"\"\"\n count = 30000\n for k in xrange(count):\n xi = int(np.random.uniform(0, img.shape[1]))\n xj = int(np.random.uniform(0, img.shape[0]))\n img[xj, xi, 0] = 255 * np.random.rand()\n img[xj, xi, 1] = 255 * np.random.rand()\n img[xj, xi, 2] = 255 * np.random.rand()\n\n\ndef handle_img(imgpath, imgpath1, imgpath2):\n \"\"\"\n handle_img\n :param imgpath:\n :param imgpath1:\n :param imgpath2:\n :return:\n \"\"\"\n myimg = cv2.imread(imgpath)\n myimg1 = cv2.imread(imgpath1)\n myimg2 = cv2.imread(imgpath2)\n\n cv2.namedWindow('img1')\n cv2.imshow('img1', myimg1)\n cv2.namedWindow('img2')\n cv2.imshow('img2', myimg2)\n\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\n\n add_noise(myimg)\n\n myimg = show_pic_location(myimg, myimg1)\n myimg = show_pic_location(myimg, myimg2)\n\n cv2.namedWindow('img')\n cv2.imshow('img', myimg)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n imgpath = \"/home/abc/Projects/machine_learning/img/test.png\"\n imgpath1 = \"/home/abc/Projects/machine_learning/img/test_1.png\"\n imgpath2 = \"/home/abc/Projects/machine_learning/img/test_2.png\"\n handle_img(imgpath, imgpath1, imgpath2)\n"
] |
[
[
"numpy.random.uniform",
"numpy.mean",
"numpy.random.rand"
]
] |
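Annotation: a minimal Python 3 sketch of the brute-force template search in the record above: slide the template over the image and record the window whose mean absolute difference falls under a threshold (synthetic arrays; the threshold of 20 matches the record):

import numpy as np

img = np.zeros((20, 20, 3), dtype=np.int32)
img[5:8, 7:10, :] = 200                          # plant a 3x3 patch at (row=5, col=7)
template = np.full((3, 3, 3), 200, dtype=np.int32)

findpt = None
for now_h in range(img.shape[0] - template.shape[0]):
    for now_w in range(img.shape[1] - template.shape[1]):
        window = img[now_h:now_h + 3, now_w:now_w + 3, :]
        if abs(np.mean(window - template)) < 20:  # mean-difference criterion
            findpt = (now_w, now_h)
print(findpt)  # -> (7, 5)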
shravan20/mycaptain-ai-certification
|
[
"d8a700d3bb2059fc7e359e8e1ff23ed0c154299e"
] |
[
"ImageClassification/test.py"
] |
[
"from train import load_data, batch_size\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# CIFAR-10 classes\ncategories = {\n 0: \"airplane\",\n 1: \"automobile\",\n 2: \"bird\",\n 3: \"cat\",\n 4: \"deer\",\n 5: \"dog\",\n 6: \"frog\",\n 7: \"horse\",\n 8: \"ship\",\n 9: \"truck\"\n}\n\n# load the testing set\n# (_, _), (X_test, y_test) = load_data()\nds_train, ds_test, info = load_data()\n# load the model with final model weights\nmodel = load_model(\"results/cifar10-model-v1.h5\")\n# evaluation\nloss, accuracy = model.evaluate(ds_test, steps=info.splits[\"test\"].num_examples // batch_size)\nprint(\"Test accuracy:\", accuracy*100, \"%\")\n\n# get prediction for this image\ndata_sample = next(iter(ds_test))\nsample_image = data_sample[0].numpy()[0]\nsample_label = categories[data_sample[1].numpy()[0]]\nprediction = np.argmax(model.predict(sample_image.reshape(-1, *sample_image.shape))[0])\nprint(\"Predicted label:\", categories[prediction])\nprint(\"True label:\", sample_label)\n\n# show the first image\nplt.axis('off')\nplt.imshow(sample_image)\nplt.show()\n"
] |
[
[
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
] |
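Annotation: a minimal sketch of the single-image prediction step in the record above: add a batch axis with reshape(-1, *shape), take the argmax of the class scores, and map it through the categories dict. model.predict is replaced here by random stand-in scores:

import numpy as np

categories = {0: "airplane", 1: "automobile", 2: "bird"}   # truncated toy mapping
sample_image = np.random.rand(32, 32, 3).astype(np.float32)

batch = sample_image.reshape(-1, *sample_image.shape)       # shape (1, 32, 32, 3)
fake_scores = np.random.rand(batch.shape[0], len(categories))  # stand-in for model.predict(batch)
prediction = int(np.argmax(fake_scores[0]))
print("Predicted label:", categories[prediction])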
DedSec-1/Idiot-Boxx
|
[
"de41bb0ace787f34ba8100dc1f33b570f24315b4"
] |
[
"openCV.py"
] |
[
"import cv2\nimport mediapipe as mp\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport pyttsx3 \n\nCLASSIFICATION = {\n 0: \"A\",\n 1: \"B\",\n 2: \"C\",\n 3: \"D\",\n 4: \"E\",\n 5: \"F\",\n 6: \"G\",\n 7: \"H\",\n 8: \"I\",\n 9: \"J\",\n 10: \"K\",\n 11: \"L\",\n 12: \"M\",\n 13: \"N\",\n 14: \"O\",\n 15: \"P\",\n 16: \"Q\",\n 17: \"R\",\n 18: \"S\",\n 19: \"T\",\n 20: \"U\",\n 21: \"V\",\n 22: \"W\",\n 23: \"X\",\n 24: \"Y\",\n}\n\nmodel = load_model(\"model.h5\")\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\n# For webcam input:\nhands = mp_hands.Hands(\n max_num_hands=1, min_detection_confidence=0.5, min_tracking_confidence=0.5\n)\n\n\nclass VideoCamera(object):\n def __init__(self):\n # capturing video\n self.video = cv2.VideoCapture(0)\n\n def __del__(self):\n # releasing camera\n self.video.release()\n\n def get_frame(self):\n while True:\n success, image = self.video.read()\n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n # To improve performance, optionally mark the image as not writeable to\n # pass by reference.\n image.flags.writeable = False\n results = hands.process(image)\n # Preperation for bounding box\n\n image_height, image_width, _ = image.shape\n\n # Draw the hand annotations on the image.\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n\n \n tup_x = (\n hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_CMC].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_MCP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_IP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_MCP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_PIP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_DIP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_TIP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_DIP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_MCP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_PIP\n ].x\n * image_width,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_TIP\n ].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_DIP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_MCP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_PIP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_MCP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_PIP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].x\n * image_width,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_DIP].x\n * image_width,\n )\n\n tup_y = (\n hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_CMC].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_MCP].y\n * image_height,\n 
hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_IP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_MCP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_PIP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_DIP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.INDEX_FINGER_TIP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_DIP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_MCP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_PIP\n ].y\n * image_height,\n hand_landmarks.landmark[\n mp_hands.HandLandmark.MIDDLE_FINGER_TIP\n ].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_DIP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_MCP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_PIP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_MCP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_PIP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].y\n * image_height,\n hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_DIP].y\n * image_height,\n )\n\n y_ub = max(tup_y) # Index finger\n y_lb = min(tup_y)\n\n x_ub = max(tup_x)\n x_lb = min(tup_x)\n\n w = abs(x_lb - x_ub)\n h = abs(y_lb - y_ub)\n x = min(tup_x) - 40\n y = min(tup_y) - 40\n image1 = image[int(x) : int(x + w + 80), int(y) : int(y + h + 80)]\n img = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, (7172, 784))\n imgs = np.array([img])\n imgs = imgs.reshape(-1, 28, 28, 1)\n model_output = model.predict(imgs)\n \n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS\n )\n cv2.rectangle(\n image,\n (int(x), int(y)),\n (int(x + w + 80), int(y + h + 80)),\n (255, 0, 0),\n 2,\n )\n w = 0\n if np.argmax(model_output) < 25:\n w = np.argmax(model_output)\n else:\n w = 0\n cv2.putText(\n image,\n CLASSIFICATION[w],\n (50, 50),\n font,\n 1,\n (255, 0, 0),\n 2,\n cv2.LINE_AA,\n )\n engine = pyttsx3.init()\n engine.say(CLASSIFICATION[w])\n engine.runAndWait()\n success, jpeg = cv2.imencode(\".jpeg\", image)\n return jpeg.tobytes()\n"
] |
[
[
"tensorflow.keras.models.load_model",
"numpy.array",
"numpy.argmax"
]
] |
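Annotation: a minimal sketch of the bounding-box construction in the record above: take min/max of the landmark pixel coordinates, pad by the 40-pixel margin the record uses, and crop. Note the record indexes rows with x and columns with y; this sketch uses the conventional rows=y, cols=x order (synthetic landmark tuples):

import numpy as np

tup_x = (120.0, 180.0, 150.0, 210.0)   # toy landmark x coordinates (pixels)
tup_y = (90.0, 160.0, 130.0, 140.0)    # toy landmark y coordinates (pixels)
image = np.zeros((480, 640, 3), dtype=np.uint8)

w = max(tup_x) - min(tup_x)
h = max(tup_y) - min(tup_y)
x = min(tup_x) - 40                    # pad the box by the 40-pixel margin
y = min(tup_y) - 40
crop = image[int(y):int(y + h + 80), int(x):int(x + w + 80)]
print(crop.shape)                      # -> (150, 170, 3)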
anandrajasekar18/spinningup
|
[
"c14daf6579e0341a387358adf4230de36d6e7cd4"
] |
[
"spinup/algos/tf1/cvi/cvi_ad.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport gym\nimport time\nimport math\nfrom spinup.algos.cvi import core\nfrom spinup.algos.cvi.core import get_vars\nfrom spinup.utils.logx import EpochLogger\n\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n\n\ndef linear_decay(steps):\n return 0.2 + 0.00067 * steps\n\ndef exp_decay(steps):\n return 0.2 * math.e**(0.00183 * steps)\n\ndef cos_decay(steps):\n return 0.2 + math.cos(math.pi/2 - math.pi/1200 * steps) * 0.4\n\"\"\"\n\nSoft Actor-Critic\n\n(With slight variations that bring it closer to TD3)\n\n\"\"\"\ndef cvi_ad(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0, \n steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99, alp = 0.8,\n polyak=0.995, lr=1e-3, alpha=0.2, batch_size=100, start_steps=10000, \n max_ep_len=1000, logger_kwargs=dict(), save_freq=1, decay = None, squash = False):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: A function which takes in placeholder symbols \n for state, ``x_ph``, and action, ``a_ph``, and returns the main \n outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``mu`` (batch, act_dim) | Computes mean actions from policy\n | given states.\n ``pi`` (batch, act_dim) | Samples actions from policy given \n | states.\n ``logp_pi`` (batch,) | Gives log probability, according to\n | the policy, of the action sampled by\n | ``pi``. Critical: must be differentiable\n | with respect to policy parameters all\n | the way through action sampling.\n ``q1`` (batch,) | Gives one estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q2`` (batch,) | Gives another estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q1_pi`` (batch,) | Gives the composition of ``q1`` and \n | ``pi`` for states in ``x_ph``: \n | q1(x, pi(x)).\n ``q2_pi`` (batch,) | Gives the composition of ``q2`` and \n | ``pi`` for states in ``x_ph``: \n | q2(x, pi(x)).\n ``v`` (batch,) | Gives the value estimate for states\n | in ``x_ph``. 
\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_critic \n function you provided to SAC.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target \n networks. Target networks are updated towards main networks \n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow \n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually \n close to 1.)\n\n lr (float): Learning rate (used for both policy and value learning).\n\n alpha (float): Entropy regularization coefficient. (Equivalent to \n inverse of reward scale in the original SAC paper.)\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. Helps exploration.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n\n # Inputs to computation graph\n x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)\n adv_ph = tf.placeholder(dtype = tf.float32, shape = (None,))\n alp_ph = tf.placeholder(dtype = tf.float32)\n t_step = tf.placeholder(dtype = tf.float32)\n #adv_ph1 = tf.placeholder(dtype = tf.float32, shape = (None,))\n #adv_ph2 = tf.placeholder(dtype = tf.float32, shape = (None,))\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n mu, pi, logp_pi, ad1, ad2, ad1_pi, ad2_pi, v = actor_critic(x_ph, a_ph, **ac_kwargs)\n \n # Target value network\n with tf.variable_scope('target'):\n _, _, _, ad1_targ, ad2_targ, _, _, v_targ = actor_critic(x2_ph, a_ph, **ac_kwargs)\n \n squash_eps = 1e-2\n if squash:\n print(\"Squashed\")\n squash_func = lambda x: tf.sign(x) * (tf.sqrt(tf.abs(x) + 1) - 1) + x * squash_eps\n squash_ifunc = lambda x: tf.sign(x) * ((tf.sqrt(1 + 4 * squash_eps * (tf.abs(x) + 1 + squash_eps)) - 1)** 2 * (1 / (2 * squash_eps))** 2 - 1)\n else:\n print (\"Not Squashed\")\n squash_func = lambda x: x\n squash_ifunc = lambda x: x\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n var_counts = tuple(core.count_vars(scope) for scope in \n ['main/pi', 'main/q1', 'main/q2', 'main/v', 'main'])\n print(('\\nNumber of parameters: \\t pi: %d, \\t' + \\\n 'q1: %d, \\t q2: %d, \\t v: %d, \\t total: %d\\n')%var_counts)\n\n q1 = v + ad1\n q2 = v + ad2\n q1_pi = v + ad1_pi\n q2_pi = v + ad2_pi\n\n # Min Double-Q:\n 
min_q_pi = tf.minimum(q1_pi, q2_pi)\n\n # Targets for Q and V regression\n q_backup = tf.stop_gradient(squash_func(r_ph + gamma*(1-d_ph)*squash_ifunc(v_targ) + alp_ph * adv_ph))\n #q_backup1 = tf.stop_gradient(r_ph + gamma*(1-d_ph)*v_targ + alp * adv_ph1)\n #q_backup2 = tf.stop_gradient(r_ph + gamma*(1-d_ph)*v_targ + alp * adv_ph2)\n\n v_backup = tf.stop_gradient(squash_func(squash_ifunc(min_q_pi) - alpha * logp_pi))\n\n # Soft actor-critic losses\n #alp = tf.Variable(0.2,dtype=tf.float32)\n #q_min = tf.minimum(q1,q2)\n pi_loss = tf.reduce_mean(alpha * logp_pi - squash_ifunc(min_q_pi))\n q1_loss = 0.5 * tf.reduce_mean((q_backup - q1)**2)\n q2_loss = 0.5 * tf.reduce_mean((q_backup - q2)**2)\n v_loss = 0.5 * tf.reduce_mean((v_backup - v)**2)\n value_loss = q1_loss + q2_loss + v_loss\n\n # Policy train op \n # (has to be separate from value train op, because q1_pi appears in pi_loss)\n pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))\n\n # Value train op\n # (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)\n value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n value_params = get_vars('main/q') + get_vars('main/v') \n with tf.control_dependencies([train_pi_op]):\n train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)\n\n # Polyak averaging for target variables\n # (control flow because sess.run otherwise evaluates in nondeterministic order)\n with tf.control_dependencies([train_value_op]):\n target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)\n for v_main, v_targ in zip(get_vars('main') , get_vars('target'))])\n # target_update = tf.group([tf.assign(v_targ, tf.cond(tf.not_equal(t_step%1000,0), lambda: v_targ, lambda: v_main))\n # for v_main, v_targ in zip(get_vars('main') , get_vars('target'))])\n\n # All ops to call during one training step\n step_ops = [pi_loss, q1_loss, q2_loss, v_loss, q1, q2, v, logp_pi, \n train_pi_op, train_value_op, target_update]\n \n # adv_op = squash_ifunc(tf.minimum(q1_targ, q2_targ))-squash_ifunc(v_targ)\n adv_op = squash_ifunc(tf.minimum(ad1_targ, ad2_targ))\n #adv_op1 = q1_targ-v_targ\n #adv_op2 = q2_targ-v_targ\n\n # Initializing targets to match main variables\n target_init = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(target_init)\n\n # Setup model saving\n logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, \n outputs={'mu': mu, 'pi': pi, 'q1': q1, 'q2': q2, 'v': v})\n\n def get_action(o, deterministic=False):\n act_op = mu if deterministic else pi\n return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]\n\n def test_agent(n=10):\n global sess, mu, pi, q1, q2, q1_pi, q2_pi\n for j in range(n):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time \n o, r, d, _ = test_env.step(get_action(o, True))\n ep_ret += r\n ep_len += 1\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_steps = steps_per_epoch * epochs\n\n\n if decay:\n alp_val = 0.2\n else:\n alp_val = alp\n\n update_step = 0\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n\n \"\"\"\n Until start_steps have elapsed, randomly sample 
actions\n from a uniform distribution for better exploration. Afterwards, \n use the learned policy. \n \"\"\"\n if t > start_steps:\n a = get_action(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n \"\"\"\n Perform all SAC updates at the end of the trajectory.\n This is a slight difference from the SAC specified in the\n original paper.\n \"\"\"\n for j in range(ep_len):\n update_step+=1\n batch = replay_buffer.sample_batch(batch_size)\n feed_dict = {x2_ph: batch['obs1'],\n a_ph: batch['acts']\n }\n advantage = sess.run(adv_op , feed_dict)\n #advantage = sess.run([adv_op1, adv_op2] , feed_dict)\n \n feed_dict = {x_ph: batch['obs1'],\n x2_ph: batch['obs2'],\n a_ph: batch['acts'],\n r_ph: batch['rews'],\n d_ph: batch['done'],\n t_step: update_step,\n adv_ph : advantage,\n alp_ph : alp_val\n #adv_ph1 : advantage[0],\n #adv_ph2 : advantage[1]\n }\n outs = sess.run(step_ops, feed_dict)\n logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],\n LossV=outs[3], Q1Vals=outs[4], Q2Vals=outs[5],\n VVals=outs[6], LogPi=outs[7])\n\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n\n # End of epoch wrap-up\n if t > 0 and t % steps_per_epoch == 0:\n epoch = t // steps_per_epoch\n if decay:\n alp_val = eval(decay)(t//steps_per_epoch)\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, None)\n\n # Test the performance of the deterministic version of the agent.\n test_agent()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('TestEpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TestEpLen', average_only=True)\n logger.log_tabular('TotalEnvInteracts', t)\n logger.log_tabular('Q1Vals', with_min_and_max=True) \n logger.log_tabular('Q2Vals', with_min_and_max=True) \n logger.log_tabular('VVals', with_min_and_max=True) \n logger.log_tabular('LogPi', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossQ1', average_only=True)\n logger.log_tabular('LossQ2', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--hid', type=int, default=300)\n parser.add_argument('--l', type=int, default=1)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--exp_name', type=str, default='cvi')\n args = parser.parse_args()\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n cvi(lambda : gym.make(args.env), actor_critic=core.mlp_actor_critic,\n 
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),\n gamma=args.gamma, seed=args.seed, epochs=args.epochs,\n logger_kwargs=logger_kwargs)\n"
] |
[
[
"tensorflow.sign",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.control_dependencies",
"tensorflow.minimum",
"tensorflow.assign",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.Session",
"tensorflow.set_random_seed",
"numpy.zeros",
"tensorflow.abs",
"numpy.random.randint"
]
] |
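Annotation: a minimal standalone sketch of the FIFO ReplayBuffer in the record above: a circular write pointer overwrites the oldest entries once the buffer is full, and minibatches are drawn uniformly at random (toy dimensions):

import numpy as np

class ReplayBuffer:
    def __init__(self, obs_dim, act_dim, size):
        self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.act_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(self, obs, act):
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.ptr = (self.ptr + 1) % self.max_size   # wrap around: FIFO overwrite
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=4):
        idxs = np.random.randint(0, self.size, size=batch_size)
        return dict(obs=self.obs_buf[idxs], acts=self.act_buf[idxs])

buf = ReplayBuffer(obs_dim=3, act_dim=1, size=8)
for t in range(12):                    # 12 stores into a size-8 buffer: oldest 4 overwritten
    buf.store(np.full(3, t), [t])
print(buf.sample_batch()['acts'].ravel())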
davidtea/CIFAR-10
|
[
"38f58d9888c321974f434fc26e3037def4669f2c"
] |
[
"cifar10_train.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A binary to train CIFAR-10 using a single GPU.\n\nAccuracy:\ncifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of\ndata) as judged by cifar10_eval.py.\n\nSpeed: With batch_size 128.\n\nSystem | Step Time (sec/batch) | Accuracy\n------------------------------------------------------------------\n1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)\n1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)\n\nUsage:\nPlease see the tutorial and website for how to download the CIFAR-10\ndata set, compile the program and train the model.\n\nhttp://tensorflow.org/tutorials/deep_cnn/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport numpy as np\n# Save to file in aws terminal\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport time\n\nimport tensorflow as tf\n\nimport cifar10\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('summaries_dir', 'summary',\n \"\"\"Directory where to store summaries.\"\"\")\ntf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 25000,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\ntf.app.flags.DEFINE_integer('log_frequency', 10,\n \"\"\"How often to log results to the console.\"\"\")\n\nloss_values = []\n\ndef train():\n # Save output to file\n f = open('train_log.txt', 'w')\n\n \"\"\"Train CIFAR-10 for a number of steps.\"\"\"\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n\n # Get images and labels for CIFAR-10.\n images, labels = cifar10.distorted_inputs()\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = cifar10.inference(images)\n\n # Calculate loss.\n loss = cifar10.loss(logits, labels)\n\n # Build a Graph that trains the model with one batch of examples and\n # updates the model parameters.\n train_op = cifar10.train(loss, global_step)\n\n class _LoggerHook(tf.train.SessionRunHook):\n \"\"\"Logs loss and runtime.\"\"\"\n\n def begin(self):\n self._step = -1\n self._start_time = time.time()\n\n def before_run(self, run_context):\n self._step += 1\n return tf.train.SessionRunArgs(loss) # Asks for loss value.\n\n def after_run(self, run_context, run_values):\n if self._step % FLAGS.log_frequency == 0:\n current_time = time.time()\n duration = current_time - self._start_time\n self._start_time = current_time\n\n loss_value = run_values.results\n loss_values.append(loss_value)\n examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration\n sec_per_batch = 
float(duration / FLAGS.log_frequency)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch))\n print(format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch), file=f)\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=FLAGS.train_dir,\n hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),\n tf.train.NanTensorHook(loss),\n _LoggerHook()],\n config=tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement)) as mon_sess:\n while not mon_sess.should_stop():\n mon_sess.run(train_op)\n f.close()\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n cifar10.maybe_download_and_extract()\n if tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.DeleteRecursively(FLAGS.train_dir)\n tf.gfile.MakeDirs(FLAGS.train_dir)\n\n train()\n \n print(\"Lowest loss:\", min(loss_values))\n fig = plt.figure() \n plt.plot([x for x in range(0, len(loss_values)*10, int(len(loss_values)/100)*10)], [loss_values[x] for x in range(0, len(loss_values), int(len(loss_values)/100))], 'r')\n plt.title('Loss Value')\n plt.ylabel('Loss')\n plt.xlabel('Steps')\n fig.savefig('graph.png', bbox_inches='tight')\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.train.StopAtStepHook",
"matplotlib.pyplot.title",
"tensorflow.train.NanTensorHook",
"tensorflow.gfile.Exists",
"matplotlib.use",
"tensorflow.train.SessionRunArgs",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.ConfigProto",
"tensorflow.app.run",
"matplotlib.pyplot.ylabel",
"tensorflow.gfile.MakeDirs",
"tensorflow.app.flags.DEFINE_string",
"matplotlib.pyplot.xlabel",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.app.flags.DEFINE_boolean",
"matplotlib.pyplot.figure"
]
] |
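Annotation: a minimal sketch of the throughput bookkeeping in the record's _LoggerHook: every log_frequency steps, the elapsed wall time is turned into examples/sec and sec/batch. The loop body is a stand-in for mon_sess.run(train_op):

import time

log_frequency, batch_size = 10, 128
start_time = time.time()
for step in range(30):
    time.sleep(0.001)                  # stand-in for one training step
    if step % log_frequency == 0 and step > 0:
        now = time.time()
        duration = now - start_time    # wall time for the last log_frequency steps
        start_time = now
        examples_per_sec = log_frequency * batch_size / duration
        sec_per_batch = duration / log_frequency
        print('step %d: %.1f examples/sec; %.4f sec/batch'
              % (step, examples_per_sec, sec_per_batch))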
mlrun/frameworks
|
[
"3b046710102af00baac9a5c2fdeceb7fc1f8838a"
] |
[
"frameworks/keras/mlrun_interface.py"
] |
[
"from abc import ABC\nfrom typing import Union, List, Dict, Tuple, Any\nimport os\nimport importlib\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.callbacks import (\n Callback,\n ModelCheckpoint,\n TensorBoard,\n ProgbarLogger,\n CSVLogger,\n BaseLogger,\n)\nfrom tensorflow.keras.optimizers import Optimizer\n\nimport mlrun\nfrom frameworks._common import MLRunInterface\nfrom frameworks.keras.callbacks import (\n MLRunLoggingCallback,\n TensorboardLoggingCallback,\n)\n\n\nclass KerasMLRunInterface(MLRunInterface, keras.Model, ABC):\n \"\"\"\n MLRun model is for enabling additional features supported by MLRun in keras. With MLRun model one can apply horovod\n and use auto logging with ease.\n \"\"\"\n\n # Properties attributes to be inserted so the keras mlrun interface will be fully enabled:\n _PROPERTIES = {\n # Auto enabled callbacks list:\n \"_callbacks\": [],\n # Variable to hold the horovod module:\n \"_hvd\": None,\n # List of all the callbacks that should only be applied on rank 0 when using horovod:\n \"_RANK_0_ONLY_CALLBACKS\": [\n MLRunLoggingCallback.__name__,\n TensorboardLoggingCallback.__name__,\n ModelCheckpoint.__name__,\n TensorBoard.__name__,\n ProgbarLogger.__name__,\n CSVLogger.__name__,\n BaseLogger.__name__,\n ], # type: List[str]\n }\n\n # Methods attributes to be inserted so the keras mlrun interface will be fully enabled:\n _METHODS = [\n \"auto_log\",\n \"use_horovod\",\n \"note_rank_0_callback\",\n \"_pre_compile\",\n \"_pre_fit\",\n ] # type: List[str]\n\n @classmethod\n def add_interface(cls, model: keras.Model, *args, **kwargs):\n \"\"\"\n Wrap the given model with MLRun model features, providing it with MLRun model attributes including its\n parameters and methods.\n\n :param model: The model to wrap.\n\n :return: The wrapped model.\n \"\"\"\n super(KerasMLRunInterface, cls).add_interface(model=model)\n\n # Wrap the compile method:\n def compile_wrapper(compile_method):\n def wrapper(*args, **kwargs):\n # Call the pre compile method:\n (\n optimizer,\n experimental_run_tf_function,\n ) = model._pre_compile(optimizer=kwargs[\"optimizer\"])\n # Assign parameters:\n kwargs[\"optimizer\"] = optimizer\n if experimental_run_tf_function is not None:\n kwargs[\n \"experimental_run_tf_function\"\n ] = experimental_run_tf_function\n # Call the original compile method:\n compile_method(*args, **kwargs)\n\n return wrapper\n\n setattr(model, \"compile\", compile_wrapper(model.compile))\n\n # Wrap the fit method:\n def fit_wrapper(fit_method):\n def wrapper(*args, **kwargs):\n # Setup the callbacks list:\n if \"callbacks\" not in kwargs or kwargs[\"callbacks\"] is None:\n kwargs[\"callbacks\"] = []\n # Add auto log callbacks if they were added:\n kwargs[\"callbacks\"] = kwargs[\"callbacks\"] + model._callbacks\n # Setup default values if needed:\n if \"verbose\" not in kwargs:\n kwargs[\"verbose\"] = 1\n if \"steps_per_epoch\" not in kwargs:\n kwargs[\"steps_per_epoch\"] = None\n if \"validation_steps\" not in kwargs:\n kwargs[\"validation_steps\"] = None\n # Call the pre fit method:\n (\n callbacks,\n verbose,\n steps_per_epoch,\n validation_steps,\n ) = model._pre_fit(\n callbacks=kwargs[\"callbacks\"],\n verbose=kwargs[\"verbose\"],\n steps_per_epoch=kwargs[\"steps_per_epoch\"],\n validation_steps=kwargs[\"validation_steps\"],\n )\n # Assign parameters:\n kwargs[\"callbacks\"] = callbacks\n kwargs[\"verbose\"] = verbose\n kwargs[\"steps_per_epoch\"] = steps_per_epoch\n kwargs[\"validation_steps\"] = validation_steps\n # Call the original 
fit method:\n fit_method(*args, **kwargs)\n\n return wrapper\n\n setattr(model, \"fit\", fit_wrapper(model.fit))\n\n def auto_log(\n self,\n context: mlrun.MLClientCtx,\n mlrun_callback__kwargs: Dict[str, Any] = None,\n tensorboard_callback_kwargs: Dict[str, Any] = None,\n ):\n \"\"\"\n Initialize the defaulted logging callbacks by MLRun. Given the context, the method will setup a list of\n callbacks with the most common settings for logging a training session in tensorflow.keras. For further\n information regarding the logging callbacks, see 'mlrun.frameworks.keras.callbacks.MLRunLoggingCallback' and\n 'mlrun.frameworks.keras.callbacks.TensorboardLoggingCallback'.\n\n :param context: The MLRun context to log with.\n :param mlrun_callback__kwargs: Key word arguments for the MLRun callback. For further information see the\n documentation of the class 'MLRunLoggingCallback'. Note that both 'context'\n and 'auto_log' parameters are already given here.\n :param tensorboard_callback_kwargs: Key word arguments for the tensorboard callback. For further information see\n the documentation of the class 'TensorboardLoggingCallback'. Note that both\n 'context' and 'auto_log' parameters are already given here.\n \"\"\"\n # If horovod is being used, there is no need to add the logging callbacks to ranks other than 0:\n if self._hvd is not None and self._hvd.rank() != 0:\n return\n\n # Set the dictionaries defaults:\n mlrun_callback__kwargs = (\n {} if mlrun_callback__kwargs is None else mlrun_callback__kwargs\n )\n tensorboard_callback_kwargs = (\n {} if tensorboard_callback_kwargs is None else tensorboard_callback_kwargs\n )\n\n # Add the MLRun logging callback:\n self._callbacks.append(\n MLRunLoggingCallback(\n context=context, auto_log=True, **mlrun_callback__kwargs\n )\n )\n\n # Add the Tensorboard logging callback:\n self._callbacks.append(\n TensorboardLoggingCallback(\n context=context, auto_log=True, **tensorboard_callback_kwargs\n )\n )\n\n def use_horovod(self):\n \"\"\"\n Setup the model or wrapped model to run with horovod.\n \"\"\"\n # Import horovod:\n self._hvd = importlib.import_module(\"horovod.tensorflow.keras\")\n\n # Initialize horovod:\n self._hvd.init()\n\n def note_rank_0_callback(self, callback_name: str):\n \"\"\"\n Note an additional custom callback to be applied only on rank 0 when using horovod.\n\n :param callback_name: The name of the callback.\n \"\"\"\n self._RANK_0_ONLY_CALLBACKS.append(callback_name)\n\n def _pre_compile(self, optimizer: Optimizer) -> Tuple[Optimizer, Union[bool, None]]:\n \"\"\"\n Method to call before calling 'compile' to setup the run and inputs for using horovod.\n\n :param optimizer: The optimzier to compile. 
It will be wrapped in horovod's distributed optimizer:\n 'hvd.DistributedOptimizer'.\n\n :return: The updated parameters:\n [0] = Wrapped optimizer.\n [1] = The 'experimental_run_tf_function' parameter for 'compile' kwargs or 'None' if horovod should not\n be used.\n\n :raise ValueError: In case the optimizer was passed as a string.\n \"\"\"\n # Check if needed to run with horovod:\n if self._hvd is None:\n return optimizer, None\n\n # Validate the optimizer input:\n if isinstance(optimizer, str):\n raise ValueError(\n \"When using horovod, the compile mehotd is expecting an initialized optimizer \"\n \"instance and not a string.\"\n )\n\n # Setup the device to run on GPU if available:\n if tf.config.experimental.list_physical_devices(\"GPU\"):\n # Pin each GPU to a single process:\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n if gpus:\n tf.config.experimental.set_visible_devices(\n gpus[self._hvd.local_rank()], \"GPU\"\n )\n else:\n # No GPUs were found, or 'use_cuda' was false:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n # Adjust learning rate based on the number of GPUs:\n optimizer.lr = optimizer.lr * self._hvd.size()\n\n # Wrap the optimizer in horovod's distributed optimizer: 'hvd.DistributedOptimizer'.\n optimizer = self._hvd.DistributedOptimizer(optimizer)\n\n # Compile the model with `experimental_run_tf_function=False` to ensure Tensorflow uses the distributed\n # optimizer to compute gradients:\n experimental_run_tf_function = False\n\n return optimizer, experimental_run_tf_function\n\n def _pre_fit(\n self,\n callbacks: List[Callback],\n verbose: int,\n steps_per_epoch: Union[int, None],\n validation_steps: Union[int, None],\n ) -> Tuple[List[Callback], int, Union[int, None], Union[int, None]]:\n \"\"\"\n Method to call before calling 'fit' to setup the run and inputs for using horovod.\n\n :param callbacks: Callbacks to use in the run. The callbacks will be split among the ranks so only\n certain callbacks (mainly logging and checkpoints) will be in rank 0.\n :param verbose: Whether or not to print the progress of training. If '1' or '2' only rank 0 will be\n applied with the verbose.\n :param steps_per_epoch: Amount of training steps to run in each epoch. The steps will be divided by the size of\n ranks (horovod workers).\n :param validation_steps: Amount of validation steps to run in each epoch. 
The steps will be divided by the size\n of ranks (horovod workers).\n\n :return: The updated parameters according to the used rank:\n [0] = Callbacks list.\n [1] = Verbose\n [2] = Steps per epoch or None if not given.\n [3] = Validation steps or None if not given.\n \"\"\"\n # Check if needed to run with horovod:\n if self._hvd is None:\n return callbacks, verbose, steps_per_epoch, validation_steps\n\n # Setup the callbacks:\n metric_average_callback = self._hvd.callbacks.MetricAverageCallback()\n metric_average_callback._supports_tf_logs = True\n horovod_callbacks = [\n self._hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n metric_average_callback,\n self._hvd.callbacks.LearningRateWarmupCallback(\n initial_lr=float(self.optimizer.lr)\n ),\n ]\n if self._hvd.rank() != 0:\n callbacks = [\n callback\n for callback in callbacks\n if type(callback).__name__ not in self._RANK_0_ONLY_CALLBACKS\n ]\n callbacks = horovod_callbacks + callbacks\n\n # Pick the verbose:\n if self._hvd.rank() != 0:\n verbose = 0\n\n # Adjust the number of steps per epoch based on the number of GPUs (if given):\n if steps_per_epoch is not None:\n steps_per_epoch = steps_per_epoch // self._hvd.size()\n if validation_steps is not None:\n validation_steps = validation_steps // self._hvd.size()\n\n return callbacks, verbose, steps_per_epoch, validation_steps\n"
] |
[
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth"
]
] |
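Annotation: a minimal sketch of the interface pattern used by add_interface in the record above: wrap a bound method in a closure that injects default keyword arguments before delegating, then rebind it with setattr. Toy class, not the MLRun API:

class Model:
    def fit(self, **kwargs):
        print("fit called with", kwargs)

def fit_wrapper(fit_method):
    def wrapper(*args, **kwargs):
        kwargs.setdefault("callbacks", [])   # inject defaults before the real call
        kwargs.setdefault("verbose", 1)
        return fit_method(*args, **kwargs)
    return wrapper

model = Model()
setattr(model, "fit", fit_wrapper(model.fit))    # rebind on the instance
model.fit(epochs=3)  # -> fit called with {'epochs': 3, 'callbacks': [], 'verbose': 1}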
SNUHDR2018/ConSSL
|
[
"c7d406d0224e38895986c8fb7281a189e493c982"
] |
[
"ConSSL/datamodules/vocdetection_datamodule.py"
] |
[
"from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom pytorch_lightning import LightningDataModule\nfrom torch.utils.data import DataLoader\n\nfrom ConSSL.utils import _TORCHVISION_AVAILABLE\nfrom ConSSL.utils.warnings import warn_missing_pkg\n\nif _TORCHVISION_AVAILABLE:\n from torchvision import transforms as transform_lib\n from torchvision.datasets import VOCDetection\nelse: # pragma: no cover\n warn_missing_pkg('torchvision')\n\n\nclass Compose(object):\n \"\"\"\n Like `torchvision.transforms.compose` but works for (image, target)\n \"\"\"\n\n def __init__(self, transforms: List[Callable], image_transforms: Optional[Callable] = None) -> None:\n self.transforms = transforms\n self.image_transforms = image_transforms\n\n def __call__(self, image: Any, target: Any) -> Tuple[torch.Tensor, torch.Tensor]:\n for t in self.transforms:\n image, target = t(image, target)\n if self.image_transforms:\n image = self.image_transforms(image)\n return image, target\n\n\ndef _collate_fn(batch: List[torch.Tensor]) -> tuple:\n return tuple(zip(*batch))\n\n\nCLASSES = (\n \"__background__ \",\n \"aeroplane\",\n \"bicycle\",\n \"bird\",\n \"boat\",\n \"bottle\",\n \"bus\",\n \"car\",\n \"cat\",\n \"chair\",\n \"cow\",\n \"diningtable\",\n \"dog\",\n \"horse\",\n \"motorbike\",\n \"person\",\n \"pottedplant\",\n \"sheep\",\n \"sofa\",\n \"train\",\n \"tvmonitor\",\n)\n\n\ndef _prepare_voc_instance(image: Any, target: Dict[str, Any]):\n \"\"\"\n Prepares VOC dataset into appropriate target for fasterrcnn\n\n https://github.com/pytorch/vision/issues/1097#issuecomment-508917489\n \"\"\"\n anno = target[\"annotation\"]\n boxes = []\n classes = []\n area = []\n iscrowd = []\n objects = anno[\"object\"]\n if not isinstance(objects, list):\n objects = [objects]\n for obj in objects:\n bbox = obj[\"bndbox\"]\n bbox = [int(bbox[n]) - 1 for n in [\"xmin\", \"ymin\", \"xmax\", \"ymax\"]]\n boxes.append(bbox)\n classes.append(CLASSES.index(obj[\"name\"]))\n iscrowd.append(int(obj[\"difficult\"]))\n area.append((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]))\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n classes = torch.as_tensor(classes)\n area = torch.as_tensor(area)\n iscrowd = torch.as_tensor(iscrowd)\n\n image_id = anno[\"filename\"][5:-4]\n image_id = torch.as_tensor([int(image_id)])\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"image_id\"] = image_id\n\n # for conversion to coco api\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\nclass VOCDetectionDataModule(LightningDataModule):\n \"\"\"\n TODO(teddykoker) docstring\n \"\"\"\n\n name = \"vocdetection\"\n\n def __init__(\n self,\n data_dir: str,\n year: str = \"2012\",\n num_workers: int = 16,\n normalize: bool = False,\n shuffle: bool = False,\n pin_memory: bool = False,\n drop_last: bool = False,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if not _TORCHVISION_AVAILABLE: # pragma: no cover\n raise ModuleNotFoundError(\n 'You want to use VOC dataset loaded from `torchvision` which is not installed yet.'\n )\n\n super().__init__(*args, **kwargs)\n\n self.year = year\n self.data_dir = data_dir\n self.num_workers = num_workers\n self.normalize = normalize\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 21\n \"\"\"\n return 21\n\n def prepare_data(self) -> None:\n \"\"\"\n Saves VOCDetection files to data_dir\n \"\"\"\n 
VOCDetection(self.data_dir, year=self.year, image_set=\"train\", download=True)\n VOCDetection(self.data_dir, year=self.year, image_set=\"val\", download=True)\n\n def train_dataloader(\n self, batch_size: int = 1, image_transforms: Union[List[Callable], Callable] = None\n ) -> DataLoader:\n \"\"\"\n VOCDetection train set uses the `train` subset\n\n Args:\n batch_size: size of batch\n transforms: custom transforms\n \"\"\"\n transforms = [_prepare_voc_instance]\n image_transforms = image_transforms or self.train_transforms or self._default_transforms()\n transforms = Compose(transforms, image_transforms)\n dataset = VOCDetection(self.data_dir, year=self.year, image_set=\"train\", transforms=transforms)\n loader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=self.shuffle,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n pin_memory=self.pin_memory,\n collate_fn=_collate_fn,\n )\n return loader\n\n def val_dataloader(self, batch_size: int = 1, image_transforms: Optional[List[Callable]] = None) -> DataLoader:\n \"\"\"\n VOCDetection val set uses the `val` subset\n\n Args:\n batch_size: size of batch\n transforms: custom transforms\n \"\"\"\n transforms = [_prepare_voc_instance]\n image_transforms = image_transforms or self.train_transforms or self._default_transforms()\n transforms = Compose(transforms, image_transforms)\n dataset = VOCDetection(self.data_dir, year=self.year, image_set=\"val\", transforms=transforms)\n loader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n pin_memory=self.pin_memory,\n collate_fn=_collate_fn,\n )\n return loader\n\n def _default_transforms(self) -> Callable:\n if self.normalize:\n voc_transforms = transform_lib.Compose([\n transform_lib.ToTensor(),\n transform_lib.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n else:\n voc_transforms = transform_lib.Compose([transform_lib.ToTensor()])\n return voc_transforms\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.as_tensor"
]
] |
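Annotation: a minimal sketch of the detection collate function in the record above: tuple(zip(*batch)) regroups a list of (image, target) pairs into (images, targets) tuples instead of stacking tensors, which is what variable-size detection targets require:

def _collate_fn(batch):
    return tuple(zip(*batch))

batch = [("img0", {"boxes": 2}), ("img1", {"boxes": 5})]  # toy (image, target) pairs
images, targets = _collate_fn(batch)
print(images)   # -> ('img0', 'img1')
print(targets)  # -> ({'boxes': 2}, {'boxes': 5})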
awesome-archive/detectron2
|
[
"4fe0d8f10a13016c2a802e5e47af67ebe5576fc3"
] |
[
"demo/predictor.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport atexit\nimport bisect\nimport multiprocessing as mp\nfrom collections import deque\nimport cv2\nimport torch\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.engine.defaults import DefaultPredictor\nfrom detectron2.utils.video_visualizer import VideoVisualizer\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\n\n\nclass VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n parallel (bool): whether to run the model in different processes from visualization.\n Useful since the visualization logic can be slow.\n \"\"\"\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.parallel = parallel\n if parallel:\n num_gpu = torch.cuda.device_count()\n self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)\n else:\n self.predictor = DefaultPredictor(cfg)\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n\n Returns:\n predictions (dict): the output of the model.\n vis_output (VisImage): the visualized image output.\n \"\"\"\n vis_output = None\n predictions = self.predictor(image)\n # Convert image from OpenCV BGR format to Matplotlib RGB format.\n image = image[:, :, ::-1]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_output = visualizer.draw_panoptic_seg_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n else:\n if \"sem_seg\" in predictions:\n vis_output = visualizer.draw_sem_seg(\n predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n if \"instances\" in predictions:\n instances = predictions[\"instances\"].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n\n return predictions, vis_output\n\n def _frame_from_video(self, video):\n while video.isOpened():\n success, frame = video.read()\n if success:\n yield frame\n else:\n break\n\n def run_on_video(self, video):\n \"\"\"\n Visualizes predictions on frames of the input video.\n\n Args:\n video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be\n either a webcam or a video file.\n\n Yields:\n ndarray: BGR visualizations of each video frame.\n \"\"\"\n video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)\n\n def process_predictions(frame, predictions):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_frame = video_visualizer.draw_panoptic_seg_predictions(\n frame, panoptic_seg.to(self.cpu_device), segments_info\n )\n elif \"instances\" in predictions:\n predictions = predictions[\"instances\"].to(self.cpu_device)\n vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)\n elif \"sem_seg\" in predictions:\n vis_frame = video_visualizer.draw_sem_seg(\n frame, predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n\n # Converts Matplotlib RGB format to OpenCV BGR format\n vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)\n return vis_frame\n\n frame_gen = self._frame_from_video(video)\n if self.parallel:\n buffer_size = 
self.predictor.default_buffer_size\n\n frame_data = deque()\n\n for cnt, frame in enumerate(frame_gen):\n frame_data.append(frame)\n self.predictor.put(frame)\n\n if cnt >= buffer_size:\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n\n while len(frame_data):\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n else:\n for frame in frame_gen:\n yield process_predictions(frame, self.predictor(frame))\n\n\nclass AsyncPredictor:\n \"\"\"\n A predictor that runs the model asynchronously, possibly on >1 GPUs.\n Because rendering the visualization takes considerably amount of time,\n this helps improve throughput when rendering videos.\n \"\"\"\n\n class _StopToken:\n pass\n\n class _PredictWorker(mp.Process):\n def __init__(self, cfg, task_queue, result_queue):\n self.cfg = cfg\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()\n\n def run(self):\n predictor = DefaultPredictor(self.cfg)\n\n while True:\n task = self.task_queue.get()\n if isinstance(task, AsyncPredictor._StopToken):\n break\n idx, data = task\n result = predictor(data)\n self.result_queue.put((idx, result))\n\n def __init__(self, cfg, num_gpus: int = 1):\n \"\"\"\n Args:\n cfg (CfgNode):\n num_gpus (int): if 0, will run on CPU\n \"\"\"\n num_workers = max(num_gpus, 1)\n self.task_queue = mp.Queue(maxsize=num_workers * 3)\n self.result_queue = mp.Queue(maxsize=num_workers * 3)\n self.procs = []\n for gpuid in range(max(num_gpus, 1)):\n cfg = cfg.clone()\n cfg.defrost()\n cfg.MODEL.DEVICE = \"cuda:{}\".format(gpuid) if num_gpus > 0 else \"cpu\"\n self.procs.append(\n AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)\n )\n\n self.put_idx = 0\n self.get_idx = 0\n self.result_rank = []\n self.result_data = []\n\n for p in self.procs:\n p.start()\n atexit.register(self.shutdown)\n\n def put(self, image):\n self.put_idx += 1\n self.task_queue.put((self.put_idx, image))\n\n def get(self):\n self.get_idx += 1 # the index needed for this request\n if len(self.result_rank) and self.result_rank[0] == self.get_idx:\n res = self.result_data[0]\n del self.result_data[0], self.result_rank[0]\n return res\n\n while True:\n # make sure the results are returned in the correct order\n idx, res = self.result_queue.get()\n if idx == self.get_idx:\n return res\n insert = bisect.bisect(self.result_rank, idx)\n self.result_rank.insert(insert, idx)\n self.result_data.insert(insert, res)\n\n def __len__(self):\n return self.put_idx - self.get_idx\n\n def __call__(self, image):\n self.put(image)\n return self.get()\n\n def shutdown(self):\n for _ in self.procs:\n self.task_queue.put(AsyncPredictor._StopToken())\n\n @property\n def default_buffer_size(self):\n return len(self.procs) * 5\n"
] |
[
[
"torch.device",
"torch.cuda.device_count"
]
] |
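Annotation: a minimal sketch of AsyncPredictor.get() in the record above: results can arrive out of order from worker processes, so out-of-order items are parked in sorted side lists (via bisect) until the index being waited on shows up. The multiprocessing queue is simulated with a plain list:

import bisect

arrivals = [(2, "r2"), (3, "r3"), (1, "r1")]   # (idx, result) in arrival order
result_rank, result_data = [], []

def get(get_idx, queue):
    if result_rank and result_rank[0] == get_idx:   # already parked and next in line
        res = result_data[0]
        del result_rank[0], result_data[0]
        return res
    while True:
        idx, res = queue.pop(0)                     # stand-in for result_queue.get()
        if idx == get_idx:
            return res
        insert = bisect.bisect(result_rank, idx)    # keep side lists sorted by idx
        result_rank.insert(insert, idx)
        result_data.insert(insert, res)

queue = list(arrivals)
print([get(i, queue) for i in (1, 2, 3)])  # -> ['r1', 'r2', 'r3']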
gustasvs/AI
|
[
"23360a8865e8211568594c2b2ced11dcdc9b0006",
"23360a8865e8211568594c2b2ced11dcdc9b0006"
] |
[
"DiRT AI/DiRT NeuralNetworks/functions/grabscreen.py",
"Tensorflow-master/experiments/2D_car/car_env.py"
] |
[
"import win32gui, win32ui, win32con, win32api\nimport cv2\nimport numpy as np\n\n\ndef grab_screen(region=None):\n hwin = win32gui.GetDesktopWindow()\n\n left,top,x2,y2 = region\n width = x2 - left + 1\n height = y2 - top + 1\n\n hwindc = win32gui.GetWindowDC(hwin)\n srcdc = win32ui.CreateDCFromHandle(hwindc)\n memdc = srcdc.CreateCompatibleDC()\n bmp = win32ui.CreateBitmap()\n bmp.CreateCompatibleBitmap(srcdc, width, height)\n memdc.SelectObject(bmp)\n memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)\n \n signedIntsArray = bmp.GetBitmapBits(True)\n img = np.fromstring(signedIntsArray, dtype='uint8')\n img.shape = (height,width,4)\n\n srcdc.DeleteDC()\n memdc.DeleteDC()\n win32gui.ReleaseDC(hwin, hwindc)\n win32gui.DeleteObject(bmp.GetHandle())\n\n return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)",
"import numpy as np\nimport pyglet\n\n\npyglet.clock.set_fps_limit(10000)\n\n\nclass CarEnv(object):\n n_sensor = 5\n action_dim = 1\n state_dim = n_sensor\n viewer = None\n viewer_xy = (1080, 720)\n sensor_max = 150.\n start_point = [100, 100]\n speed = 50.\n dt = 0.1\n\n def __init__(self, discrete_action=False):\n self.is_discrete_action = discrete_action\n if discrete_action:\n self.actions = [-1, 0, 1]\n else:\n self.action_bound = [-1, 1]\n\n self.terminal = False\n # node1 (x, y, r, w, l),\n self.car_info = np.array([0, 0, 0, 20, 40], dtype=np.float64) # car coordination\n\n self.obstacles_coords = [\n # np.array([\n # [120, 120],\n # [380, 120],\n # [380, 380],\n # [120, 380],]),\n \n np.array([\n [500, 100],\n [200, 100],\n [200, 200],\n [100, 200],])# ,\n # np.array([\n # [300, 300],\n # [400, 300],\n # [400, 400],\n # [300, 400],])\n ]\n\n self.sensor_info = self.sensor_max + np.zeros((self.n_sensor, 3)) # n sensors, (distance, end_x, end_y)\n\n def step(self, action):\n if self.is_discrete_action:\n action = self.actions[action]\n else:\n action = np.clip(action, *self.action_bound)[0]\n self.car_info[2] += action * np.pi/30 # max r = 6 degree\n self.car_info[:2] = self.car_info[:2] + \\\n self.speed * self.dt * np.array([np.cos(self.car_info[2]), np.sin(self.car_info[2])])\n\n self._update_sensor()\n s = self._get_state()\n r = -1 if self.terminal else 0\n return s, r, self.terminal\n\n def reset(self):\n self.terminal = False\n self.car_info[:3] = np.array([*self.start_point, -np.pi/2])\n self._update_sensor()\n return self._get_state()\n\n def render(self):\n if self.viewer is None:\n self.viewer = Viewer(*self.viewer_xy, self.car_info, self.sensor_info,self.obstacles_coords)\n self.viewer.render()\n\n def sample_action(self):\n if self.is_discrete_action:\n a = np.random.choice(list(range(3)))\n else:\n a = np.random.uniform(*self.action_bound, size=self.action_dim)\n return a\n\n def set_fps(self, fps=30):\n pyglet.clock.set_fps_limit(fps)\n\n def _get_state(self):\n state = self.sensor_info[:, 0].flatten()/self.sensor_max\n return state\n def obstacles_collision(self, obstacle, s, q):\n for oi in range(len(obstacle)):\n p = obstacle[oi]\n r = obstacle[(oi + 1) % len(obstacle)] - obstacle[oi]\n if np.cross(r, s) != 0: # may collision\n t = np.cross((q - p), s) / np.cross(r, s)\n u = np.cross((q - p), r) / np.cross(r, s)\n if 0 <= t <= 1 and 0 <= u <= 1:\n intersection = q + u * s\n self.possible_intersections.append(intersection)\n self.possible_sensor_distance.append(np.linalg.norm(u*s))\n def _update_sensor(self):\n cx, cy, rotation = self.car_info[:3]\n\n n_sensors = len(self.sensor_info)\n sensor_theta = np.linspace(-np.pi / 2, np.pi / 2, n_sensors)\n xs = cx + (np.zeros((n_sensors, ))+self.sensor_max) * np.cos(sensor_theta)\n ys = cy + (np.zeros((n_sensors, ))+self.sensor_max) * np.sin(sensor_theta)\n xys = np.array([[x, y] for x, y in zip(xs, ys)]) # shape (5 sensors, 2)\n\n # sensors\n tmp_x = xys[:, 0] - cx\n tmp_y = xys[:, 1] - cy\n # apply rotation\n rotated_x = tmp_x * np.cos(rotation) - tmp_y * np.sin(rotation)\n rotated_y = tmp_x * np.sin(rotation) + tmp_y * np.cos(rotation)\n # rotated x y\n self.sensor_info[:, -2:] = np.vstack([rotated_x+cx, rotated_y+cy]).T\n\n q = np.array([cx, cy])\n for si in range(len(self.sensor_info)):\n s = self.sensor_info[si, -2:] - q\n self.possible_sensor_distance = [self.sensor_max]\n self.possible_intersections = [self.sensor_info[si, -2:]]\n\n # obstacle collision\n for ob in range(len(self.obstacles_coords)):\n 
self.obstacles_collision(self.obstacles_coords[ob], s, q)\n\n # window collision\n win_coord = np.array([\n [0, 0],\n [self.viewer_xy[0], 0],\n [*self.viewer_xy],\n [0, self.viewer_xy[1]],\n [0, 0],\n ])\n for oi in range(4):\n p = win_coord[oi]\n r = win_coord[(oi + 1) % len(win_coord)] - win_coord[oi]\n if np.cross(r, s) != 0: # may collision\n t = np.cross((q - p), s) / np.cross(r, s)\n u = np.cross((q - p), r) / np.cross(r, s)\n if 0 <= t <= 1 and 0 <= u <= 1:\n intersection = p + t * r\n self.possible_intersections.append(intersection)\n self.possible_sensor_distance.append(np.linalg.norm(intersection - q))\n\n distance = np.min(self.possible_sensor_distance)\n distance_index = np.argmin(self.possible_sensor_distance)\n self.sensor_info[si, 0] = distance\n self.sensor_info[si, -2:] = self.possible_intersections[distance_index]\n if distance < self.car_info[-1]/2:\n self.terminal = True\n\n\nclass Viewer(pyglet.window.Window):\n color = {\n 'background': [1]*3 + [1]\n }\n fps_display = pyglet.clock.ClockDisplay()\n bar_thc = 5\n\n def __init__(self, width, height, car_info, sensor_info, obstacles_coords):\n super(Viewer, self).__init__(width, height, resizable=False, caption='2D car', vsync=False) # vsync=False to not use the monitor FPS\n self.set_location(x=80, y=10)\n pyglet.gl.glClearColor(*self.color['background'])\n\n self.car_info = car_info\n self.sensor_info = sensor_info\n\n self.batch = pyglet.graphics.Batch()\n background = pyglet.graphics.OrderedGroup(0)\n foreground = pyglet.graphics.OrderedGroup(1)\n\n self.sensors = []\n line_coord = [0, 0] * 2\n c = (73, 73, 73) * 2\n for i in range(len(self.sensor_info)):\n self.sensors.append(self.batch.add(2, pyglet.gl.GL_LINES, foreground, ('v2f', line_coord), ('c3B', c)))\n\n car_box = [0, 0] * 4\n c = (249, 86, 86) * 4\n self.car = self.batch.add(4, pyglet.gl.GL_QUADS, foreground, ('v2f', car_box), ('c3B', c))\n\n c = (134, 181, 244) * 4\n for ob in range(len(obstacles_coords)):\n #self.obstacle = \n self.batch.add(4, pyglet.gl.GL_QUADS, background, ('v2f', obstacles_coords[ob].flatten()), ('c3B', c))\n\n def render(self):\n pyglet.clock.tick()\n self._update()\n self.switch_to()\n self.dispatch_events()\n self.dispatch_event('on_draw')\n self.flip()\n\n def on_draw(self):\n self.clear()\n self.batch.draw()\n # self.fps_display.draw()\n\n def _update(self):\n cx, cy, r, w, l = self.car_info\n\n # sensors\n for i, sensor in enumerate(self.sensors):\n sensor.vertices = [cx, cy, *self.sensor_info[i, -2:]]\n\n # car\n xys = [\n [cx + l / 2, cy + w / 2],\n [cx - l / 2, cy + w / 2],\n [cx - l / 2, cy - w / 2],\n [cx + l / 2, cy - w / 2],\n ]\n r_xys = []\n for x, y in xys:\n tempX = x - cx\n tempY = y - cy\n # apply rotation\n rotatedX = tempX * np.cos(r) - tempY * np.sin(r)\n rotatedY = tempX * np.sin(r) + tempY * np.cos(r)\n # rotated x y\n x = rotatedX + cx\n y = rotatedY + cy\n r_xys += [x, y]\n self.car.vertices = r_xys\n\n\nif __name__ == '__main__':\n np.random.seed(1)\n env = CarEnv()\n env.set_fps(30)\n for ep in range(20):\n s = env.reset()\n # for t in range(100):\n while True:\n env.render()\n s, r, done = env.step(env.sample_action())\n if done:\n break"
] |
[
[
"numpy.fromstring"
],
[
"numpy.random.seed",
"numpy.linspace",
"numpy.min",
"numpy.clip",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.argmin",
"numpy.cross",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
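The sensor update in the CarEnv code above relies on the standard parametric 2-D segment-intersection test: segments p + t*r and q + u*s intersect where t = cross(q - p, s) / cross(r, s) and u = cross(q - p, r) / cross(r, s), with both parameters in [0, 1]. A minimal self-contained sketch of that math (illustrative only; the function names are mine, not the repository's):

import numpy as np

def cross2(a, b):
    """z-component of the 2-D cross product (what np.cross returns for 2-vectors)."""
    return a[0] * b[1] - a[1] * b[0]

def segment_intersection(p, r, q, s):
    """Intersection point of segments p -> p+r and q -> q+s, or None."""
    denom = cross2(r, s)
    if denom == 0:  # parallel or collinear: no unique intersection point
        return None
    t = cross2(q - p, s) / denom
    u = cross2(q - p, r) / denom
    if 0 <= t <= 1 and 0 <= u <= 1:
        return p + t * r  # the point, expressed on the first segment
    return None

# A sensor ray from (0, 0) to (4, 4) against a wall from (0, 4) to (4, 0):
print(segment_intersection(np.array([0., 0.]), np.array([4., 4.]),
                           np.array([0., 4.]), np.array([4., -4.])))  # [2. 2.]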
mtzgroup/ChemPixCH
|
[
"0f2763eb8989904d11eb3486f101a50173723ed4"
] |
[
"im2smiles/model/utils/general.py"
] |
[
"import os\nimport numpy as np\nimport time\nimport logging\nimport sys\nimport subprocess, shlex\nfrom shutil import copyfile\nimport json\nfrom threading import Timer\nfrom os import listdir\nfrom os.path import isfile, join\n\n\ndef minibatches(data_generator, minibatch_size):\n \"\"\"\n Args:\n data_generator: generator of (img, formulas) tuples\n minibatch_size: (int)\n\n Returns:\n list of tuples\n\n \"\"\"\n x_batch, y_batch = [], []\n for (x, y) in data_generator:\n if len(x_batch) == minibatch_size:\n yield x_batch, y_batch\n x_batch, y_batch = [], []\n\n x_batch += [x]\n y_batch += [y]\n\n if len(x_batch) != 0:\n yield x_batch, y_batch\n\n\ndef run(cmd, timeout_sec):\n \"\"\"Run cmd in the shell with timeout\"\"\"\n proc = subprocess.Popen(cmd, shell=True)\n kill_proc = lambda p: p.kill()\n timer = Timer(timeout_sec, kill_proc, [proc])\n try:\n timer.start()\n stdout, stderr = proc.communicate()\n finally:\n timer.cancel()\n\n\ndef get_logger(filename):\n \"\"\"Return instance of logger\"\"\"\n logger = logging.getLogger('logger')\n logger.setLevel(logging.INFO)\n logging.basicConfig(format='%(message)s', level=logging.INFO)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.INFO)\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logging.getLogger().addHandler(handler)\n return logger\n\n\ndef init_dir(dir_name):\n \"\"\"Creates directory if it does not exists\"\"\"\n if dir_name is not None:\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef init_file(path_file, mode=\"a\"):\n \"\"\"Makes sure that a given file exists\"\"\"\n with open(path_file, mode) as f:\n pass\n\n\ndef get_files(dir_name):\n files = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]\n return files\n\n\ndef delete_file(path_file):\n try:\n os.remove(path_file)\n except Exception:\n pass\n\n\nclass Config():\n \"\"\"Class that loads hyperparameters from json file into attributes\"\"\"\n\n def __init__(self, source):\n \"\"\"\n Args:\n source: path to json file or dict\n \"\"\"\n self.source = source\n\n if type(source) is dict:\n self.__dict__.update(source)\n elif type(source) is list:\n for s in source:\n self.load_json(s)\n else:\n self.load_json(source)\n\n def load_json(self, source):\n print(source)\n with open(source) as f:\n data = json.load(f)\n self.__dict__.update(data)\n\n def save(self, dir_name):\n init_dir(dir_name)\n if type(self.source) is list:\n for s in self.source:\n c = Config(s)\n c.save(dir_name)\n elif type(self.source) is dict:\n json.dumps(self.source, indent=4)\n else:\n copyfile(self.source, dir_name + self.export_name)\n\n\nclass Progbar(object):\n \"\"\"Progbar class inspired by keras\"\"\"\n\n def __init__(self, max_step, width=30):\n self.max_step = max_step\n self.width = width\n self.last_width = 0\n\n self.sum_values = {}\n\n self.start = time.time()\n self.last_step = 0\n\n self.info = \"\"\n self.bar = \"\"\n\n def _update_values(self, curr_step, values):\n for k, v in values:\n if k not in self.sum_values:\n self.sum_values[k] = [v * (curr_step - self.last_step),\n curr_step - self.last_step]\n else:\n self.sum_values[k][0] += v * (curr_step - self.last_step)\n self.sum_values[k][1] += (curr_step - self.last_step)\n\n def _write_bar(self, curr_step):\n last_width = self.last_width\n sys.stdout.write(\"\\b\" * last_width)\n sys.stdout.write(\"\\r\")\n\n numdigits = int(np.floor(np.log10(self.max_step))) + 1\n barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)\n bar = barstr % (curr_step, 
self.max_step)\n prog = float(curr_step)/self.max_step\n prog_width = int(self.width*prog)\n if prog_width > 0:\n bar += ('='*(prog_width-1))\n if curr_step < self.max_step:\n bar += '>'\n else:\n bar += '='\n bar += ('.'*(self.width-prog_width))\n bar += ']'\n sys.stdout.write(bar)\n\n return bar\n\n def _get_eta(self, curr_step):\n now = time.time()\n if curr_step:\n time_per_unit = (now - self.start) / curr_step\n else:\n time_per_unit = 0\n eta = time_per_unit*(self.max_step - curr_step)\n\n if curr_step < self.max_step:\n info = ' - ETA: %ds' % eta\n else:\n info = ' - %ds' % (now - self.start)\n\n return info\n\n def _get_values_sum(self):\n info = \"\"\n for name, value in self.sum_values.items():\n info += ' - %s: %.4f' % (name, value[0] / max(1, value[1]))\n return info\n\n def _write_info(self, curr_step):\n info = \"\"\n info += self._get_eta(curr_step)\n info += self._get_values_sum()\n\n sys.stdout.write(info)\n\n return info\n\n def _update_width(self, curr_step):\n curr_width = len(self.bar) + len(self.info)\n if curr_width < self.last_width:\n sys.stdout.write(\" \"*(self.last_width - curr_width))\n\n if curr_step >= self.max_step:\n sys.stdout.write(\"\\n\")\n\n sys.stdout.flush()\n\n self.last_width = curr_width\n\n def update(self, curr_step, values):\n \"\"\"Updates the progress bar.\n\n Args:\n values: List of tuples (name, value_for_last_step).\n The progress bar will display averages for these values.\n\n \"\"\"\n self._update_values(curr_step, values)\n self.bar = self._write_bar(curr_step)\n self.info = self._write_info(curr_step)\n self._update_width(curr_step)\n self.last_step = curr_step\n"
] |
[
[
"numpy.log10"
]
] |
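The minibatches() helper in the general.py code above chunks any (x, y) generator lazily and still yields the final partial batch. A small standalone usage sketch (the sample data is made up for illustration):

def minibatches(data_generator, minibatch_size):
    x_batch, y_batch = [], []
    for (x, y) in data_generator:
        if len(x_batch) == minibatch_size:
            yield x_batch, y_batch
            x_batch, y_batch = [], []
        x_batch += [x]
        y_batch += [y]
    if len(x_batch) != 0:  # flush the last, possibly smaller, batch
        yield x_batch, y_batch

pairs = ((i, i * i) for i in range(5))
for xb, yb in minibatches(pairs, 2):
    print(xb, yb)  # [0, 1] [0, 1]  then  [2, 3] [4, 9]  then  [4] [16]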
sharat910/datacube-iirs
|
[
"1b7e2d192d969609756def8923c20899733e695d"
] |
[
"build/lib/datacube/ndexpr/__init__.py"
] |
[
"# ------------------------------------------------------------------------------\n# Name: ndexpr.py\n# Purpose: ndarray Math Expression evaluator\n#\n# Author: Peter Wang\n#\n# Created: 7 October 2015\n# Copyright: 2015 Commonwealth Scientific and Industrial Research Organisation\n# (CSIRO)\n# Code based on PyParsing fourFn example by Paul McGuire\n# Used with his with permission\n# (http://pyparsing.wikispaces.com/file/view/fourFn.py)\n# Adapted get_pqa_mask function from stacker.py by Josh Sixsmith &\n# Alex IP of Geoscience Australia\n# https://github.com/GeoscienceAustralia/agdc/blob/master/src/stacker.py\n# License: This software is open source under the Apache v2.0 License\n# as provided in the accompanying LICENSE file or available from\n# https://github.com/data-cube/agdc-v2/blob/master/LICENSE\n# By continuing, you acknowledge that you have read and you accept\n# and will abide by the terms of the License.\n#\n# Updates:\n# 7/10/2015: Initial Version.\n#\n# ------------------------------------------------------------------------------\n\n# pylint: disable=too-many-statements, too-many-branches, expression-not-assigned, too-many-locals,\n# pylint: disable=too-many-return-statements, protected-access, undefined-variable, too-many-public-methods\n# pylint: disable=consider-using-enumerate, deprecated-method\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport math\nimport operator\nimport inspect\nimport sys\nimport ctypes\nfrom pprint import pprint\nimport numpy as np\nimport xarray as xr\nfrom xarray import ufuncs\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nfrom pyparsing import Literal, CaselessLiteral, Word, Combine, Group,\\\n Optional, ZeroOrMore, Forward, nums, alphas, delimitedList,\\\n ParserElement, FollowedBy\n\nParserElement.enablePackrat()\n\n\nclass NDexpr(object):\n\n def __init__(self):\n\n self.ae = False\n self.local_dict = None\n self.f = None\n\n self.user_functions = None\n\n self.expr_stack = []\n self.texpr_stack = []\n\n # Define constants\n self.constants = {}\n\n # Define Operators\n self.opn = {\"+\": operator.add,\n \"-\": operator.sub,\n \"*\": operator.mul,\n \"/\": operator.truediv,\n \">\": operator.gt,\n \">=\": operator.ge,\n \"<\": operator.lt,\n \"<=\": operator.le,\n \"==\": operator.eq,\n \"!=\": operator.ne,\n \"|\": operator.or_,\n \"&\": operator.and_,\n \"!\": operator.inv}\n\n # Define xarray DataArray operators with 1 input parameter\n self.xfn1 = {\"angle\": xr.ufuncs.angle,\n \"arccos\": xr.ufuncs.arccos,\n \"arccosh\": xr.ufuncs.arccosh,\n \"arcsin\": xr.ufuncs.arcsin,\n \"arcsinh\": xr.ufuncs.arcsinh,\n \"arctan\": xr.ufuncs.arctan,\n \"arctanh\": xr.ufuncs.arctanh,\n \"ceil\": xr.ufuncs.ceil,\n \"conj\": xr.ufuncs.conj,\n \"cos\": xr.ufuncs.cos,\n \"cosh\": xr.ufuncs.cosh,\n \"deg2rad\": xr.ufuncs.deg2rad,\n \"degrees\": xr.ufuncs.degrees,\n \"exp\": xr.ufuncs.exp,\n \"expm1\": xr.ufuncs.expm1,\n \"fabs\": xr.ufuncs.fabs,\n \"fix\": xr.ufuncs.fix,\n \"floor\": xr.ufuncs.floor,\n \"frexp\": xr.ufuncs.frexp,\n \"imag\": xr.ufuncs.imag,\n \"iscomplex\": xr.ufuncs.iscomplex,\n \"isfinite\": xr.ufuncs.isfinite,\n \"isinf\": xr.ufuncs.isinf,\n \"isnan\": xr.ufuncs.isnan,\n \"isreal\": xr.ufuncs.isreal,\n \"log\": xr.ufuncs.log,\n \"log10\": xr.ufuncs.log10,\n \"log1p\": xr.ufuncs.log1p,\n \"log2\": xr.ufuncs.log2,\n \"rad2deg\": xr.ufuncs.rad2deg,\n \"radians\": xr.ufuncs.radians,\n \"real\": xr.ufuncs.real,\n \"rint\": xr.ufuncs.rint,\n \"sign\": xr.ufuncs.sign,\n \"signbit\": 
xr.ufuncs.signbit,\n \"sin\": xr.ufuncs.sin,\n \"sinh\": xr.ufuncs.sinh,\n \"sqrt\": xr.ufuncs.sqrt,\n \"square\": xr.ufuncs.square,\n \"tan\": xr.ufuncs.tan,\n \"tanh\": xr.ufuncs.tanh,\n \"trunc\": xr.ufuncs.trunc}\n\n # Define xarray DataArray operators with 2 input parameter\n self.xfn2 = {\"arctan2\": xr.ufuncs.arctan2,\n \"copysign\": xr.ufuncs.copysign,\n \"fmax\": xr.ufuncs.fmax,\n \"fmin\": xr.ufuncs.fmin,\n \"fmod\": xr.ufuncs.fmod,\n \"hypot\": xr.ufuncs.hypot,\n \"ldexp\": xr.ufuncs.ldexp,\n \"logaddexp\": xr.ufuncs.logaddexp,\n \"logaddexp2\": xr.ufuncs.logaddexp2,\n \"logicaland\": xr.ufuncs.logical_and,\n \"logicalnot\": xr.ufuncs.logical_not,\n \"logicalor\": xr.ufuncs.logical_or,\n \"logicalxor\": xr.ufuncs.logical_xor,\n \"maximum\": xr.ufuncs.maximum,\n \"minimum\": xr.ufuncs.minimum,\n \"nextafter\": xr.ufuncs.nextafter}\n\n # Define non-xarray DataArray operators with 2 input parameter\n self.fn2 = {\"percentile\": np.percentile}\n\n # Define xarray DataArray reduction operators\n self.xrfn = {\"all\": xr.DataArray.all,\n \"any\": xr.DataArray.any,\n \"argmax\": xr.DataArray.argmax,\n \"argmin\": xr.DataArray.argmin,\n \"max\": xr.DataArray.max,\n \"mean\": xr.DataArray.mean,\n \"median\": xr.DataArray.median,\n \"min\": xr.DataArray.min,\n \"prod\": xr.DataArray.prod,\n \"sum\": xr.DataArray.sum,\n \"std\": xr.DataArray.std,\n \"var\": xr.DataArray.var}\n\n # Define non-xarray DataArray operators with 2 input parameter\n self.xcond = {\"<\": np.percentile}\n\n # Define Grammar\n point = Literal(\".\")\n e = CaselessLiteral(\"E\")\n fnumber = Combine(Word(\"+-\"+nums, nums) +\n Optional(point + Optional(Word(nums))) +\n Optional(e + Word(\"+-\"+nums, nums)))\n variable = Word(alphas, alphas+nums+\"_$\")\n\n seq = Literal(\"=\")\n b_not = Literal(\"~\")\n plus = Literal(\"+\")\n minus = Literal(\"-\")\n mult = Literal(\"*\")\n div = Literal(\"/\")\n gt = Literal(\">\")\n gte = Literal(\">=\")\n lt = Literal(\"<\")\n lte = Literal(\"<=\")\n eq = Literal(\"==\")\n neq = Literal(\"!=\")\n b_or = Literal(\"|\")\n b_and = Literal(\"&\")\n l_not = Literal(\"!\")\n lpar = Literal(\"(\").suppress()\n rpar = Literal(\")\").suppress()\n comma = Literal(\",\")\n colon = Literal(\":\")\n lbrac = Literal(\"[\")\n rbrac = Literal(\"]\")\n lcurl = Literal(\"{\")\n rcurl = Literal(\"}\")\n qmark = Literal(\"?\")\n scolon = Literal(\";\")\n addop = plus | minus\n multop = mult | div\n sliceop = colon\n compop = gte | lte | gt | lt\n eqop = eq | neq\n bitcompop = b_or | b_and\n bitnotop = b_not\n logicalnotop = l_not\n assignop = seq\n expop = Literal(\"^\")\n\n expr = Forward()\n indexexpr = Forward()\n\n atom = (Optional(\"-\") +\n (variable + seq + expr).setParseAction(self.push_assign) |\n indexexpr.setParseAction(self.push_index) |\n (lpar + expr + qmark.setParseAction(self.push_ternary1) + expr +\n scolon.setParseAction(self.push_ternary2) + expr +\n rpar).setParseAction(self.push_ternary) |\n (lpar + expr + qmark + expr + scolon + expr +\n rpar).setParseAction(self.push_ternary) |\n (logicalnotop + expr).setParseAction(self.push_ulnot) |\n (bitnotop + expr).setParseAction(self.push_unot) |\n (minus + expr).setParseAction(self.push_uminus) |\n (variable + lcurl + expr +\n rcurl).setParseAction(self.push_mask) |\n (variable + lpar + expr + (comma + expr)*3 +\n rpar).setParseAction(self.push_expr4) |\n (variable + lpar + expr + (comma + expr)*2 +\n rpar).setParseAction(self.push_expr3) |\n (variable + lpar + expr + comma + expr +\n rpar).setParseAction(self.push_expr2) |\n (variable 
+ lpar + expr + rpar |\n variable).setParseAction(self.push_expr1) |\n fnumber.setParseAction(self.push_expr) |\n (lpar + expr + ZeroOrMore(comma + expr).setParseAction(self.get_tuple) +\n rpar).setParseAction(self.push_tuple) |\n (lpar + expr.suppress() +\n rpar).setParseAction(self.push_uminus))\n\n # Define order of operations for operators\n\n factor = Forward()\n factor << atom + ZeroOrMore((expop + factor).setParseAction(self.push_op))\n term = factor + ZeroOrMore((multop + factor).setParseAction(self.push_op))\n term2 = term + ZeroOrMore((addop + term).setParseAction(self.push_op))\n term3 = term2 + ZeroOrMore((sliceop + term2).setParseAction(self.push_op))\n term4 = term3 + ZeroOrMore((compop + term3).setParseAction(self.push_op))\n term5 = term4 + ZeroOrMore((eqop + term4).setParseAction(self.push_op))\n term6 = term5 + ZeroOrMore((bitcompop + term5).setParseAction(self.push_op))\n expr << term6 + ZeroOrMore((assignop + term6).setParseAction(self.push_op))\n\n # Define index operators\n\n colon_expr = (colon + FollowedBy(comma) ^ colon +\n FollowedBy(rbrac)).setParseAction(self.push_colon)\n range_expr = colon_expr | expr | colon\n indexexpr << (variable + lbrac + delimitedList(range_expr, delim=',') +\n rbrac).setParseAction(self.push_expr)\n\n self.parser = expr\n\n def set_ae(self, flag):\n self.ae = flag\n\n def push_expr(self, strg, loc, toks):\n self.expr_stack.append(toks[0])\n\n def push_expr1(self, strg, loc, toks):\n if toks[0] in self.xrfn:\n self.expr_stack.append('1')\n self.expr_stack.append(toks[0])\n\n def push_expr2(self, strg, loc, toks):\n if toks[0] in self.xrfn:\n self.expr_stack.append('2')\n self.expr_stack.append(toks[0])\n\n def push_expr3(self, strg, loc, toks):\n if toks[0] in self.xrfn:\n self.expr_stack.append('3')\n self.expr_stack.append(toks[0])\n\n def push_expr4(self, strg, loc, toks):\n if toks[0] in self.xrfn:\n self.expr_stack.append('4')\n self.expr_stack.append(toks[0])\n\n def push_op(self, strg, loc, toks):\n self.expr_stack.append(toks[0])\n\n def push_uminus(self, strg, loc, toks):\n if toks and toks[0] == '-':\n self.expr_stack.append('unary -')\n\n def push_unot(self, strg, loc, toks):\n if toks and toks[0] == '~':\n self.expr_stack.append('unary ~')\n\n def push_ulnot(self, strg, loc, toks):\n if toks and toks[0] == '!':\n self.expr_stack.append('unary !')\n\n def push_index(self, strg, loc, toks):\n self.expr_stack.append(\"[]\")\n\n def push_tuple(self, strg, loc, toks):\n if ',' in toks.asList():\n self.expr_stack.append(\"()\")\n\n def get_tuple(self, strg, loc, toks):\n count = toks.asList().count(',')\n if count > 0:\n self.expr_stack.append(str(count+1))\n\n def push_colon(self, strg, loc, toks):\n self.expr_stack.append(\"::\")\n\n def push_mask(self, strg, loc, toks):\n self.expr_stack.append(toks[0])\n self.expr_stack.append(\"{}\")\n\n def push_assign(self, strg, loc, toks):\n self.expr_stack.append(toks[0])\n self.expr_stack.append(\"=\")\n\n def push_ternary(self, strg, loc, toks):\n self.texpr_stack.append(self.expr_stack)\n self.expr_stack = []\n self.expr_stack.append(self.texpr_stack[::-1])\n self.expr_stack.append('?')\n self.expr_stack = self.flatten_list(self.expr_stack)\n self.texpr_stack = []\n\n def push_ternary1(self, strg, loc, toks):\n self.texpr_stack.append(self.expr_stack)\n self.expr_stack = []\n\n def push_ternary2(self, strg, loc, toks):\n self.texpr_stack.append(self.expr_stack)\n self.expr_stack = []\n\n def evaluate_stack(self, s):\n op = s.pop()\n if op == 'unary -':\n return 
-self.evaluate_stack(s)\n elif op == 'unary ~':\n return ~self.evaluate_stack(s)\n elif op == 'unary !':\n return xr.ufuncs.logical_not(self.evaluate_stack(s))\n elif op == \"=\":\n op1 = s.pop()\n op2 = self.evaluate_stack(s)\n self.f.f_globals[op1] = op2\n\n # code to write to locals, need to sort out when to write to locals/globals.\n # self.f.f_locals[op1] = op2\n # ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(self.f), ctypes.c_int(1))\n elif op in self.opn.keys():\n op2 = self.evaluate_stack(s)\n op1 = self.evaluate_stack(s)\n if op == '+' and isinstance(op2, xr.DataArray) and \\\n op2.dtype.type == np.bool_:\n return xr.DataArray.where(op1, op2)\n return self.opn[op](op1, op2)\n elif op == \"::\":\n return slice(None, None, None)\n elif op == \"()\":\n num_args = int(self.evaluate_stack(s))\n fn_args = ()\n for i in range(0, num_args):\n fn_args += self.evaluate_stack(s),\n fn_args = fn_args[::-1]\n return fn_args\n elif op in self.xrfn:\n dim = int(self.evaluate_stack(s))\n dims = ()\n for i in range(1, dim):\n dims += int(self.evaluate_stack(s)),\n op1 = self.evaluate_stack(s)\n\n args = {}\n if op == 'argmax' or op == 'argmin':\n if dim != 1:\n args['axis'] = dims[0]\n elif dim != 1:\n args['axis'] = dims\n\n if sys.version_info >= (3, 0):\n if 'skipna' in list(inspect.signature(self.xrfn[op]).parameters.keys()) and \\\n op != 'prod':\n args['skipna'] = True\n else:\n if 'skipna' in inspect.getargspec(self.xrfn[op])[0] and \\\n op != 'prod':\n args['skipna'] = True\n\n val = self.xrfn[op](xr.DataArray(op1), **args)\n return val\n elif op in self.xfn1:\n val = self.xfn1[op](self.evaluate_stack(s))\n\n if isinstance(val, tuple) or isinstance(val, np.ndarray):\n return xr.DataArray(val)\n return val\n elif op in self.xfn2:\n op2 = self.evaluate_stack(s)\n op1 = self.evaluate_stack(s)\n val = self.xfn2[op](op1, op2)\n\n if isinstance(val, tuple) or isinstance(val, np.ndarray):\n return xr.DataArray(val)\n return val\n elif op in self.fn2:\n op2 = self.evaluate_stack(s)\n op1 = self.evaluate_stack(s)\n val = self.fn2[op](op1, op2)\n\n if isinstance(val, tuple) or isinstance(val, np.ndarray):\n return xr.DataArray(val)\n return val\n elif self.user_functions is not None and op in self.user_functions:\n fn = self.user_functions[op]\n num_args = len(inspect.getargspec(fn).args)\n\n fn_args = ()\n for i in range(0, num_args):\n fn_args += self.evaluate_stack(s),\n fn_args = fn_args[::-1]\n\n val = self.user_functions[op](*fn_args)\n return val\n elif op in \":\":\n op2 = int(self.evaluate_stack(s))\n op1 = int(self.evaluate_stack(s))\n\n return slice(op1, op2, None)\n elif op in \"[]\":\n op1 = self.evaluate_stack(s)\n ops = ()\n i = 0\n dims = len(s)\n while len(s) > 0:\n val = self.evaluate_stack(s)\n if not isinstance(val, slice):\n val = int(val)\n ops += val,\n i = i+1\n ops = ops[::-1]\n return op1[ops]\n elif op in \"{}\":\n op1 = self.evaluate_stack(s)\n op2 = self.evaluate_stack(s)\n if op2.dtype != bool:\n op2 = self.get_pqa_mask(op2.astype(np.int64).values)\n\n val = xr.DataArray.where(op1, op2)\n return val\n elif op == \"?\":\n op1 = s.pop()\n op2 = s.pop()\n op3 = s.pop()\n\n ifval = self.evaluate_stack(op1)\n if ifval:\n return self.evaluate_stack(op2)\n else:\n return self.evaluate_stack(op3)\n elif op[0].isalpha():\n if self.local_dict is not None and op in self.local_dict:\n return self.local_dict[op]\n frame = self.getframe(op)\n if op in frame.f_locals:\n return frame.f_locals[op]\n if op in frame.f_globals:\n return frame.f_globals[op]\n else:\n return 
float(op)\n\n def is_number(self, s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n def flatten_list(self, l):\n return [item for sublist in l for item in sublist]\n\n def getframe(self, var):\n try:\n limit = sys.getrecursionlimit()\n for i in range(0, limit):\n frame = sys._getframe(i)\n if var in frame.f_locals or var in frame.f_globals:\n return frame\n return self.f\n except ValueError:\n return self.f\n\n def evaluate(self, s, local_dict=None, user_functions=None):\n if local_dict is None:\n self.local_dict = None\n self.f = sys._getframe(1)\n else:\n self.f = None\n self.local_dict = local_dict\n if user_functions is not None:\n self.user_functions = user_functions\n self.expr_stack = []\n results = self.parser.parseString(s)\n #print(self.expr_stack)\n val = self.evaluate_stack(self.expr_stack[:])\n return val\n\n def test(self, s, e):\n result = self.evaluate(s)\n self.f = sys._getframe(1)\n if isinstance(result, int) or isinstance(result, float) or \\\n isinstance(result, np.float64):\n r = e == result\n elif isinstance(e, np.ndarray):\n r = result.equals(xr.DataArray(e))\n else:\n r = e.equals(result)\n if r:\n print(s, \"=\", r)\n return True\n else:\n print(s, \"=\", r, \" ****** FAILED ******\")\n return False\n\n def get_pqa_mask(self, pqa_ndarray):\n '''\n create pqa_mask from a ndarray\n\n Parameters:\n pqa_ndarray: input pqa array\n good_pixel_masks: known good pixel values\n dilation: amount of dilation to apply\n '''\n good_pixel_masks = [32767, 16383, 2457]\n dilation = 3\n pqa_mask = np.zeros(pqa_ndarray.shape, dtype=np.bool)\n for i in range(len(pqa_ndarray)):\n pqa_array = pqa_ndarray[i]\n # Ignore bit 6 (saturation for band 62) - always 0 for Landsat 5\n pqa_array = pqa_array | 64\n\n # Dilating both the cloud and cloud shadow masks\n s = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]\n acca = (pqa_array & 1024) >> 10\n erode = ndimage.binary_erosion(acca, s, iterations=dilation, border_value=1)\n dif = erode - acca\n dif[dif < 0] = 1\n pqa_array += (dif << 10)\n del acca\n fmask = (pqa_array & 2048) >> 11\n erode = ndimage.binary_erosion(fmask, s, iterations=dilation, border_value=1)\n dif = erode - fmask\n dif[dif < 0] = 1\n pqa_array += (dif << 11)\n del fmask\n acca_shad = (pqa_array & 4096) >> 12\n erode = ndimage.binary_erosion(acca_shad, s, iterations=dilation, border_value=1)\n dif = erode - acca_shad\n dif[dif < 0] = 1\n pqa_array += (dif << 12)\n del acca_shad\n fmask_shad = (pqa_array & 8192) >> 13\n erode = ndimage.binary_erosion(fmask_shad, s, iterations=dilation, border_value=1)\n dif = erode - fmask_shad\n dif[dif < 0] = 1\n pqa_array += (dif << 13)\n\n for good_pixel_mask in good_pixel_masks:\n pqa_mask[i][pqa_array == good_pixel_mask] = True\n return pqa_mask\n\n def plot_3d(self, array_result):\n print('plot3D')\n\n img = array_result\n num_t = img.shape[0]\n num_rowcol = math.ceil(math.sqrt(num_t))\n fig = plt.figure(1)\n fig.clf()\n plot_count = 1\n for i in range(img.shape[0]):\n data = img[i]\n ax = fig.add_subplot(num_rowcol, num_rowcol, plot_count)\n cax = ax.imshow(data, interpolation='nearest', aspect='equal')\n plot_count += 1\n fig.tight_layout()\n plt.subplots_adjust(wspace=0.5, hspace=0.5)\n plt.show()\n\n def test_1_level(self):\n x5 = xr.DataArray(np.random.randn(2, 3))\n self.evaluate(\"z5 = x5 + 1\")\n assert z5.equals(x5 + 1)\n\n def test_2_level(self):\n self.test_2_level_fn()\n\n def test_2_level_fn(self):\n x6 = xr.DataArray(np.random.randn(2, 3))\n self.evaluate(\"z6 = x6 + 1\")\n assert z6.equals(x6 + 1)\n"
] |
[
[
"scipy.ndimage.binary_erosion",
"numpy.random.randn",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
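NDexpr's evaluate_stack() above is a recursive postfix (reverse-Polish) evaluator: pyparsing parse actions push operands and operators onto expr_stack, and evaluation pops from the end, recursing once per operand. A minimal sketch of that evaluation scheme, reduced to binary arithmetic (illustrative only, not the library's API):

import operator

OPS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}

def evaluate_stack(stack):
    token = stack.pop()
    if token in OPS:
        rhs = evaluate_stack(stack)  # operands were pushed left-to-right,
        lhs = evaluate_stack(stack)  # so the right-hand side pops first
        return OPS[token](lhs, rhs)
    return float(token)

print(evaluate_stack(["2", "3", "4", "*", "+"]))  # 2 + 3 * 4 -> 14.0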
michaels10/pydec
|
[
"738c3d9cf1cedc95a61be63fae36073e038d08bc"
] |
[
"pydec/io/tests/test_arrayio.py"
] |
[
"from pydec.testing import *\n\nfrom scipy import arange, prod, reshape, rand, random, allclose, rank, zeros\nfrom scipy.sparse import csr_matrix, csc_matrix, coo_matrix\n\nfrom pydec.io.arrayio import write_array, read_array\n\n\n#TODO replace with tempfile\n\nfilename = '/tmp/pydec_arrayio_testfile.dat'\n\nclass TestArrayIO():\n def setUp(self):\t\n random.seed(0) #make tests repeatable \n \n def tearDown(self):\n import os\n os.remove(filename) \n\n def test_dense(self):\n sizes = [(2,2),(3,3),(5,1),(1,5)]\n sizes += [(2,2,2),(4,3,2),(1,1,5),(1,5,1),(5,1,1)]\n for dims in sizes:\n mats = [arange(prod(dims)).reshape(dims),rand(*dims)] \n for A in mats:\n formats = ['binary','ascii']\n if rank(A) <= 2: formats.append('basic') #use basic when possible\n for format in formats:\n write_array(filename,A,format=format)\n \n B = read_array(filename)\n assert_almost_equal(A,B,decimal=12)\n\n\n def test_sparse(self):\n sizes = [(2,2),(3,3),(1,10),(10,1),(10,10)]\n for dims in sizes:\n base_mats = []\n base_mats.append((rand(*dims) < 0.5)*rand(*dims)) #random matrix with 50% nnz\n base_mats.append(zeros(dims)) #empty matrix\n base_mats.append(arange(prod(dims)).reshape(dims))\n\n mats = []\n for base_mat in base_mats:\n mats.append(csr_matrix(base_mat))\n mats.append(csc_matrix(base_mat))\n mats.append(coo_matrix(base_mat))\n \n \n for A in mats:\n formats = ['binary','ascii'] \n for format in formats:\n write_array(filename,A,format=format)\n \n B = read_array(filename)\n assert_almost_equal(A.todense(),B.todense(),decimal=12)\n assert_equal(type(A),type(B))\n\n"
] |
[
[
"scipy.sparse.csc_matrix",
"scipy.sparse.coo_matrix",
"scipy.random.seed",
"scipy.prod",
"scipy.zeros",
"scipy.sparse.csr_matrix",
"scipy.rand",
"scipy.rank"
]
] |
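The top-level scipy imports in the test above (arange, prod, rand, rank, zeros, ...) were aliases of numpy functions that modern SciPy has removed, and rank() is gone from numpy as well. A sketch of the current equivalents, using the same shapes as the test:

import numpy as np

dims = (4, 3, 2)
A = np.arange(np.prod(dims)).reshape(dims)  # was: arange(prod(dims)).reshape(dims)
B = np.random.rand(*dims)                   # was: rand(*dims)
ndim = A.ndim                               # was: rank(A)
print(A.shape, B.shape, ndim)               # (4, 3, 2) (4, 3, 2) 3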
guilhermesilveira/marked_card_detector
|
[
"03a81171b7625cbef0c3968ad881c09c4b384d08"
] |
[
"marked_card_detector/card.py"
] |
[
"import cv2\nimport numpy as np\n\nfrom marked_card_detector.graphics import flattener\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# based on https://github.com/EdjeElectronics/OpenCV-Playing-Card-Detector/blob/master/Cards.py\n# Width and height of card corner, where rank and suit are\nCORNER_WIDTH = 32\nCORNER_HEIGHT = 84\n\n\nclass Card:\n \"\"\"Structure to store information about query cards in the camera image.\"\"\"\n\n def __init__(self, contour, image):\n self.image = image\n self.one_percent = int(image.shape[0] * 0.01)\n\n self.contour = contour # Contour of card\n self.width, self.height = 0, 0 # Width and height of card\n self.corner_pts = [] # Corner points of card\n self.center = [] # Center point of card\n self.warp = [] # 200x300, flattened, grayed, blurred image\n self.rank_img = [] # Thresholded, sized image of card's rank\n self.suit_img = [] # Thresholded, sized image of card's suit\n self.best_rank_match = \"Unknown\" # Best matched rank\n self.best_suit_match = \"Unknown\" # Best matched suit\n self.rank_diff = 0 # Difference between rank image and best matched train rank image\n self.suit_diff = 0 # Difference between suit image and best matched train suit image\n\n self.__preprocess_card__()\n\n def __preprocess_card__(self):\n \"\"\"Uses contour to find information about the query card. Isolates rank\n and suit images from the card.\"\"\"\n\n image = self.image\n contour = self.contour\n\n # Find perimeter of card and use it to approximate corner points\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.01 * peri, True)\n pts = np.float32(approx)\n self.corner_pts = pts\n\n # Find width and height of card's bounding rectangle\n x, y, w, h = cv2.boundingRect(contour)\n self.width, self.height = w, h\n\n # Find center point of card by taking x and y average of the four corners.\n average = np.sum(pts, axis=0) / len(pts)\n cent_x = int(average[0][0])\n cent_y = int(average[0][1])\n self.center = [cent_x, cent_y]\n\n # Warp card into 200x300 flattened image using perspective transform\n self.warp = flattener(image, pts, w, h)\n\n # Grab corner of warped card image and do a 4x zoom\n # corner = self.warp[0:CORNER_HEIGHT, 0:CORNER_WIDTH]\n # corner_zoom = cv2.resize(qcorner, (0, 0), fx=4, fy=4)\n\n # Sample known white pixel intensity to determine good threshold level\n # white_level = Qcorner_zoom[15,int((CORNER_WIDTH*4)/2)]\n # thresh_level = white_level - CARD_THRESH\n # if (thresh_level <= 0):\n # thresh_level = 1\n # retval, query_thresh = cv2.threshold(Qcorner_zoom, thresh_level, 255, cv2. 
THRESH_BINARY_INV)\n\n # Split in to top and bottom half (top shows rank, bottom shows suit)\n # Qrank = query_thresh[20:185, 0:128]\n # Qsuit = query_thresh[186:336, 0:128]\n\n # Find rank contour and bounding rectangle, isolate and find largest contour\n # Qrank_cnts, hier = cv2.findContours(Qrank, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # Qrank_cnts = sorted(Qrank_cnts, key=cv2.contourArea,reverse=True)\n\n # # Find bounding rectangle for largest contour, use it to resize query rank\n # # image to match dimensions of the train rank image\n # if len(Qrank_cnts) != 0:\n # x1,y1,w1,h1 = cv2.boundingRect(Qrank_cnts[0])\n # Qrank_roi = Qrank[y1:y1+h1, x1:x1+w1]\n # Qrank_sized = cv2.resize(Qrank_roi, (RANK_WIDTH,RANK_HEIGHT), 0, 0)\n # card.rank_img = Qrank_sized\n\n # # Find suit contour and bounding rectangle, isolate and find largest contour\n # Qsuit_cnts, hier = cv2.findContours(Qsuit, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # Qsuit_cnts = sorted(Qsuit_cnts, key=cv2.contourArea,reverse=True)\n\n # # Find bounding rectangle for largest contour, use it to resize query suit\n # # image to match dimensions of the train suit image\n # if len(Qsuit_cnts) != 0:\n # x2,y2,w2,h2 = cv2.boundingRect(Qsuit_cnts[0])\n # Qsuit_roi = Qsuit[y2:y2+h2, x2:x2+w2]\n # Qsuit_sized = cv2.resize(Qsuit_roi, (SUIT_WIDTH, SUIT_HEIGHT), 0, 0)\n # card.suit_img = Qsuit_sized\n\n def top_left(self):\n return self.warp[70:95, 70:90]\n\n def draw(self, copy=False):\n \"\"\"Draw the card name, center point, and contour on the camera image.\"\"\"\n\n if copy:\n image = self.image.copy()\n else:\n image = self.image\n\n x = self.center[0]\n y = self.center[1]\n cv2.circle(image, (x, y), self.one_percent * 2, (255, 0, 0), -1)\n\n rank_name = self.best_rank_match\n suit_name = self.best_suit_match\n\n # Draw card name twice, so letters have black outline\n cv2.putText(image, (rank_name + ' of'), (x - 60, y - 10), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, (rank_name + ' of'), (x - 60, y - 10), font, 1, (50, 200, 200), 2, cv2.LINE_AA)\n\n cv2.putText(image, suit_name, (x - 60, y + 25), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, suit_name, (x - 60, y + 25), font, 1, (50, 200, 200), 2, cv2.LINE_AA)\n\n cv2.drawContours(image, self.contour, -1, (255, 0, 0), self.one_percent)\n\n # Can draw difference value for troubleshooting purposes\n # (commented out during normal operation)\n # r_diff = str(self.rank_diff)\n # s_diff = str(self.suit_diff)\n # cv2.putText(image,r_diff,(x+20,y+30),font,0.5,(0,0,255),1,cv2.LINE_AA)\n # cv2.putText(image,s_diff,(x+20,y+50),font,0.5,(0,0,255),1,cv2.LINE_AA)\n\n return image\n"
] |
[
[
"numpy.sum",
"numpy.float32"
]
] |
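The flattener() used by Card.__preprocess_card__ above is imported from the project's graphics module and is not shown in this row. A common implementation of that step (hypothetical here, and assuming the corners arrive ordered top-left, top-right, bottom-right, bottom-left) warps the four detected corners into an upright image with a perspective transform:

import cv2
import numpy as np

def flatten_card(image, pts, width=200, height=300):
    src = np.float32(pts).reshape(4, 2)  # detected corner points
    dst = np.float32([[0, 0], [width - 1, 0],
                      [width - 1, height - 1], [0, height - 1]])
    matrix = cv2.getPerspectiveTransform(src, dst)  # 3x3 homography
    return cv2.warpPerspective(image, matrix, (width, height))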
laigm/rcnn
|
[
"37bd2f3ea460353efbccd17ccd02045ee6a442d2"
] |
[
"frcnn.py"
] |
[
"import colorsys\nimport copy\nimport math\nimport os\nimport pickle\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image, ImageDraw, ImageFont\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\nfrom tensorflow.keras.layers import Input\n\nimport nets.frcnn as frcnn\nfrom nets.frcnn_training import get_new_img_size\nfrom utils.anchors import get_anchors\nfrom utils.config import Config\nfrom utils.utils import BBoxUtility\n\n\n# --------------------------------------------#\n# 使用自己训练好的模型预测需要修改2个参数\n# model_path和classes_path都需要修改!\n# 如果出现shape不匹配\n# 一定要注意训练时的NUM_CLASSES、\n# model_path和classes_path参数的修改\n# --------------------------------------------#\nclass FRCNN(object):\n _defaults = {\n \"model_path\": 'model_data/voc_weights.h5',\n \"classes_path\": 'model_data/voc_classes.txt',\n \"confidence\": 0.5,\n \"iou\": 0.3\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n # ---------------------------------------------------#\n # 初始化faster RCNN\n # ---------------------------------------------------#\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults)\n self.class_names = self._get_class()\n self.config = Config()\n self.generate()\n self.bbox_util = BBoxUtility()\n\n # ---------------------------------------------------#\n # 获得所有的分类\n # ---------------------------------------------------#\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n # ---------------------------------------------------#\n # 获得所有的分类\n # ---------------------------------------------------#\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # -------------------------------#\n # 计算总的类的数量\n # -------------------------------#\n self.num_classes = len(self.class_names) + 1\n\n # -------------------------------#\n # 载入模型与权值\n # -------------------------------#\n self.model_rpn, self.model_classifier = frcnn.get_predict_model(self.config, self.num_classes)\n self.model_rpn.load_weights(self.model_path, by_name=True)\n self.model_classifier.load_weights(self.model_path, by_name=True)\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # 画框设置不同的颜色\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n\n # ---------------------------------------------------#\n # 用于计算共享特征层的大小\n # ---------------------------------------------------#\n def get_img_output_length(self, width, height):\n def get_output_length(input_length):\n # input_length += 6\n filter_sizes = [7, 3, 1, 1]\n padding = [3, 1, 0, 0]\n stride = 2\n for i in range(4):\n # input_length = (input_length - filter_size + stride) // stride\n input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1\n return input_length\n\n return get_output_length(width), get_output_length(height)\n\n @tf.function(experimental_relax_shapes=True)\n def model_rpn_get_pred(self, photo):\n preds = 
self.model_rpn(photo, training=False)\n return preds\n\n @tf.function(experimental_relax_shapes=True)\n def model_classifier_get_pred(self, photo):\n preds = self.model_classifier(photo, training=False)\n return preds\n\n # ---------------------------------------------------#\n # 检测图片\n # ---------------------------------------------------#\n def detect_image(self, image):\n image_shape = np.array(np.shape(image)[0:2])\n # 原始图片的宽和高\n old_width, old_height = image_shape[1], image_shape[0]\n old_image = copy.deepcopy(image)\n\n # ---------------------------------------------------------#\n # 给原图像进行resize,resize到短边为600的大小上\n # ---------------------------------------------------------#\n width, height = get_new_img_size(old_width, old_height)\n image = image.resize([width, height], Image.BICUBIC)\n # print(image) # <PIL.Image.Image image mode=RGB size=800x600 at 0x16A801BD788>\n photo = np.array(image, dtype=np.float64)\n # print(photo.shape) # (600, 800, 3)\n\n # -----------------------------------------------------------#\n # 图片预处理,归一化。\n # -----------------------------------------------------------#\n photo = preprocess_input(np.expand_dims(photo, 0)) # shape:(1, 600, 600, 3)\n rpn_pred = self.model_rpn_get_pred(photo)\n \"\"\"\n rpn_pred[0].shape = (1, 12996, 1) \n rpn_pred[1].shape = (1, 12996, 4) 存储的是坐标的偏移量\n rpn_pred[2].shape = (1, 38, 38, 1024) 共享特征层\n \"\"\"\n rpn_pred = [x.numpy() for x in rpn_pred]\n\n # -----------------------------------------------------------#\n # 将建议框网络的预测结果进行解码\n # -----------------------------------------------------------#\n # 共享特征层的尺寸\n # 图片短边对应共享特征层尺寸中的 38. 如:640*480->800*600->50*38\n # 600*600 则对应为 38*38\n base_feature_width, base_feature_height = self.get_img_output_length(width, height)\n # 获得 38*38*9 个anchors\n # anchors.shape = (12996, 4)\n anchors = get_anchors([base_feature_width, base_feature_height], width, height)\n # rpn_results.shape = (1, 300, 5)\n # 非极大抑制中效果较好的内容 -> 留下300个建议框\n rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)\n\n # -------------------------------------------------------------#\n # 在获得建议框和共享特征层后,将二者传入classifier网络中进行预测\n # -------------------------------------------------------------#\n # rpn_pred[2].shape = (1, 38, 38, 1024) 共享特征层\n base_layer = rpn_pred[2]\n # 获得建议框坐标信息 proposal_box.shape = (1, 300, 4)\n proposal_box = np.array(rpn_results)[:, :, 1:]\n temp_ROIs = np.zeros_like(proposal_box)\n temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]\n classifier_pred = self.model_classifier_get_pred([base_layer, temp_ROIs])\n \"\"\"\n classifier_pred[0].shape = (1, 300, 21)\n classifier_pred[1].shape = (1, 300, 80)\n \"\"\"\n classifier_pred = [x.numpy() for x in classifier_pred]\n\n # -------------------------------------------------------------#\n # 利用classifier的预测结果对建议框进行解码,获得预测框\n # -------------------------------------------------------------#\n # print(tf.shape(proposal_box)[1])\n results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)\n\n if len(results[0]) == 0:\n return old_image\n # results.shape = (9, 6) 数字 9 代表从图片中得到 9 个预测框,数字 6 代表预测框的6个信息:坐标、置信度和类别\n results = np.array(results[0])\n # 预测框的坐标信息\n boxes = results[:, :4]\n # 预测框的置信度信息\n top_conf = results[:, 4]\n # 预测框中物体所属类别所对应得索引信息\n top_label_indices = results[:, 5]\n # 预测框在原始图片中的位置\n boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width\n boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height\n\n font = ImageFont.truetype(font='model_data/simhei.ttf',\n size=np.floor(3e-2 * np.shape(image)[1] + 
0.5).astype('int32'))\n # (600 + 600) // 600 * 2 = 4\n thickness = max((np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2, 1)\n image = old_image\n \"\"\"\n >>> seasons = ['Spring', 'Summer', 'Fall', 'Winter']\n >>> list(enumerate(seasons))\n [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]\n >>> list(enumerate(seasons, start=1)) # 下标从 1 开始\n [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')]\n \"\"\"\n for i, c in enumerate(top_label_indices):\n # 预测框中物体类别名称\n predicted_class = self.class_names[int(c)]\n # 预测框的置信度\n score = top_conf[i]\n # 预测框坐标信息\n left, top, right, bottom = boxes[i]\n top = top - 5\n left = left - 5\n bottom = bottom + 5\n right = right + 5\n # 确保预测框边界不超过图片边界\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))\n right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))\n # print(top,left,bottom,right)\n # 画框框\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n print(label, top, left, bottom, right)\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[int(c)])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[int(c)])\n draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n return image\n"
] |
[
[
"numpy.expand_dims",
"tensorflow.function",
"numpy.shape",
"numpy.zeros_like",
"numpy.floor",
"numpy.array"
]
] |
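The get_img_output_length() method in the frcnn.py code above applies the standard convolution output-size formula, out = (in + 2*pad - kernel) // stride + 1, once per stride-2 stage of the backbone. Run standalone, it reproduces the 38x38 shared feature map mentioned in the comments:

def get_output_length(input_length):
    filter_sizes = [7, 3, 1, 1]
    padding = [3, 1, 0, 0]
    stride = 2
    for k, p in zip(filter_sizes, padding):
        input_length = (input_length + 2 * p - k) // stride + 1
    return input_length

print(get_output_length(600))  # -> 38 (600 -> 300 -> 150 -> 75 -> 38)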
mandanasmi/torch-rl
|
[
"0f195945070fde80db0876b61a5f800ce1494619"
] |
[
"torch_rl/torch_rl/algos/new_algo.py"
] |
[
"import numpy as np\nfrom comet_ml import Experiment\nimport torch\nfrom torch_rl.format import default_preprocess_obss\nfrom abc import ABC\nfrom collections import deque\nimport random\nfrom matplotlib import pyplot as plt\nimport math\nimport json, os, csv\nimport torch.nn.functional as F\n\nhyper_params = {\n \"learning_rate\": 0.01\n}\n\nexperiment = Experiment(\"UcVgpp0wPaprHG4w8MFVMgq7j\", project_name=\"navi-corl-2019\")\nexperiment.log_parameters(hyper_params)\n\n\nclass DQNAlgo_new(ABC):\n \"\"\"The class for the DQN\"\"\"\n\n def __init__(self, env, base_model, target_net, num_frames, discount=0.99, lr=0.005, adam_eps=1e-8,\n batch_size=128, preprocess_obss=None, capacity=10000, log_interval=100,\n save_interval=1000, train_interval=500, record_qvals=False, target_update=10):\n\n self.env = env\n self.base_model = base_model\n self.target_model = target_net\n self.base_model.train()\n self.discount = discount\n self.optimizer = torch.optim.SGD(self.base_model.parameters(), lr) #, eps=adam_eps)\n self.batch_size = batch_size\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.num_frames = num_frames\n self.preprocess_obss = preprocess_obss or default_preprocess_obss\n self.batch_num = 0\n self.replay_buffer = ReplayBuffer(capacity)\n\n self.episode_success = []\n self.all_rewards = []\n self.losses = []\n self.log_interval = log_interval\n self.save_interval = save_interval\n self.train_interval = train_interval\n self.target_update = target_update\n\n self.curriculum_threshold = 0.5\n\n self.qvals = []\n self.record_qvals = record_qvals\n\n epsilon_start = 1.0\n epsilon_final = 0.01\n epsilon_decay = 100000\n self.epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) \\\n * math.exp(-1. 
* frame_idx / epsilon_decay)\n\n def update_parameters(self, status, model_dir):\n num_frames = status['num_frames']\n episode_reward = 0\n episode_length = 0\n episode_length_list = []\n self.obs = self.env.reset()\n\n if self.record_qvals:\n orig_obs = self.obs\n experiment.log_metric(\"good_action_for_qvals\", self.env.shortest_path_length()[0])\n np.save(model_dir+\"/orig_obs.npy\", orig_obs)\n self.qvals.append(self.base_model(self.preprocess_obss([orig_obs], device=self.device)))\n\n\n for frame_idx in range(num_frames, self.num_frames):\n with experiment.train():\n\n preprocessed_obs = self.preprocess_obss([self.obs], device=self.device)\n epsilon = self.epsilon_by_frame(frame_idx)\n experiment.log_metric(\"epsilon\", epsilon, step=frame_idx)\n\n action = self.base_model.act(preprocessed_obs, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n\n self.replay_buffer.push(self.obs, action, reward, next_state, done)\n self.obs = next_state\n\n episode_reward += reward\n episode_length += 1\n\n if len(self.replay_buffer) > self.batch_size and frame_idx % self.train_interval == 0:\n loss = self.compute_td_loss()\n self.losses.append(loss.item())\n experiment.log_metric(\"loss\", loss.item(), step=frame_idx)\n\n if self.record_qvals:\n with torch.no_grad():\n qvals = self.base_model(self.preprocess_obss([orig_obs], device=self.device)).cpu().numpy()\n self.qvals.append(qvals)\n qval_dict = {\"BIG_LEFT\": qvals[0][0], \"SMALL_LEFT\": qvals[0][1], \"FORWARD\": qvals[0][2], \"SMALL_RIGHT\": qvals[0][3], \"BIG_RIGHT\": qvals[0][4], }\n experiment.log_metrics(qval_dict, step=frame_idx)\n if done:\n success = 0.0\n if reward >= 2.0:\n success = 1.0\n self.episode_success.append(success)\n experiment.log_metric(\"episode_success_rate\", np.sum(self.episode_success)/len(self.episode_success))\n experiment.log_metric(\"num_episodes_finished\", len(self.episode_success))\n experiment.log_metric(\"episode_length\", episode_length, step=frame_idx)\n\n episode_length_list.append(episode_length)\n episode_length = 0\n\n self.obs = self.env.reset()\n self.all_rewards.append(episode_reward)\n experiment.log_metric(\"episode_reward\", episode_reward, step=frame_idx)\n episode_reward = 0\n\n if len(self.all_rewards) % self.target_update == 0:\n self.target_model.load_state_dict(self.base_model.state_dict())\n\n if len(self.all_rewards) % self.log_interval == 0 and len(self.all_rewards) > 0:\n print(\"Number of Trajectories:\", len(self.all_rewards),\n \"| Number of Frames:\", frame_idx,\n \"| Success Rate:\", np.mean(self.episode_success[-100:]),\n \"| Average Episode Reward:\", np.mean(self.all_rewards[-100:]),\n \"| Losses:\", np.mean(self.losses[-100:]),\n \"| Epsilon:\", epsilon,\n \"| Length of Episode:\", np.mean(episode_length_list[-100:]))\n status[\"num_frames\"] = frame_idx\n\n # # Curriculum learning\n # if np.mean(self.episode_success[-100:]) >= self.curriculum_threshold:\n # print(\"empirical_win_rate: \" + str(np.mean(self.episode_success[-100:])))\n # print(\"Increasing Difficulty by 1!\")\n # status[\"difficulty\"] += 1\n # self.env.set_difficulty(status[\"difficulty\"])\n # print(\"New Difficulty:\", status[\"difficulty\"])\n if len(self.all_rewards) % self.save_interval == 0 and len(self.all_rewards) > 0:\n # Save losses and rewards.\n with open(model_dir+'/losses.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.losses)\n with open(model_dir+'/rewards.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.all_rewards)\n 
with open(model_dir+'/episode_success.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.episode_success)\n\n # Save status\n path = os.path.join(model_dir, \"status.json\")\n with open(path, \"w\") as file:\n json.dump(status, file)\n\n # Saving model\n if torch.cuda.is_available():\n self.base_model.cpu()\n torch.save(self.base_model, model_dir+\"/model.pt\")\n print(\"Done saving model and logs...\")\n if torch.cuda.is_available():\n self.base_model.cuda()\n\n # Save q values if debug mode\n if self.record_qvals:\n with open(model_dir + '/q_vals.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.qvals)\n\n if len(self.all_rewards) % self.target_update == 0:\n self.base_model.embed_imgs = []\n self.base_model.embed_gps = []\n with experiment.test():\n obs = self.env.reset()\n spl = self.env.shortest_path_length()\n for action in spl:\n obs = self.preprocess_obss([obs], device=self.device)\n self.base_model.act(obs, 0)\n obs, reward, done, _ = self.env.step(action)\n self.process_embeddings(model_dir)\n\n\n def process_embeddings(self, model_dir):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n if self.base_model.embed_imgs:\n img_means = [img.mean().item() for img in self.base_model.embed_imgs]\n img_medians = [img.median().item() for img in self.base_model.embed_imgs]\n ax.plot(img_means, label=\"img_means\")\n ax.plot(img_medians, label=\"img_medians\")\n\n gps_means = [gps.mean().item() for gps in self.base_model.embed_gps]\n gps_medians = [gps.median().item() for gps in self.base_model.embed_gps]\n ax.plot(gps_means, label=\"gps_means\")\n ax.plot(gps_medians, label=\"gps_medians\")\n plt.legend()\n if not os.path.isdir(model_dir + \"/figs\"):\n os.mkdir(model_dir + \"/figs\")\n plt.savefig(model_dir + \"/figs/embedding_means_\" + str(len(self.episode_success)))\n plt.close()\n\n def compute_td_loss(self):\n\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n obs = self.preprocess_obss(state, device=self.device)\n next_obs = self.preprocess_obss(next_state, device=self.device)\n action = torch.LongTensor(action).to(device=self.device)\n reward = torch.FloatTensor(reward).to(device=self.device)\n done = torch.FloatTensor(done).to(device=self.device)\n\n q_values = self.base_model(obs)\n next_q_values = self.target_model(next_obs).detach()\n\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n next_q_value = next_q_values.max(1)[0]\n expected_q_value = reward + (self.discount * next_q_value * (1 - done))\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(q_value, expected_q_value)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.base_model.parameters():\n param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n\n return loss\n\n\nclass ReplayBuffer(object):\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def push(self, state, action, reward, next_state, done):\n state = np.expand_dims(state, 0)\n next_state = np.expand_dims(next_state, 0)\n\n self.buffer.append((state, action, reward, next_state, done))\n\n def sample(self, batch_size):\n state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))\n return np.concatenate(state), action, reward, np.concatenate(next_state), done\n\n def __len__(self):\n return len(self.buffer)\n"
] |
[
[
"matplotlib.pyplot.legend",
"torch.LongTensor",
"numpy.expand_dims",
"numpy.save",
"numpy.concatenate",
"torch.FloatTensor",
"torch.save",
"torch.nn.functional.smooth_l1_loss",
"matplotlib.pyplot.close",
"torch.cuda.is_available",
"torch.no_grad",
"numpy.mean",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
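compute_td_loss() in the DQN code above implements the standard one-step TD update: the target is r + gamma * max_a' Q_target(s', a') * (1 - done), compared against the online network's Q(s, a) under a Huber (smooth L1) loss. The same computation stripped to plain tensors (the toy numbers are made up for illustration):

import torch
import torch.nn.functional as F

def td_loss(q_values, next_q_values, action, reward, done, discount=0.99):
    q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)  # Q(s, a)
    next_q_value = next_q_values.max(1)[0]                        # max_a' Q_target(s', a')
    expected = reward + discount * next_q_value * (1 - done)      # Bellman target
    return F.smooth_l1_loss(q_value, expected)

q = torch.tensor([[0.1, 0.9], [0.5, 0.2]])
q_next = torch.tensor([[0.3, 0.4], [0.0, 0.0]])
print(td_loss(q, q_next, torch.tensor([1, 0]),
              torch.tensor([1.0, 2.0]), torch.tensor([0.0, 1.0])).item())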
Parry-Parry/Level-4-Project
|
[
"ea2d6be2b7964fe21a8e1a92d79e89fcab8b6b74"
] |
[
"code2text/models/baseline/baseline_weak.py"
] |
[
"from tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dense\n\n# configure\nnum_encoder_tokens = 71\nnum_decoder_tokens = 93\nlatent_dim = 256\n# Define an input sequence and process it.\nencoder_inputs = Input(shape=(None, num_encoder_tokens))\nencoder = LSTM(latent_dim, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, num_decoder_tokens))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. We don't use the\n# return states in the training model, but we will use them in inference.\ndecoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n# plot the model\n# define encoder inference model\nencoder_model = Model(encoder_inputs, encoder_states)\n# define decoder inference model\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\n\n\n"
] |
[
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Input"
]
] |
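The encoder_model/decoder_model pair defined above follows the classic Keras seq2seq recipe, in which inference feeds the decoder one token at a time and carries the LSTM states forward. A greedy-decoding sketch under that assumption, reusing the names defined in the code above (start_idx and stop_idx are hypothetical token indices; the row defines no tokenizer):

import numpy as np

def greedy_decode(input_seq, start_idx=0, stop_idx=1, max_len=20):
    state = encoder_model.predict(input_seq)       # [state_h, state_c]
    target = np.zeros((1, 1, num_decoder_tokens))
    target[0, 0, start_idx] = 1.0                  # seed with the start token
    decoded = []
    for _ in range(max_len):
        output, h, c = decoder_model.predict([target] + state)
        token = int(np.argmax(output[0, -1, :]))
        if token == stop_idx:
            break
        decoded.append(token)
        target = np.zeros((1, 1, num_decoder_tokens))
        target[0, 0, token] = 1.0                  # feed the prediction back in
        state = [h, c]
    return decoded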
asparc/acados
|
[
"fe353023814cdfc47ae84eaeef1fcb0b3fa7b687"
] |
[
"interfaces/acados_template/acados_template/utils.py"
] |
[
"#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport os, sys, json\nimport urllib.request\nimport shutil\nimport numpy as np\nfrom casadi import SX, MX, DM, Function, CasadiMeta\n\nALLOWED_CASADI_VERSIONS = ('3.5.3', '3.5.2', '3.5.1', '3.4.5', '3.4.0')\nTERA_VERSION = \"0.0.34\"\n\ndef get_acados_path():\n ACADOS_PATH = os.environ.get('ACADOS_SOURCE_DIR')\n if not ACADOS_PATH:\n acados_template_path = os.path.dirname(os.path.abspath(__file__))\n acados_path = os.path.join(acados_template_path, '../../../')\n ACADOS_PATH = os.path.realpath(acados_path)\n return ACADOS_PATH\n\ndef get_tera_exec_path():\n ACADOS_PATH = get_acados_path()\n return os.path.join(ACADOS_PATH, 'bin/t_renderer')\n\nplatform2tera = {\n \"linux\": \"linux\",\n \"darwin\": \"osx\",\n \"win32\": \"window.exe\"\n}\n\n\ndef is_column(x):\n if isinstance(x, np.ndarray):\n if x.ndim == 1:\n return True\n elif x.ndim == 2 and x.shape[1] == 1:\n return True\n else:\n return False\n elif isinstance(x, (MX, SX, DM)):\n if x.shape[1] == 1:\n return True\n elif x.shape[0] == 0 and x.shape[1] == 0:\n return True\n else:\n return False\n elif x == None or x == []:\n return False\n else:\n raise Exception(\"is_column expects one of the following types: np.ndarray, casadi.MX, casadi.SX.\"\n + \" Got: \" + str(type(x)))\n\ndef is_empty(x):\n if isinstance(x, (MX, SX, DM)):\n return x.is_empty()\n elif isinstance(x, np.ndarray):\n if np.prod(x.shape) == 0:\n return True\n else:\n return False\n elif x == None or x == []:\n return True\n else:\n raise Exception(\"is_empty expects one of the following types: casadi.MX, casadi.SX, \"\n + \"None, numpy array empty list. 
Got: \" + str(type(x)))\n\n\ndef casadi_length(x):\n if isinstance(x, (MX, SX, DM)):\n return int(np.prod(x.shape))\n else:\n raise Exception(\"casadi_length expects one of the following types: casadi.MX, casadi.SX.\"\n + \" Got: \" + str(type(x)))\n\ndef make_model_consistent(model):\n x = model.x\n xdot = model.xdot\n u = model.u\n z = model.z\n p = model.p\n\n if isinstance(x, MX):\n symbol = MX.sym\n elif isinstance(x, SX):\n symbol = SX.sym\n else:\n raise Exception(\"model.x must be casadi.SX or casadi.MX, got {}\".format(type(x)))\n\n if is_empty(p):\n model.p = symbol('p', 0, 0)\n\n if is_empty(z):\n model.z = symbol('z', 0, 0)\n\n return model\n\n\ndef get_tera():\n tera_path = get_tera_exec_path()\n acados_path = get_acados_path()\n\n if os.path.exists(tera_path) and os.access(tera_path, os.X_OK):\n return tera_path\n\n repo_url = \"https://github.com/acados/tera_renderer/releases\"\n url = \"{}/download/v{}/t_renderer-v{}-{}\".format(\n repo_url, TERA_VERSION, TERA_VERSION, platform2tera[sys.platform])\n\n manual_install = 'For manual installation follow these instructions:\\n'\n manual_install += '1 Download binaries from {}\\n'.format(url)\n manual_install += '2 Copy them in {}/bin\\n'.format(acados_path)\n manual_install += '3 Strip the version and platform from the binaries: '\n manual_install += 'as t_renderer-v0.0.34-X -> t_renderer)\\n'\n manual_install += '4 Enable execution privilege on the file \"t_renderer\" with:\\n'\n manual_install += '\"chmod +x {}\"\\n\\n'.format(tera_path)\n\n msg = \"\\n\"\n msg += 'Tera template render executable not found, '\n msg += 'while looking in path:\\n{}\\n'.format(tera_path)\n msg += 'In order to be able to render the templates, '\n msg += 'you need to download the tera renderer binaries from:\\n'\n msg += '{}\\n\\n'.format(repo_url)\n msg += 'Do you wish to set up Tera renderer automatically?\\n'\n msg += 'y/N? (press y to download tera or any key for manual installation)\\n'\n\n if input(msg) == 'y':\n print(\"Dowloading {}\".format(url))\n with urllib.request.urlopen(url) as response, open(tera_path, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n print(\"Successfully downloaded t_renderer.\")\n os.chmod(tera_path, 0o755)\n return tera_path\n\n msg_cancel = \"\\nYou cancelled automatic download.\\n\\n\"\n msg_cancel += manual_install\n msg_cancel += \"Once installed re-run your script.\\n\\n\"\n print(msg_cancel)\n\n sys.exit(1)\n\n\ndef render_template(in_file, out_file, template_dir, json_path):\n cwd = os.getcwd()\n if not os.path.exists(template_dir):\n os.mkdir(template_dir)\n os.chdir(template_dir)\n\n tera_path = get_tera()\n\n # setting up loader and environment\n acados_path = os.path.dirname(os.path.abspath(__file__))\n\n template_glob = acados_path + '/c_templates_tera/*'\n acados_template_path = acados_path + '/c_templates_tera'\n\n # call tera as system cmd\n os_cmd = \"{tera_path} '{template_glob}' '{in_file}' '{json_path}' '{out_file}'\".format(\n tera_path=tera_path,\n template_glob=template_glob,\n json_path=json_path,\n in_file=in_file,\n out_file=out_file\n )\n status = os.system(os_cmd)\n if (status != 0):\n raise Exception('Rendering of {} failed! 
Exiting.\\n'.format(in_file))\n\n os.chdir(cwd)\n\n\n## Conversion functions\ndef np_array_to_list(np_array):\n if isinstance(np_array, (np.ndarray)):\n return np_array.tolist()\n elif isinstance(np_array, (SX)):\n return DM(np_array).full()\n elif isinstance(np_array, (DM)):\n return np_array.full()\n else:\n raise(Exception(\n \"Cannot convert to list type {}\".format(type(np_array))\n ))\n\n\ndef format_class_dict(d):\n \"\"\"\n removes the __ artifact from class to dict conversion\n \"\"\"\n out = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = format_class_dict(v)\n\n out_key = k.split('__', 1)[-1]\n out[k.replace(k, out_key)] = v\n return out\n\n\ndef acados_class2dict(class_instance):\n \"\"\"\n removes the __ artifact from class to dict conversion\n \"\"\"\n\n d = dict(class_instance.__dict__)\n out = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = format_class_dict(v)\n\n out_key = k.split('__', 1)[-1]\n out[k.replace(k, out_key)] = v\n return out\n\n\ndef ocp_check_against_layout(ocp_nlp, ocp_dims):\n \"\"\"\n Check dimensions against layout\n Parameters\n ---------\n ocp_nlp : dict\n dictionary loaded from JSON to be post-processed.\n\n ocp_dims : instance of AcadosOcpDims\n \"\"\"\n\n # load JSON layout\n current_module = sys.modules[__name__]\n acados_path = os.path.dirname(current_module.__file__)\n with open(acados_path + '/acados_layout.json', 'r') as f:\n ocp_nlp_layout = json.load(f)\n\n ocp_check_against_layout_recursion(ocp_nlp, ocp_dims, ocp_nlp_layout)\n return\n\n\n\ndef ocp_check_against_layout_recursion(ocp_nlp, ocp_dims, layout):\n\n for key, item in ocp_nlp.items():\n\n try:\n layout_of_key = layout[key]\n except KeyError:\n raise Exception(\"ocp_check_against_layout_recursion: field\" \\\n \" '{0}' is not in layout but in OCP description.\".format(key))\n\n if isinstance(item, dict):\n ocp_check_against_layout_recursion(item, ocp_dims, layout_of_key)\n\n if 'ndarray' in layout_of_key:\n if isinstance(item, int) or isinstance(item, float):\n item = np.array([item])\n if isinstance(item, (list, np.ndarray)) and (layout_of_key[0] != 'str'):\n dim_layout = []\n dim_names = layout_of_key[1]\n\n for dim_name in dim_names:\n dim_layout.append(ocp_dims[dim_name])\n\n dims = tuple(dim_layout)\n\n item = np.array(item)\n item_dims = item.shape\n if len(item_dims) != len(dims):\n raise Exception('Mismatching dimensions for field {0}. ' \\\n 'Expected {1} dimensional array, got {2} dimensional array.' \\\n .format(key, len(dims), len(item_dims)))\n\n if np.prod(item_dims) != 0 or np.prod(dims) != 0:\n if dims != item_dims:\n raise Exception('acados -- mismatching dimensions for field {0}. ' \\\n 'Provided data has dimensions {1}, ' \\\n 'while associated dimensions {2} are {3}' \\\n .format(key, item_dims, dim_names, dims))\n return\n\n\ndef J_to_idx(J):\n nrows = J.shape[0]\n idx = np.zeros((nrows, ))\n for i in range(nrows):\n this_idx = np.nonzero(J[i,:])[0]\n if len(this_idx) != 1:\n raise Exception('Invalid J matrix structure detected, ' \\\n 'must contain one nonzero element per row. Exiting.')\n if this_idx.size > 0 and J[i,this_idx[0]] != 1:\n raise Exception('J matrices can only contain 1s. Exiting.')\n idx[i] = this_idx[0]\n return idx\n\n\ndef J_to_idx_slack(J):\n nrows = J.shape[0]\n ncol = J.shape[1]\n idx = np.zeros((ncol, ))\n i_idx = 0\n for i in range(nrows):\n this_idx = np.nonzero(J[i,:])[0]\n if len(this_idx) == 1:\n idx[i_idx] = i\n i_idx = i_idx + 1\n elif len(this_idx) > 1:\n raise Exception('J_to_idx_slack: Invalid J matrix. 
Exiting. ' \\\n 'Found more than one nonzero in row ' + str(i))\n if this_idx.size > 0 and J[i,this_idx[0]] != 1:\n raise Exception('J_to_idx_slack: J matrices can only contain 1s, ' \\\n 'got J(' + str(i) + ', ' + str(this_idx[0]) + ') = ' + str(J[i,this_idx[0]]) )\n if not i_idx == ncol:\n raise Exception('J_to_idx_slack: J must contain a 1 in every column!')\n return idx\n\n\ndef acados_dae_model_json_dump(model):\n\n # load model\n x = model.x\n xdot = model.xdot\n u = model.u\n z = model.z\n p = model.p\n\n f_impl = model.f_impl_expr\n model_name = model.name\n\n # create struct with impl_dae_fun, casadi_version\n fun_name = model_name + '_impl_dae_fun'\n impl_dae_fun = Function(fun_name, [x, xdot, u, z, p], [f_impl])\n\n casadi_version = CasadiMeta.version()\n str_impl_dae_fun = impl_dae_fun.serialize()\n\n dae_dict = {\"str_impl_dae_fun\": str_impl_dae_fun, \"casadi_version\": casadi_version}\n\n # dump\n json_file = model_name + '_acados_dae.json'\n with open(json_file, 'w') as f:\n json.dump(dae_dict, f, default=np_array_to_list, indent=4, sort_keys=True)\n print(\"dumped \", model_name, \" dae to file:\", json_file, \"\\n\")\n\n\ndef set_up_imported_gnsf_model(acados_formulation):\n\n gnsf = acados_formulation.gnsf_model\n\n # check CasADi version\n dump_casadi_version = gnsf['casadi_version']\n casadi_version = CasadiMeta.version()\n\n if not casadi_version == dump_casadi_version:\n print(\"WARNING: GNSF model was dumped with another CasADi version.\\n\"\n + \"This might yield errors. Please use the same version for compatibility, serialize version: \"\n + dump_casadi_version + \" current Python CasADi verison: \" + casadi_version)\n input(\"Press any key to attempt to continue...\")\n\n # load model\n phi_fun = Function.deserialize(gnsf['phi_fun'])\n phi_fun_jac_y = Function.deserialize(gnsf['phi_fun_jac_y'])\n phi_jac_y_uhat = Function.deserialize(gnsf['phi_jac_y_uhat'])\n get_matrices_fun = Function.deserialize(gnsf['get_matrices_fun'])\n\n # obtain gnsf dimensions\n size_gnsf_A = get_matrices_fun.size_out(0)\n acados_formulation.dims.gnsf_nx1 = size_gnsf_A[1]\n acados_formulation.dims.gnsf_nz1 = size_gnsf_A[0] - size_gnsf_A[1]\n acados_formulation.dims.gnsf_nuhat = max(phi_fun.size_in(1))\n acados_formulation.dims.gnsf_ny = max(phi_fun.size_in(0))\n acados_formulation.dims.gnsf_nout = max(phi_fun.size_out(0))\n\n # save gnsf functions in model\n acados_formulation.model.phi_fun = phi_fun\n acados_formulation.model.phi_fun_jac_y = phi_fun_jac_y\n acados_formulation.model.phi_jac_y_uhat = phi_jac_y_uhat\n acados_formulation.model.get_matrices_fun = get_matrices_fun\n\n if \"f_lo_fun_jac_x1k1uz\" in gnsf:\n f_lo_fun_jac_x1k1uz = Function.deserialize(gnsf['f_lo_fun_jac_x1k1uz'])\n acados_formulation.model.f_lo_fun_jac_x1k1uz = f_lo_fun_jac_x1k1uz\n else:\n dummy_var_x1 = SX.sym('dummy_var_x1', acados_formulation.dims.gnsf_nx1)\n dummy_var_x1dot = SX.sym('dummy_var_x1dot', acados_formulation.dims.gnsf_nx1)\n dummy_var_z1 = SX.sym('dummy_var_z1', acados_formulation.dims.gnsf_nz1)\n dummy_var_u = SX.sym('dummy_var_z1', acados_formulation.dims.nu)\n dummy_var_p = SX.sym('dummy_var_z1', acados_formulation.dims.np)\n empty_var = SX.sym('empty_var', 0, 0)\n\n empty_fun = Function('empty_fun', \\\n [dummy_var_x1, dummy_var_x1dot, dummy_var_z1, dummy_var_u, dummy_var_p],\n [empty_var])\n acados_formulation.model.f_lo_fun_jac_x1k1uz = empty_fun\n\n del acados_formulation.gnsf_model\n"
] |
[
[
"numpy.nonzero",
"numpy.array",
"numpy.zeros",
"numpy.prod"
]
] |
dchou1618/MST-DecisionTree-Phishing
|
[
"6443b07dc425a3b58cf88e9092284fb7e082bfc2"
] |
[
"c45/c45.py"
] |
[
"import math\r\nfrom xml.dom import minidom\r\nfrom xml.etree import ElementTree as ET\r\n\r\nfrom sklearn.base import BaseEstimator, ClassifierMixin\r\nfrom sklearn.utils.validation import check_array, check_is_fitted, check_X_y\r\n\r\nfrom .c45_utils import decision, grow_tree\r\n\r\nclass C45(BaseEstimator, ClassifierMixin):\r\n \"\"\"A C4.5 tree classifier.\r\n\r\n Parameters\r\n ----------\r\n attrNames : list, optional (default=None)\r\n The list of feature names used in printing tree during. If left default,\r\n attributes will be named attr0, attr1... etc\r\n See also\r\n --------\r\n DecisionTreeClassifier\r\n References\r\n ----------\r\n .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning\r\n .. [2] https://en.wikipedia.org/wiki/C4.5_algorithm\r\n .. [3] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\r\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\r\n .. [4] J. R. Quinlain, \"C4.5: Programs for Machine Learning\",\r\n Morgan Kaufmann Publishers, 1993\r\n Examples\r\n --------\r\n >>> from sklearn.datasets import load_iris\r\n >>> from sklearn.model_selection import cross_val_score\r\n >>> from c45 import C45\r\n >>> iris = load_iris()\r\n >>> clf = C45(attrNames=iris.feature_names)\r\n >>> cross_val_score(clf, iris.data, iris.target, cv=10)\r\n ... # doctest: +SKIP\r\n ...\r\n array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,\r\n 0.93..., 0.93..., 1. , 0.93..., 1. ])\r\n \"\"\"\r\n def __init__(self, attrNames=None):\r\n if attrNames is not None:\r\n attrNames = [''.join(i for i in x if i.isalnum()).replace(' ', '_') for x in attrNames]\r\n self.attrNames = attrNames\r\n\r\n def fit(self, X, y):\r\n X, y = check_X_y(X, y)\r\n self.X_ = X\r\n self.y_ = y\r\n self.resultType = type(y[0])\r\n if self.attrNames is None:\r\n self.attrNames = [f'attr{x}' for x in range(len(self.X_[0]))]\r\n\r\n assert(len(self.attrNames) == len(self.X_[0]))\r\n\r\n data = [[] for i in range(len(self.attrNames))]\r\n categories = []\r\n\r\n for i in range(len(self.X_)):\r\n categories.append(str(self.y_[i]))\r\n for j in range(len(self.attrNames)):\r\n data[j].append(self.X_[i][j])\r\n root = ET.Element('DecisionTree')\r\n grow_tree(data,categories,root,self.attrNames)\r\n self.root = root\r\n self.tree_ = ET.tostring(root, encoding=\"unicode\")\r\n return self\r\n def getTree(self):\r\n return self.root\r\n def predict(self, X):\r\n check_is_fitted(self, ['tree_', 'resultType', 'attrNames'])\r\n X = check_array(X)\r\n dom = minidom.parseString(self.tree_)\r\n root = dom.childNodes[0]\r\n prediction = []\r\n for i in range(len(X)):\r\n answerlist = decision(root,X[i],self.attrNames,1)\r\n answerlist = sorted(answerlist.items(), key=lambda x:x[1], reverse = True )\r\n answer = answerlist[0][0]\r\n prediction.append((self.resultType)(answer))\r\n return prediction\r\n\r\n def printTree(self):\r\n check_is_fitted(self, ['tree_'])\r\n dom = minidom.parseString(self.tree_)\r\n print(dom.toprettyxml(newl=\"\\r\\n\"))\r\n return dom.toprettyxml(newl=\"\\r\\n\")\r\n"
] |
[
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_X_y",
"sklearn.utils.validation.check_array"
]
] |
allocateam/opendc
|
[
"8ee3067c40c2e94d504c9a663d1df174f289ba8e"
] |
[
"simulator/opendc-experiments/opendc-experiments-allocateam/tools/plot/plot.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List, Dict, Type\n\nimport pandas as pd\nimport seaborn as sns\n\nfrom metrics import Metric, Plot\nfrom metrics.plot import MetricWorkloadBarPlot as bar_plot\nfrom metrics.plot import MetricWorkloadViolinPlot as violin_plot\nfrom metrics.plot import ReportSetting1Makespan, ReportSetting1WaitingTime\nfrom metrics.plot import ReportSetting2Makespan, ReportSetting2WaitingTime\nfrom metrics.plot import ReportSetting3\nimport metrics\n\n\ndef iter_runs(experiments):\n for portfolio_id in experiments['portfolio_id'].unique():\n for scenario_id in experiments['scenario_id'].unique():\n p_id = experiments['portfolio_id'] == portfolio_id\n s_id = experiments['scenario_id'] == scenario_id\n for _, run in experiments[p_id & s_id].iterrows():\n yield run\n\n\nclass Plotter:\n OUTPUT_PATH = f\"{Path(__file__).parent.resolve()}/results/{datetime.now():%Y-%m-%d-%H-%M-%S}\"\n\n def __init__(self,\n plot_classes: Dict[Type[Metric], List[Type[Plot]]],\n path: Path,\n scenario_filter=None):\n self.metric_classes = list(plot_classes.keys())\n self.plot_classes = plot_classes\n self.path = path\n\n self.metrics = self._preprocess(path, scenario_filter)\n self.make_output_path()\n\n def make_output_path(self, sub_dir=None):\n output_path = Path(self.OUTPUT_PATH)\n path = output_path / sub_dir if sub_dir is not None else output_path\n path.mkdir(parents=True, exist_ok=True)\n\n def _preprocess(self, path: Path, scenario_filter) -> List[Metric]:\n experiments = pd.read_parquet(path / \"experiments.parquet\")\n if scenario_filter is not None:\n experiments = experiments[scenario_filter(experiments)]\n return [\n metric(self.plot_classes[metric], iter_runs(experiments))\n for metric in self.metric_classes\n ]\n\n def plot_all(self):\n print(\"Plotting..\")\n for metric in self.metrics:\n metric.generate_plot(self)\n print(f\"✅ {metric.name}\")\n\n print(f\"Plots successfully stored in {self.OUTPUT_PATH}\")\n\ndef generate_report_plots(args):\n # Setting 1\n report_plots = {\n metrics.JobMakespanMetric: [ReportSetting1Makespan],\n metrics.JobWaitingTimeMetric: [ReportSetting1WaitingTime],\n }\n\n scenario_filter = lambda df: df.workload_name == \"spec_trace-2\"\n\n plotter = Plotter(report_plots, args.path, scenario_filter)\n plotter.plot_all()\n\n # Setting 2\n\n report_plots = {\n metrics.JobMakespanMetric: [ReportSetting2Makespan],\n metrics.JobWaitingTimeMetric: [ReportSetting2WaitingTime]\n }\n\n scenario_filter = lambda df: df.topology == \"medium\"\n\n plotter = Plotter(report_plots, args.path, scenario_filter)\n plotter.plot_all()\n\n # Setting 3\n\n groups = [\n ([metrics.JobMakespanMetric, metrics.JobWaitingTimeMetric], \"s3-enduser-1\"),\n ([metrics.JobTurnaroundTimeMetric, metrics.TaskThroughputMetric], \"s3-enduser-2\"),\n ([metrics.IdleTimeMetric, metrics.PowerConsumptionMetric], \"s3-datacenter\")\n ]\n scenario_filter = lambda df: (df.topology == \"medium\") & (df.workload_name == \"spec_trace-2\")\n\n for multi_metrics, filename in groups:\n report_plots = {m: [] for m in multi_metrics}\n plotter = Plotter(report_plots, args.path, scenario_filter)\n dfs = []\n for metric in plotter.metrics:\n df = metric.metric_dataframe()\n df.rename(columns={metric.name: \"value\"}, inplace=True)\n df['metric'] = metric.name\n dfs.append(df)\n\n plot = ReportSetting3(filename)\n plot.generate(pd.concat(dfs), None, plotter, None)\n\n\ndef main():\n \"\"\"Usage: python3 plot.py 
<path_to_data_dir>\"\"\"\n\n report_plots = True\n\n parser = argparse.ArgumentParser(description=\"Plot metrics for the Allocateam experiment.\")\n parser.add_argument(\n \"path\",\n nargs='?',\n type=str,\n help=\"The path to the input csv file.\",\n default=metrics.metric.BASE_DATA_PATH,\n )\n args = parser.parse_args()\n\n sns.set(\n style=\"darkgrid\",\n font_scale=1.6\n )\n\n if report_plots:\n generate_report_plots(args)\n else:\n all_plots = {\n metrics.JobTurnaroundTimeMetric: [bar_plot, violin_plot],\n metrics.TaskThroughputMetric: [bar_plot],\n metrics.PowerConsumptionMetric: [bar_plot, violin_plot],\n metrics.IdleTimeMetric: [bar_plot],\n metrics.JobWaitingTimeMetric: [bar_plot, violin_plot],\n metrics.JobMakespanMetric: [bar_plot, violin_plot],\n }\n plotter = Plotter(all_plots, args.path)\n plotter.plot_all()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_parquet",
"pandas.concat"
]
] |
EnergyModels/BLIS
|
[
"514577a99a16804894019b4ee53d48eda0c6c313"
] |
[
"examples/storage_sizing/test_create_inputs.py"
] |
[
"# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\"\"\"\nBLIS - Balancing Load of Intermittent Solar:\nA characteristic-based transient power plant model\n\nCopyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the\nSoftware.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\nWARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\nimport pandas as pd\nimport numpy as np\n\n\ndef monteCarloInputs(filename, sheetname, iterations):\n # Read Excel with inputs\n df_xls = pd.read_excel(filename, sheetname=sheetname, index_col=0)\n\n # Create Dataframe to hold inputs\n rows = range(iterations)\n parameters1 = df_xls.index.values\n parameters2 = np.append('sheetname', parameters1)\n df = pd.DataFrame(data=0.0, index=rows, columns=parameters2)\n\n # Create Inputs\n for param in parameters1:\n\n dist_type = df_xls.loc[param][\"Distribution\"]\n\n # Constants\n if dist_type == \"constant\" or dist_type == \"Constant\" or dist_type == \"C\":\n avg = df_xls.loc[param][\"Average\"]\n df.loc[:, param] = avg\n\n # Uniform Distributions\n elif dist_type == \"uniform\" or dist_type == \"Uniform\" or dist_type == \"U\":\n low = df_xls.loc[param][\"Low\"]\n high = df_xls.loc[param][\"High\"]\n df.loc[:, param] = np.random.uniform(low=low, high=high, size=iterations)\n\n # Normal Distributions\n elif dist_type == \"normal\" or dist_type == \"Normal\" or dist_type == \"N\":\n avg = df_xls.loc[param][\"Average\"]\n stdev = df_xls.loc[param][\"Stdev\"]\n df.loc[:, param] = np.random.normal(loc=avg, scale=stdev, size=iterations)\n\n # LogNormal Distributions\n elif dist_type == \"lognormal\" or dist_type == \"Lognormal\" or dist_type == \"LN\":\n avg = df_xls.loc[param][\"Average\"]\n stdev = df_xls.loc[param][\"Stdev\"]\n df.loc[:, param] = np.random.lognormal(mean=avg, sigma=stdev, size=iterations)\n\n # Traingular Distributions\n elif dist_type == \"triangle\" or dist_type == \"Triangle\" or dist_type == \"T\":\n left = df_xls.loc[param][\"Low\"]\n mode = df_xls.loc[param][\"Average\"]\n right = df_xls.loc[param][\"High\"]\n df.loc[:, param] = np.random.triangular(left, mode, right, size=iterations)\n df.sheetname = sheetname\n return df\n\n\n# =====================\n# Main Program\n# =====================\nif __name__ == '__main__':\n\n # ==============\n # User Inputs\n # ==============\n studyName = \"results_sizing\"\n\n # Monte Carlo Case Inputs (uses excel, each sheet is a separate study)\n xls_filename = \"inputs_sizing.xlsx\"\n 
sheetnames = [\"CAES\", \"BATT\", \"UTES\", \"Flywheel\"]\n\n # Specify number of iterations per case\n iterations = 10 # To test\n\n # ==============\n # Run Simulations\n # ==============\n all_outputs = []\n count = 0\n\n # Iterate each Monte Carlo case\n for sheetname in sheetnames:\n inputs = monteCarloInputs(xls_filename, sheetname, iterations)\n inputs.to_csv('inputs_' + sheetname + '.csv')\n"
] |
[
[
"numpy.random.lognormal",
"pandas.read_excel",
"pandas.DataFrame",
"numpy.append",
"numpy.random.normal",
"numpy.random.triangular",
"numpy.random.uniform"
]
] |
conan7882/tmp
|
[
"669161ff20770c5698f2e4f5acc3c366a4cd7151"
] |
[
"src/utils/utils.py"
] |
[
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# File: utils.py\r\n# Author: Qian Ge <geqian1001@gmail.com>\r\n\r\n\r\nimport numpy as np\r\n\r\ndef make_list(inputs):\r\n if not isinstance(inputs, list):\r\n return [inputs]\r\n else:\r\n return inputs\r\n\r\ndef get_shape4D(in_val):\r\n \"\"\"\r\n Return a 4D shape\r\n Args:\r\n in_val (int or list with length 2)\r\n Returns:\r\n list with length 4\r\n \"\"\"\r\n # if isinstance(in_val, int):\r\n return [1] + get_shape2D(in_val) + [1]\r\n\r\ndef get_shape2D(in_val):\r\n \"\"\"\r\n Return a 2D shape \r\n Args:\r\n in_val (int or list with length 2) \r\n Returns:\r\n list with length 2\r\n \"\"\"\r\n # in_val = int(in_val)\r\n if isinstance(in_val, int):\r\n return [in_val, in_val]\r\n # if isinstance(in_val, list):\r\n if len(in_val) == 2:\r\n # assert len(in_val) == 2\r\n return in_val\r\n raise RuntimeError('Illegal shape: {}'.format(in_val))\r\n\r\ndef to_nparray(lists):\r\n \"\"\" convert list of list into np.array \r\n\r\n Args:\r\n lists: list[list[]]\r\n \"\"\"\r\n return [np.array(l) for l in lists]\r\n\r\n"
] |
[
[
"numpy.array"
]
] |
symbench/scm
|
[
"8e40d3549dd605b1822156216684a28cc027a8e8"
] |
[
"circuit_recognizer/circuit_recognizer/test/connect/test_line.py"
] |
[
"from itertools import chain\n\nimport numpy as np\n\nfrom circuit_recognizer.connect.line import DirectedLine, detect_intersection_rect\n\n\nclass TestLine:\n def test_detect_intersection(self):\n image = np.array(\n [\n [False, False, True, False, False, False, False],\n [False, False, True, True, True, True, True],\n [False, False, True, False, False, False, False],\n [False, False, True, False, True, False, False],\n [False, False, True, False, False, False, False],\n [False, False, True, False, False, False, False],\n [False, False, True, False, False, False, False],\n ]\n )\n line = DirectedLine.btwn(np.array([6, 2]), np.array([0, 2]))\n intersection = line.detect_intersection(image)\n assert intersection\n (x, y, width, height) = intersection.bounding_box()\n assert width == 1\n assert height == 1\n assert line.remove_overlap_into(intersection.bounding_box())\n\n def test_detect_intersection_big(self):\n image = np.zeros((20, 20), dtype=bool)\n image[:, 12] = True\n image[5, :] = True\n line = DirectedLine.btwn(np.array([10, 12]), np.array([0, 12]))\n intersection = line.detect_intersection(image)\n assert intersection\n (x, y, width, height) = intersection.bounding_box()\n assert x == 12\n assert y == 5\n assert width == 1\n assert height == 1\n assert line.remove_overlap_into(intersection.bounding_box())\n\n def test_detect_intersection_rect_T(self):\n image = np.array(\n [\n [False, False, False, False],\n [True, True, True, True],\n [True, True, True, True],\n [True, True, True, True],\n [False, True, True, False],\n ]\n )\n i_regions = [\n DirectedLine.btwn(*np.array([(3, 1), (1, 1)])),\n DirectedLine.btwn(*np.array([(3, 2), (1, 2)])),\n DirectedLine.btwn(*np.array([(3, 1), (3, 2)])),\n DirectedLine.btwn(*np.array([(2, 1), (2, 2)])),\n DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),\n ]\n\n for line in i_regions:\n (top_left, bottom_right) = detect_intersection_rect(image, line)\n assert (\n top_left == 1\n ).all(), f\"{line.direction}: Top left of junction should be [1 1] but was {top_left}\"\n assert (\n bottom_right == np.array([3, 2])\n ).all(), f\"{line.direction}: Intersection BR should be [3 2] but was {bottom_right}\"\n\n line = line.reverse()\n (top_left, bottom_right) = detect_intersection_rect(image, line)\n assert (\n top_left == 1\n ).all(), f\"{line.direction}: Top left of junction should be [1 1] but was {top_left}\"\n assert (\n bottom_right == np.array([3, 2])\n ).all(), f\"{line.direction}: Intersection BR should be [3 2] but was {bottom_right}\"\n\n def test_detect_intersection_rect_L(self):\n image = np.array(\n [\n [False, True, True, False],\n [False, True, True, True],\n [False, True, True, True],\n [False, False, False, False],\n ]\n )\n i_regions = [\n DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),\n DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),\n DirectedLine.btwn(*np.array([(1, 2), (2, 2)])),\n DirectedLine.btwn(*np.array([(2, 2), (1, 2)])),\n ]\n\n for line in i_regions:\n (top_left, bottom_right) = detect_intersection_rect(image, line)\n assert (\n top_left == 1\n ).all(), f\"Expected top left to be [1 1]. Found {top_left}\"\n assert (\n bottom_right == 2\n ).all(), f\"Expected bottom right to be [2 2]. 
Found {bottom_right}\"\n\n def test_detect_intersection_rect_L_270(self):\n image = np.array(\n [\n [False, True, True, False],\n [True, True, True, False],\n [True, True, True, False],\n [False, False, False, False],\n ]\n )\n\n i_regions = [\n DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),\n DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),\n DirectedLine.btwn(*np.array([(1, 1), (2, 1)])),\n DirectedLine.btwn(*np.array([(2, 1), (1, 1)])),\n ]\n\n for line in i_regions:\n (top_left, bottom_right) = detect_intersection_rect(image, line)\n assert (top_left == 1).all()\n assert (bottom_right == 2).all()\n\n def test_detect_intersection_L_1px(self):\n image = np.array(\n [\n [False, True, False, False],\n [False, True, False, False],\n [False, True, True, True],\n [False, False, False, False],\n ]\n )\n i_regions = [\n DirectedLine.btwn(*np.array([(1, 1), (1, 2)])),\n DirectedLine.btwn(*np.array([(1, 2), (1, 1)])),\n DirectedLine.btwn(*np.array([(1, 2), (2, 2)])),\n DirectedLine.btwn(*np.array([(2, 2), (1, 2)])),\n ]\n\n for line in i_regions:\n (top_left, bottom_right) = detect_intersection_rect(image, line)\n assert (\n top_left == 1\n ).all(), f\"Expected top left to be [1 1]. Found {top_left}\"\n assert (\n bottom_right == 2\n ).all(), f\"Expected bottom right to be [2 2]. Found {bottom_right}\"\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
fl42/frigate
|
[
"d7460d9c3606267b36a2ba5efccde395f6e05064"
] |
[
"frigate/config.py"
] |
[
"import base64\nimport json\nimport logging\nimport os\nfrom typing import Dict\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport voluptuous as vol\nimport yaml\n\nfrom frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR\nfrom frigate.util import create_mask\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_TRACKED_OBJECTS = ['person']\n\nDETECTORS_SCHEMA = vol.Schema(\n {\n vol.Required(str): {\n vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),\n vol.Optional('device', default='usb'): str,\n vol.Optional('num_threads', default=3): int\n }\n }\n)\n\nDEFAULT_DETECTORS = {\n 'coral': {\n 'type': 'edgetpu',\n 'device': 'usb'\n }\n}\n\nMQTT_SCHEMA = vol.Schema(\n {\n vol.Required('host'): str,\n vol.Optional('port', default=1883): int,\n vol.Optional('topic_prefix', default='frigate'): str,\n vol.Optional('client_id', default='frigate'): str,\n vol.Optional('stats_interval', default=60): int,\n 'user': str,\n 'password': str\n }\n)\n\nRETAIN_SCHEMA = vol.Schema(\n {\n vol.Required('default',default=10): int,\n 'objects': {\n str: int\n }\n }\n)\n\nCLIPS_SCHEMA = vol.Schema(\n {\n vol.Optional('max_seconds', default=300): int,\n 'tmpfs_cache_size': str,\n vol.Optional('retain', default={}): RETAIN_SCHEMA\n }\n)\n\nFFMPEG_GLOBAL_ARGS_DEFAULT = ['-hide_banner','-loglevel','warning']\nFFMPEG_INPUT_ARGS_DEFAULT = ['-avoid_negative_ts', 'make_zero',\n '-fflags', '+genpts+discardcorrupt',\n '-rtsp_transport', 'tcp',\n '-stimeout', '5000000',\n '-use_wallclock_as_timestamps', '1']\nDETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ['-f', 'rawvideo',\n '-pix_fmt', 'yuv420p']\nRTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = [\"-c\", \"copy\", \"-f\", \"flv\"]\nSAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = [\"-f\", \"segment\", \"-segment_time\",\n \"10\", \"-segment_format\", \"mp4\", \"-reset_timestamps\", \"1\", \"-strftime\",\n \"1\", \"-c\", \"copy\", \"-an\"]\nRECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [\"-f\", \"segment\", \"-segment_time\",\n \"60\", \"-segment_format\", \"mp4\", \"-reset_timestamps\", \"1\", \"-strftime\",\n \"1\", \"-c\", \"copy\", \"-an\"]\n\nGLOBAL_FFMPEG_SCHEMA = vol.Schema(\n {\n vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]),\n vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('output_args', default={}): {\n vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n }\n }\n)\n\nMOTION_SCHEMA = vol.Schema(\n {\n 'mask': vol.Any(str, [str]),\n 'threshold': vol.Range(min=1, max=255),\n 'contour_area': int,\n 'delta_alpha': float,\n 'frame_alpha': float,\n 'frame_height': int\n }\n)\n\nDETECT_SCHEMA = vol.Schema(\n {\n 'max_disappeared': int\n }\n)\n\nFILTER_SCHEMA = vol.Schema(\n {\n str: {\n 'min_area': int,\n 'max_area': int,\n 'threshold': float,\n }\n }\n)\n\ndef filters_for_all_tracked_objects(object_config):\n for tracked_object in object_config.get('track', DEFAULT_TRACKED_OBJECTS):\n if not 'filters' in object_config:\n object_config['filters'] = {}\n if not tracked_object in object_config['filters']:\n object_config['filters'][tracked_object] = {}\n return object_config\n\nOBJECTS_SCHEMA = 
vol.Schema(vol.All(filters_for_all_tracked_objects,\n {\n 'track': [str],\n 'mask': vol.Any(str, [str]),\n vol.Optional('filters', default = {}): FILTER_SCHEMA.extend(\n { \n str: {\n 'min_score': float,\n 'mask': vol.Any(str, [str]),\n }\n })\n }\n))\n\ndef each_role_used_once(inputs):\n roles = [role for i in inputs for role in i['roles']]\n roles_set = set(roles)\n if len(roles) > len(roles_set):\n raise ValueError\n return inputs\n\ndef detect_is_required(inputs):\n roles = [role for i in inputs for role in i['roles']]\n if not 'detect' in roles:\n raise ValueError\n return inputs\n\nCAMERA_FFMPEG_SCHEMA = vol.Schema(\n {\n vol.Required('inputs'): vol.All([{\n vol.Required('path'): str,\n vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'],\n 'global_args': vol.Any(str, [str]),\n 'hwaccel_args': vol.Any(str, [str]),\n 'input_args': vol.Any(str, [str]),\n }], vol.Msg(each_role_used_once, msg=\"Each input role may only be used once\"), \n vol.Msg(detect_is_required, msg=\"The detect role is required\")),\n 'global_args': vol.Any(str, [str]),\n 'hwaccel_args': vol.Any(str, [str]),\n 'input_args': vol.Any(str, [str]),\n 'output_args': {\n vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),\n }\n }\n)\n\ndef ensure_zones_and_cameras_have_different_names(cameras):\n zones = [zone for camera in cameras.values() for zone in camera['zones'].keys()]\n for zone in zones:\n if zone in cameras.keys():\n raise ValueError\n return cameras\n\nCAMERAS_SCHEMA = vol.Schema(vol.All(\n {\n str: {\n vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA,\n vol.Required('height'): int,\n vol.Required('width'): int,\n 'fps': int,\n vol.Optional('best_image_timeout', default=60): int,\n vol.Optional('zones', default={}): {\n str: {\n vol.Required('coordinates'): vol.Any(str, [str]),\n vol.Optional('filters', default={}): FILTER_SCHEMA\n }\n },\n vol.Optional('clips', default={}): {\n vol.Optional('enabled', default=False): bool,\n vol.Optional('pre_capture', default=5): int,\n vol.Optional('post_capture', default=5): int,\n vol.Optional('required_zones', default=[]): [str],\n 'objects': [str],\n vol.Optional('retain', default={}): RETAIN_SCHEMA,\n },\n vol.Optional('record', default={}): {\n 'enabled': bool,\n 'retain_days': int,\n },\n vol.Optional('rtmp', default={}): {\n vol.Required('enabled', default=True): bool,\n },\n vol.Optional('snapshots', default={}): {\n vol.Optional('enabled', default=False): bool,\n vol.Optional('timestamp', default=False): bool,\n vol.Optional('bounding_box', default=False): bool,\n vol.Optional('crop', default=False): bool,\n vol.Optional('required_zones', default=[]): [str],\n 'height': int,\n vol.Optional('retain', default={}): RETAIN_SCHEMA,\n },\n vol.Optional('mqtt', default={}): {\n vol.Optional('enabled', default=True): bool,\n vol.Optional('timestamp', default=True): bool,\n vol.Optional('bounding_box', default=True): bool,\n vol.Optional('crop', default=True): bool,\n vol.Optional('height', default=270): int,\n vol.Optional('required_zones', default=[]): [str],\n },\n vol.Optional('objects', default={}): OBJECTS_SCHEMA,\n vol.Optional('motion', default={}): MOTION_SCHEMA,\n vol.Optional('detect', default={}): DETECT_SCHEMA.extend({\n vol.Optional('enabled', 
default=True): bool\n })\n }\n }, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))\n)\n\nFRIGATE_CONFIG_SCHEMA = vol.Schema(\n {\n vol.Optional('database', default={}): {\n vol.Optional('path', default=os.path.join(CLIPS_DIR, 'frigate.db')): str\n },\n vol.Optional('model', default={'width': 320, 'height': 320}): {\n vol.Required('width'): int,\n vol.Required('height'): int\n },\n vol.Optional('detectors', default=DEFAULT_DETECTORS): DETECTORS_SCHEMA,\n 'mqtt': MQTT_SCHEMA,\n vol.Optional('logger', default={'default': 'info', 'logs': {}}): {\n vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']),\n vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) }\n },\n vol.Optional('snapshots', default={}): {\n vol.Optional('retain', default={}): RETAIN_SCHEMA\n },\n vol.Optional('clips', default={}): CLIPS_SCHEMA,\n vol.Optional('record', default={}): {\n vol.Optional('enabled', default=False): bool,\n vol.Optional('retain_days', default=30): int,\n },\n vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,\n vol.Optional('objects', default={}): OBJECTS_SCHEMA,\n vol.Optional('motion', default={}): MOTION_SCHEMA,\n vol.Optional('detect', default={}): DETECT_SCHEMA,\n vol.Required('cameras', default={}): CAMERAS_SCHEMA,\n vol.Optional('environment_vars', default={}): { str: str }\n }\n)\n\nclass DatabaseConfig():\n def __init__(self, config):\n self._path = config['path']\n\n @property\n def path(self):\n return self._path\n\n def to_dict(self):\n return {\n 'path': self.path\n }\n\nclass ModelConfig():\n def __init__(self, config):\n self._width = config['width']\n self._height = config['height']\n\n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n\n def to_dict(self):\n return {\n 'width': self.width,\n 'height': self.height\n }\n\nclass DetectorConfig():\n def __init__(self, config):\n self._type = config['type']\n self._device = config['device']\n self._num_threads = config['num_threads']\n\n @property\n def type(self):\n return self._type\n\n @property\n def device(self):\n return self._device\n\n @property\n def num_threads(self):\n return self._num_threads\n\n def to_dict(self):\n return {\n 'type': self.type,\n 'device': self.device,\n 'num_threads': self.num_threads\n }\n\nclass LoggerConfig():\n def __init__(self, config):\n self._default = config['default'].upper()\n self._logs = {k: v.upper() for k, v in config['logs'].items()}\n\n @property\n def default(self):\n return self._default\n\n @property\n def logs(self):\n return self._logs\n\n def to_dict(self):\n return {\n 'default': self.default,\n 'logs': self.logs\n }\n\nclass MqttConfig():\n def __init__(self, config):\n self._host = config['host']\n self._port = config['port']\n self._topic_prefix = config['topic_prefix']\n self._client_id = config['client_id']\n self._user = config.get('user')\n self._password = config.get('password')\n self._stats_interval = config.get('stats_interval')\n \n @property\n def host(self):\n return self._host\n\n @property\n def port(self):\n return self._port\n\n @property\n def topic_prefix(self):\n return self._topic_prefix\n\n @property\n def client_id(self):\n return self._client_id\n\n @property\n def user(self):\n return self._user\n\n @property\n def password(self):\n return self._password\n\n @property\n def stats_interval(self):\n return self._stats_interval\n\n def to_dict(self):\n return 
{\n 'host': self.host,\n 'port': self.port,\n 'topic_prefix': self.topic_prefix,\n 'client_id': self.client_id,\n 'user': self.user,\n 'stats_interval': self.stats_interval\n }\n\nclass CameraInput():\n def __init__(self, camera_config, global_config, ffmpeg_input):\n self._path = ffmpeg_input['path']\n self._roles = ffmpeg_input['roles']\n self._global_args = ffmpeg_input.get('global_args', camera_config.get('global_args', global_config['global_args']))\n self._hwaccel_args = ffmpeg_input.get('hwaccel_args', camera_config.get('hwaccel_args', global_config['hwaccel_args']))\n self._input_args = ffmpeg_input.get('input_args', camera_config.get('input_args', global_config['input_args']))\n\n @property\n def path(self):\n return self._path\n\n @property\n def roles(self):\n return self._roles\n\n @property\n def global_args(self):\n return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ')\n\n @property\n def hwaccel_args(self):\n return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ')\n\n @property\n def input_args(self):\n return self._input_args if isinstance(self._input_args, list) else self._input_args.split(' ')\n\nclass CameraFfmpegConfig():\n def __init__(self, global_config, config):\n self._inputs = [CameraInput(config, global_config, i) for i in config['inputs']]\n self._output_args = config.get('output_args', global_config['output_args'])\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def output_args(self):\n return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()}\n\nclass RetainConfig():\n def __init__(self, global_config, config):\n self._default = config.get('default', global_config.get('default'))\n self._objects = config.get('objects', global_config.get('objects', {}))\n\n @property\n def default(self):\n return self._default\n\n @property\n def objects(self):\n return self._objects\n\n def to_dict(self):\n return {\n 'default': self.default,\n 'objects': self.objects\n }\n\nclass ClipsConfig():\n def __init__(self, config):\n self._max_seconds = config['max_seconds']\n self._tmpfs_cache_size = config.get('tmpfs_cache_size', '').strip()\n self._retain = RetainConfig(config['retain'], config['retain'])\n \n @property\n def max_seconds(self):\n return self._max_seconds\n\n @property\n def tmpfs_cache_size(self):\n return self._tmpfs_cache_size\n\n @property\n def retain(self):\n return self._retain\n\n def to_dict(self):\n return {\n 'max_seconds': self.max_seconds,\n 'tmpfs_cache_size': self.tmpfs_cache_size,\n 'retain': self.retain.to_dict()\n }\n\nclass SnapshotsConfig():\n def __init__(self, config):\n self._retain = RetainConfig(config['retain'], config['retain'])\n\n @property\n def retain(self):\n return self._retain\n\n def to_dict(self):\n return {\n 'retain': self.retain.to_dict()\n }\n\nclass RecordConfig():\n def __init__(self, global_config, config):\n self._enabled = config.get('enabled', global_config['enabled'])\n self._retain_days = config.get('retain_days', global_config['retain_days'])\n\n @property\n def enabled(self):\n return self._enabled\n\n @property\n def retain_days(self):\n return self._retain_days\n\n def to_dict(self):\n return {\n 'enabled': self.enabled,\n 'retain_days': self.retain_days,\n }\n\nclass FilterConfig():\n def __init__(self, global_config, config, global_mask=None, frame_shape=None):\n self._min_area = config.get('min_area', global_config.get('min_area', 0))\n self._max_area = 
config.get('max_area', global_config.get('max_area', 24000000))\n self._threshold = config.get('threshold', global_config.get('threshold', 0.7))\n self._min_score = config.get('min_score', global_config.get('min_score', 0.5))\n\n self._raw_mask = []\n if global_mask:\n if isinstance(global_mask, list):\n self._raw_mask += global_mask\n elif isinstance(global_mask, str):\n self._raw_mask += [global_mask]\n\n mask = config.get('mask')\n if mask:\n if isinstance(mask, list):\n self._raw_mask += mask\n elif isinstance(mask, str):\n self._raw_mask += [mask]\n self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None\n\n @property\n def min_area(self):\n return self._min_area\n\n @property\n def max_area(self):\n return self._max_area\n\n @property\n def threshold(self):\n return self._threshold\n\n @property\n def min_score(self):\n return self._min_score\n\n @property\n def mask(self):\n return self._mask\n\n def to_dict(self):\n return {\n 'min_area': self.min_area,\n 'max_area': self.max_area,\n 'threshold': self.threshold,\n 'min_score': self.min_score,\n 'mask': self._raw_mask\n }\n\nclass ObjectConfig():\n def __init__(self, global_config, config, frame_shape):\n self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS))\n self._raw_mask = config.get('mask')\n self._filters = { name: FilterConfig(global_config['filters'].get(name, {}), config['filters'].get(name, {}), self._raw_mask, frame_shape) for name in self._track }\n\n @property\n def track(self):\n return self._track\n\n @property\n def filters(self) -> Dict[str, FilterConfig]:\n return self._filters\n\n def to_dict(self):\n return {\n 'track': self.track,\n 'mask': self._raw_mask,\n 'filters': { k: f.to_dict() for k, f in self.filters.items() }\n }\n\nclass CameraSnapshotsConfig():\n def __init__(self, global_config, config):\n self._enabled = config['enabled']\n self._timestamp = config['timestamp']\n self._bounding_box = config['bounding_box']\n self._crop = config['crop']\n self._height = config.get('height')\n self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain'])\n self._required_zones = config['required_zones']\n \n @property\n def enabled(self):\n return self._enabled\n\n @property\n def timestamp(self):\n return self._timestamp\n\n @property\n def bounding_box(self):\n return self._bounding_box\n\n @property\n def crop(self):\n return self._crop\n\n @property\n def height(self):\n return self._height\n \n @property\n def retain(self):\n return self._retain\n\n @property\n def required_zones(self):\n return self._required_zones\n \n def to_dict(self):\n return {\n 'enabled': self.enabled,\n 'timestamp': self.timestamp,\n 'bounding_box': self.bounding_box,\n 'crop': self.crop,\n 'height': self.height,\n 'retain': self.retain.to_dict(),\n 'required_zones': self.required_zones\n }\n\nclass CameraMqttConfig():\n def __init__(self, config):\n self._enabled = config['enabled']\n self._timestamp = config['timestamp']\n self._bounding_box = config['bounding_box']\n self._crop = config['crop']\n self._height = config.get('height')\n self._required_zones = config['required_zones']\n\n @property\n def enabled(self):\n return self._enabled\n\n @property\n def timestamp(self):\n return self._timestamp\n\n @property\n def bounding_box(self):\n return self._bounding_box\n\n @property\n def crop(self):\n return self._crop\n\n @property\n def height(self):\n return self._height\n\n @property\n def required_zones(self):\n return self._required_zones\n\n def 
to_dict(self):\n return {\n 'enabled': self.enabled,\n 'timestamp': self.timestamp,\n 'bounding_box': self.bounding_box,\n 'crop': self.crop,\n 'height': self.height,\n 'required_zones': self.required_zones\n }\n\nclass CameraClipsConfig():\n def __init__(self, global_config, config):\n self._enabled = config['enabled']\n self._pre_capture = config['pre_capture']\n self._post_capture = config['post_capture']\n self._objects = config.get('objects')\n self._retain = RetainConfig(global_config['clips']['retain'], config['retain'])\n self._required_zones = config['required_zones']\n \n @property\n def enabled(self):\n return self._enabled\n\n @property\n def pre_capture(self):\n return self._pre_capture\n\n @property\n def post_capture(self):\n return self._post_capture\n\n @property\n def objects(self):\n return self._objects\n\n @property\n def retain(self):\n return self._retain\n\n @property\n def required_zones(self):\n return self._required_zones\n\n def to_dict(self):\n return {\n 'enabled': self.enabled,\n 'pre_capture': self.pre_capture,\n 'post_capture': self.post_capture,\n 'objects': self.objects,\n 'retain': self.retain.to_dict(),\n 'required_zones': self.required_zones\n }\n\nclass CameraRtmpConfig():\n def __init__(self, global_config, config):\n self._enabled = config['enabled']\n\n @property\n def enabled(self):\n return self._enabled\n\n def to_dict(self):\n return {\n 'enabled': self.enabled,\n }\n\nclass MotionConfig():\n def __init__(self, global_config, config, frame_shape):\n self._raw_mask = config.get('mask')\n if self._raw_mask:\n self._mask = create_mask(frame_shape, self._raw_mask)\n else:\n default_mask = np.zeros(frame_shape, np.uint8)\n default_mask[:] = 255\n self._mask = default_mask\n self._threshold = config.get('threshold', global_config.get('threshold', 25))\n self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))\n self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))\n self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))\n self._frame_height = config.get('frame_height', global_config.get('frame_height', frame_shape[0]//6))\n\n @property\n def mask(self):\n return self._mask\n\n @property\n def threshold(self):\n return self._threshold\n\n @property\n def contour_area(self):\n return self._contour_area\n\n @property\n def delta_alpha(self):\n return self._delta_alpha\n\n @property\n def frame_alpha(self):\n return self._frame_alpha\n\n @property\n def frame_height(self):\n return self._frame_height\n\n def to_dict(self):\n return {\n 'mask': self._raw_mask,\n 'threshold': self.threshold,\n 'contour_area': self.contour_area,\n 'delta_alpha': self.delta_alpha,\n 'frame_alpha': self.frame_alpha,\n 'frame_height': self.frame_height,\n }\n\n\n\nclass DetectConfig():\n def __init__(self, global_config, config, camera_fps):\n self._enabled = config['enabled']\n self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*5))\n\n @property\n def enabled(self):\n return self._enabled\n\n @property\n def max_disappeared(self):\n return self._max_disappeared\n\n def to_dict(self):\n return {\n 'enabled': self.enabled,\n 'max_disappeared': self._max_disappeared,\n }\n\nclass ZoneConfig():\n def __init__(self, name, config):\n self._coordinates = config['coordinates']\n self._filters = { name: FilterConfig(c, c) for name, c in config['filters'].items() }\n\n if isinstance(self._coordinates, list):\n self._contour = 
np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates])\n elif isinstance(self._coordinates, str):\n points = self._coordinates.split(',')\n self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])\n else:\n print(f\"Unable to parse zone coordinates for {name}\")\n self._contour = np.array([])\n\n self._color = (0,0,0)\n\n @property\n def coordinates(self):\n return self._coordinates\n\n @property\n def contour(self):\n return self._contour\n\n @contour.setter\n def contour(self, val):\n self._contour = val\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, val):\n self._color = val\n\n @property\n def filters(self):\n return self._filters\n\n def to_dict(self):\n return {\n 'filters': {k: f.to_dict() for k, f in self.filters.items()},\n 'coordinates': self._coordinates\n }\n\nclass CameraConfig():\n def __init__(self, name, config, global_config):\n self._name = name\n self._ffmpeg = CameraFfmpegConfig(global_config['ffmpeg'], config['ffmpeg'])\n self._height = config.get('height')\n self._width = config.get('width')\n self._frame_shape = (self._height, self._width)\n self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])\n self._fps = config.get('fps')\n self._best_image_timeout = config['best_image_timeout']\n self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }\n self._clips = CameraClipsConfig(global_config, config['clips'])\n self._record = RecordConfig(global_config['record'], config['record'])\n self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])\n self._snapshots = CameraSnapshotsConfig(global_config, config['snapshots'])\n self._mqtt = CameraMqttConfig(config['mqtt'])\n self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}), self._frame_shape)\n self._motion = MotionConfig(global_config['motion'], config['motion'], self._frame_shape)\n self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))\n\n self._ffmpeg_cmds = []\n for ffmpeg_input in self._ffmpeg.inputs:\n ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)\n if ffmpeg_cmd is None:\n continue\n\n self._ffmpeg_cmds.append({\n 'roles': ffmpeg_input.roles,\n 'cmd': ffmpeg_cmd\n })\n\n\n self._set_zone_colors(self._zones)\n\n def _get_ffmpeg_cmd(self, ffmpeg_input):\n ffmpeg_output_args = []\n if 'detect' in ffmpeg_input.roles:\n ffmpeg_output_args = self.ffmpeg.output_args['detect'] + ffmpeg_output_args + ['pipe:']\n if self.fps:\n ffmpeg_output_args = [\"-r\", str(self.fps)] + ffmpeg_output_args\n if 'rtmp' in ffmpeg_input.roles and self.rtmp.enabled:\n ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [\n f\"rtmp://127.0.0.1/live/{self.name}\"\n ] + ffmpeg_output_args\n if 'clips' in ffmpeg_input.roles:\n ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [\n f\"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4\"\n ] + ffmpeg_output_args\n if 'record' in ffmpeg_input.roles and self.record.enabled:\n ffmpeg_output_args = self.ffmpeg.output_args['record'] + [\n f\"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4\"\n ] + ffmpeg_output_args\n\n # if there arent any outputs enabled for this input\n if len(ffmpeg_output_args) == 0:\n return None\n\n cmd = (['ffmpeg'] +\n ffmpeg_input.global_args +\n ffmpeg_input.hwaccel_args +\n ffmpeg_input.input_args +\n ['-i', ffmpeg_input.path] +\n ffmpeg_output_args)\n\n return [part for part in cmd if part != '']\n\n def _set_zone_colors(self, zones: 
Dict[str, ZoneConfig]):\n # set colors for zones\n all_zone_names = zones.keys()\n zone_colors = {}\n colors = plt.cm.get_cmap('tab10', len(all_zone_names))\n for i, zone in enumerate(all_zone_names):\n zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])\n\n for name, zone in zones.items():\n zone.color = zone_colors[name]\n\n @property\n def name(self):\n return self._name\n\n @property\n def ffmpeg(self):\n return self._ffmpeg\n\n @property\n def height(self):\n return self._height\n\n @property\n def width(self):\n return self._width\n\n @property\n def fps(self):\n return self._fps\n\n @property\n def best_image_timeout(self):\n return self._best_image_timeout\n\n @property\n def zones(self)-> Dict[str, ZoneConfig]:\n return self._zones\n\n @property\n def clips(self):\n return self._clips\n\n @property\n def record(self):\n return self._record\n\n @property\n def rtmp(self):\n return self._rtmp\n\n @property\n def snapshots(self):\n return self._snapshots\n\n @property\n def mqtt(self):\n return self._mqtt\n\n @property\n def objects(self):\n return self._objects\n\n @property\n def motion(self):\n return self._motion\n\n @property\n def detect(self):\n return self._detect\n\n @property\n def frame_shape(self):\n return self._frame_shape\n\n @property\n def frame_shape_yuv(self):\n return self._frame_shape_yuv\n\n @property\n def ffmpeg_cmds(self):\n return self._ffmpeg_cmds\n\n def to_dict(self):\n return {\n 'name': self.name,\n 'height': self.height,\n 'width': self.width,\n 'fps': self.fps,\n 'best_image_timeout': self.best_image_timeout,\n 'zones': {k: z.to_dict() for k, z in self.zones.items()},\n 'clips': self.clips.to_dict(),\n 'record': self.record.to_dict(),\n 'rtmp': self.rtmp.to_dict(),\n 'snapshots': self.snapshots.to_dict(),\n 'mqtt': self.mqtt.to_dict(),\n 'objects': self.objects.to_dict(),\n 'motion': self.motion.to_dict(),\n 'detect': self.detect.to_dict(),\n 'frame_shape': self.frame_shape,\n 'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],\n }\n\n\nclass FrigateConfig():\n def __init__(self, config_file=None, config=None):\n if config is None and config_file is None:\n raise ValueError('config or config_file must be defined')\n elif not config_file is None:\n config = self._load_file(config_file)\n\n config = FRIGATE_CONFIG_SCHEMA(config)\n\n config = self._sub_env_vars(config)\n\n self._database = DatabaseConfig(config['database'])\n self._model = ModelConfig(config['model'])\n self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }\n self._mqtt = MqttConfig(config['mqtt'])\n self._clips = ClipsConfig(config['clips'])\n self._snapshots = SnapshotsConfig(config['snapshots'])\n self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() }\n self._logger = LoggerConfig(config['logger'])\n self._environment_vars = config['environment_vars']\n\n def _sub_env_vars(self, config):\n frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}\n\n if 'password' in config['mqtt']:\n config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)\n\n for camera in config['cameras'].values():\n for i in camera['ffmpeg']['inputs']:\n i['path'] = i['path'].format(**frigate_env_vars)\n\n return config\n\n def _load_file(self, config_file):\n with open(config_file) as f:\n raw_config = f.read()\n\n if config_file.endswith(\".yml\"):\n config = yaml.safe_load(raw_config)\n elif config_file.endswith(\".json\"):\n 
config = json.loads(raw_config)\n\n return config\n\n def to_dict(self):\n return {\n 'database': self.database.to_dict(),\n 'model': self.model.to_dict(),\n 'detectors': {k: d.to_dict() for k, d in self.detectors.items()},\n 'mqtt': self.mqtt.to_dict(),\n 'clips': self.clips.to_dict(),\n 'snapshots': self.snapshots.to_dict(),\n 'cameras': {k: c.to_dict() for k, c in self.cameras.items()},\n 'logger': self.logger.to_dict(),\n 'environment_vars': self._environment_vars\n }\n\n @property\n def database(self):\n return self._database\n\n @property\n def model(self):\n return self._model\n\n @property\n def detectors(self) -> Dict[str, DetectorConfig]:\n return self._detectors\n\n @property\n def logger(self):\n return self._logger\n\n @property\n def mqtt(self):\n return self._mqtt\n\n @property\n def clips(self):\n return self._clips\n\n @property\n def snapshots(self):\n return self._snapshots\n\n @property\n def cameras(self) -> Dict[str, CameraConfig]:\n return self._cameras\n\n @property\n def environment_vars(self):\n return self._environment_vars\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
thvasilo/autogluon
|
[
"49eb617cd324cdc758a2a7b74bfef5e34d7f96d7"
] |
[
"autogluon/utils/tabular/ml/models/lr/lr_model.py"
] |
[
"import logging\nimport re\n\nimport numpy as np\nfrom pandas import DataFrame\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.preprocessing import StandardScaler, QuantileTransformer\n\nfrom .hyperparameters.parameters import get_param_baseline, get_model_params, get_default_params, INCLUDE, IGNORE, ONLY\nfrom .hyperparameters.searchspaces import get_default_searchspace\nfrom .lr_preprocessing_utils import NlpDataPreprocessor, OheFeaturesGenerator, NumericDataPreprocessor\nfrom ...constants import BINARY, REGRESSION\nfrom ....ml.models.abstract.abstract_model import AbstractModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinearModel(AbstractModel):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.model_class, self.penalty, self.handle_text = get_model_params(self.problem_type, self.params)\n self.types_of_features = None\n self.pipeline = None\n\n self.model_params, default_params = get_default_params(self.problem_type, self.penalty)\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n\n def tokenize(self, s):\n return re.split('[ ]+', s)\n\n def _get_types_of_features(self, df):\n \"\"\" Returns dict with keys: : 'continuous', 'skewed', 'onehot', 'embed', 'language', values = ordered list of feature-names falling into each category.\n Each value is a list of feature-names corresponding to columns in original dataframe.\n TODO: ensure features with zero variance have already been removed before this function is called.\n \"\"\"\n if self.types_of_features is not None:\n logger.warning(\"Attempting to _get_types_of_features for LRModel, but previously already did this.\")\n categorical_featnames = self.__get_feature_type_if_present('object') + self.__get_feature_type_if_present('bool')\n continuous_featnames = self.__get_feature_type_if_present('float') + self.__get_feature_type_if_present('int') + self.__get_feature_type_if_present(\n 'datetime')\n language_featnames = self.feature_types_metadata['nlp']\n valid_features = categorical_featnames + continuous_featnames + language_featnames\n if len(categorical_featnames) + len(continuous_featnames) + len(language_featnames) != df.shape[1]:\n unknown_features = [feature for feature in df.columns if feature not in valid_features]\n df = df.drop(columns=unknown_features)\n self.features = list(df.columns)\n\n types_of_features = {'continuous': [], 'skewed': [], 'onehot': [], 'language': []}\n return self._select_features(df, types_of_features, categorical_featnames, language_featnames, continuous_featnames)\n\n def _select_features(self, df, types_of_features, categorical_featnames, language_featnames, continuous_featnames):\n features_seclector = {\n INCLUDE: self._select_features_handle_text_include,\n ONLY: self._select_features_handle_text_only,\n IGNORE: self._select_features_handle_text_ignore,\n }.get(self.handle_text, self._select_features_handle_text_ignore)\n return features_seclector(df, types_of_features, categorical_featnames, language_featnames, continuous_featnames)\n\n def __get_feature_type_if_present(self, feature_type):\n \"\"\" Returns crude categorization of feature types \"\"\"\n return self.feature_types_metadata[feature_type] if feature_type in self.feature_types_metadata else []\n\n # TODO: handle collinear features - they will impact results quality\n def preprocess(self, X: DataFrame, is_train=False, vect_max_features=1000, 
model_specific_preprocessing=False):\n if model_specific_preprocessing: # This is hack to work-around pre-processing caching in bagging/stacker models\n X = X.copy()\n if is_train:\n feature_types = self._get_types_of_features(X)\n self.preprocess_train(X, feature_types, vect_max_features)\n X = self.pipeline.transform(X)\n\n return X\n\n def preprocess_train(self, X, feature_types, vect_max_features):\n transformer_list = []\n if len(feature_types['language']) > 0:\n pipeline = Pipeline(steps=[\n (\"preparator\", NlpDataPreprocessor(nlp_cols=feature_types['language'])),\n (\"vectorizer\",\n TfidfVectorizer(ngram_range=self.params['proc.ngram_range'], sublinear_tf=True, max_features=vect_max_features, tokenizer=self.tokenize))\n ])\n transformer_list.append(('vect', pipeline))\n if len(feature_types['onehot']) > 0:\n pipeline = Pipeline(steps=[\n ('generator', OheFeaturesGenerator(cats_cols=feature_types['onehot'])),\n ])\n transformer_list.append(('cats', pipeline))\n if len(feature_types['continuous']) > 0:\n pipeline = Pipeline(steps=[\n ('generator', NumericDataPreprocessor(cont_cols=feature_types['continuous'])),\n ('imputer', SimpleImputer(strategy=self.params['proc.impute_strategy'])),\n ('scaler', StandardScaler())\n ])\n transformer_list.append(('cont', pipeline))\n if len(feature_types['skewed']) > 0:\n pipeline = Pipeline(steps=[\n ('generator', NumericDataPreprocessor(cont_cols=feature_types['skewed'])),\n ('imputer', SimpleImputer(strategy=self.params['proc.impute_strategy'])),\n ('quantile', QuantileTransformer(output_distribution='normal')), # Or output_distribution = 'uniform'\n ])\n transformer_list.append(('skew', pipeline))\n self.pipeline = FeatureUnion(transformer_list=transformer_list)\n self.pipeline.fit(X)\n\n def _set_default_params(self):\n for param, val in get_param_baseline().items():\n self._set_default_param_value(param, val)\n\n def _get_default_searchspace(self, problem_type):\n return get_default_searchspace(problem_type)\n\n # TODO: It could be possible to adaptively set max_iter [1] to approximately respect time_limit based on sample-size, feature-dimensionality, and the solver used.\n # [1] https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#examples-using-sklearn-linear-model-logisticregression\n def fit(self, X_train, Y_train, X_test=None, Y_test=None, time_limit=None, **kwargs):\n hyperparams = self.params.copy()\n\n if self.problem_type == BINARY:\n Y_train = Y_train.astype(int).values\n\n X_train = self.preprocess(X_train, is_train=True, vect_max_features=hyperparams['vectorizer_dict_size'], model_specific_preprocessing=True)\n\n params = {k: v for k, v in self.params.items() if k in self.model_params}\n\n # Ridge/Lasso are using alpha instead of C, which is C^-1\n # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge\n if self.problem_type == REGRESSION:\n # For numerical reasons, using alpha = 0 with the Lasso object is not advised, so we add epsilon\n params['alpha'] = 1 / (params['C'] if params['C'] != 0 else 1e-8)\n params.pop('C', None)\n\n model = self.model_class(**params)\n\n logger.log(15, f'Training Model with the following hyperparameter settings:')\n logger.log(15, model)\n\n self.model = model.fit(X_train, Y_train)\n\n def _predict_proba(self, X, preprocess=True):\n X = self.preprocess(X, is_train=False, model_specific_preprocessing=True)\n return super()._predict_proba(X, preprocess=False)\n\n def hyperparameter_tune(self, X_train, 
X_test, Y_train, Y_test, scheduler_options=None, **kwargs):\n self.fit(X_train=X_train, X_test=X_test, Y_train=Y_train, Y_test=Y_test, **kwargs)\n hpo_model_performances = {self.name: self.score(X_test, Y_test)}\n hpo_results = {}\n self.save()\n hpo_models = {self.name: self.path}\n\n return hpo_models, hpo_model_performances, hpo_results\n\n def get_info(self):\n # TODO: All AG-Tabular models now offer a get_info method:\n # https://github.com/awslabs/autogluon/blob/master/autogluon/utils/tabular/ml/models/abstract/abstract_model.py#L474\n # dict of weights?\n return super().get_info()\n\n def _select_features_handle_text_include(self, df, types_of_features, categorical_featnames, language_featnames, continuous_featnames):\n # continuous = numeric features to rescale\n # skewed = features to which we will apply power (ie. log / box-cox) transform before normalization\n # onehot = features to one-hot encode (unknown categories for these features encountered at test-time are encoded as all zeros). We one-hot encode any features encountered that only have two unique values.\n one_hot_threshold = 10000 # FIXME research memory constraints\n for feature in self.features:\n feature_data = df[feature]\n num_unique_vals = len(feature_data.unique())\n if feature in language_featnames:\n types_of_features['language'].append(feature)\n elif feature in continuous_featnames:\n if np.abs(feature_data.skew()) > self.params['proc.skew_threshold']:\n types_of_features['skewed'].append(feature)\n else:\n types_of_features['continuous'].append(feature)\n elif (feature in categorical_featnames) and (num_unique_vals <= one_hot_threshold):\n types_of_features['onehot'].append(feature)\n return types_of_features\n\n def _select_features_handle_text_only(self, df, types_of_features, categorical_featnames, language_featnames, continuous_featnames):\n for feature in self.features:\n if feature in language_featnames:\n types_of_features['language'].append(feature)\n return types_of_features\n\n def _select_features_handle_text_ignore(self, df, types_of_features, categorical_featnames, language_featnames, continuous_featnames):\n # continuous = numeric features to rescale\n # skewed = features to which we will apply power (ie. log / box-cox) transform before normalization\n # onehot = features to one-hot encode (unknown categories for these features encountered at test-time are encoded as all zeros). We one-hot encode any features encountered that only have two unique values.\n one_hot_threshold = 10000 # FIXME research memory constraints\n for feature in self.features:\n feature_data = df[feature]\n num_unique_vals = len(feature_data.unique())\n if feature in continuous_featnames:\n if '__nlp__' in feature:\n continue\n if np.abs(feature_data.skew()) > self.params['proc.skew_threshold']:\n types_of_features['skewed'].append(feature)\n else:\n types_of_features['continuous'].append(feature)\n elif (feature in categorical_featnames) and (num_unique_vals <= one_hot_threshold):\n types_of_features['onehot'].append(feature)\n return types_of_features\n"
] |
[
[
"sklearn.pipeline.FeatureUnion",
"sklearn.preprocessing.QuantileTransformer",
"sklearn.impute.SimpleImputer",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
jbofill10/COIVID-19-EDA
|
[
"816a4b4d75bbdbcbcaaf17fb844d1524ec614ca1"
] |
[
"EDA/task1_transmission_incubation/Transmission.py"
] |
[
"import spacy\nimport pandas as pd\nimport os\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\nfrom spacy.matcher import Matcher\n\nfrom tqdm import tqdm\n\n\ndef transmission(df):\n style.use('seaborn-poster')\n style.use('ggplot')\n\n if not os.path.exists('Data/pickles/transmission_pickle'):\n transmit_keywords = ['transmit', 'transmission']\n findings = dict()\n paper_id_tracker = set()\n temp_papers = list()\n\n transmission_papers = pd.DataFrame(columns=['paper_id', 'title', 'abstract', 'body'])\n body_text = df['body'].values\n\n nlp = spacy.load(\"en_core_web_sm\", disable=['ner', 'parser'])\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n matcher = Matcher(nlp.vocab)\n\n for i in transmit_keywords:\n matcher.add(i, build_transmission_pattern(i))\n\n index = 0\n for i in tqdm(body_text):\n spacy.prefer_gpu()\n texts = i.split('.')\n for docs in nlp.pipe(texts, disable=['parser', 'ner', 'entity_linker']):\n for match_id, start, end in matcher(docs):\n if df.loc[index]['paper_id'] not in paper_id_tracker:\n paper_id_tracker.add(df.loc[index]['paper_id'])\n temp_papers.append([df.loc[index]['paper_id'], df.loc[index]['title'], df.loc[index]['abstract'], df.loc[index]['body']])\n if str(docs[start:end]) in findings:\n findings[str(docs[start:end]).lower()] += 1\n else:\n findings[str(docs[start:end]).lower()] = 1\n index += 1\n\n with open('Data/pickles/transmission_count', 'w') as data:\n json.dumps(findings, data)\n\n for i in temp_papers:\n transmission_papers = pd.concat([transmission_papers, pd.DataFrame([[i[0], i[1], i[2], i[3]]],\n columns=['paper_id', 'title', 'abstract',\n 'body'])])\n\n transmission_papers.to_pickle('Data/pickles/transmission_pickle')\n else:\n\n transmission_techniques = ['aerosol', 'contact', 'surface', 'breathing']\n\n transmission_papers = pd.read_pickle('Data/pickles/transmission_pickle')\n\n transmission_medium_freq = list()\n\n for x in tqdm(transmission_papers['body']):\n for sent in x.split('. 
'):\n if 'transmission' in sent:\n for type in transmission_techniques:\n if type in sent:\n transmission_medium_freq.append(type)\n\n transmission_papers.head()\n freq_list = list()\n\n with open('Data/pickles/transmission_count', 'r') as data:\n findings = json.load(data)\n\n for i in findings:\n for x in range(0, findings[i]):\n freq_list.append(i)\n\n findings_df = pd.DataFrame(freq_list, columns=['variation'])\n transmission_mediums_df = pd.DataFrame(transmission_medium_freq, columns=['medium'])\n print(transmission_mediums_df['medium'].value_counts())\n colors = ['#51c4e9', '#4a47a3', '#ad62aa', '#eab9c9']\n plt.pie(transmission_mediums_df['medium'].value_counts(),\n labels=[i.capitalize() for i in transmission_mediums_df['medium'].value_counts().index],\n autopct='%1.1f%%', startangle=90, shadow=True, textprops={'fontsize': 14}, colors=colors)\n\n plt.title(\"Common Transmission Mediums Mentioned\", fontsize=25)\n plt.savefig('Charts/TransmissionMediums.png')\n plt.show()\n\n test = findings_df['variation'].value_counts()\n test['test'] = range(0,7)\n'''\n plt.hist(freq_list, bins=5, histtype='bar',\n ec='white', linewidth=1.2, color='#005082')\n plt.title('Word Frequency from NLP on COVID-19 Scientific Literature', fontsize=21)\n plt.xticks(range(0, len(findings)),\n sorted(map(lambda x:x.capitalize(),list(findings.keys()))),\n rotation=25)\n plt.xlabel('Different Variations of \"Transmission\" in COVID-19 Literature',\n fontsize=20, color='black')\n plt.ylabel('Frequency',\n fontsize=20, color='black')\n plt.savefig('Charts/TransmissionFreq')\n plt.show()\n'''\n\ndef build_transmission_pattern(keyword):\n return [[{'LEMMA': keyword.lower()}]]\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.style.use",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"pandas.read_pickle",
"matplotlib.pyplot.show"
]
] |
Tomoya-K-0504/deepSELF
|
[
"0e5a7d0169b3e9edcb5c8d9802140a84ce5cb69a"
] |
[
"deepself/models/nn_models/rnn.py"
] |
[
"from __future__ import print_function, division\n\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom deepself.models.nn_models.nn_utils import initialize_weights\n\nsupported_rnns = {\n 'lstm': nn.LSTM,\n 'rnn': nn.RNN,\n 'gru': nn.GRU\n}\n\n\nfrom dataclasses import dataclass\nfrom deepself.utils.enums import RNNType\nfrom deepself.utils.nn_config import NNModelConfig\n\n\n@dataclass\nclass RNNConfig(NNModelConfig): # RNN model arguments\n # TODO remove \"rnn_\"\n rnn_type: RNNType = RNNType.gru # Type of the RNN. rnn|gru|lstm|deepspeech are supported\n rnn_hidden_size: int = 100 # Hidden size of RNNs\n rnn_n_layers: int = 1 # Number of RNN layers\n max_norm: int = 400 # Norm cutoff to prevent explosion of gradients\n bidirectional: bool = True # Turn off bi-directional RNNs, introduces lookahead convolution\n # TODO change to bn\n batch_norm_size: int = 0 # Batch normalization or not\n seq_len: int = 0 # Length of sequence\n\n\ndef construct_rnn(cfg, output_size):\n \"\"\"\n\n :param cfg: {\n 'rnn_type': 'deepspeech' or lstm or gru,\n 'input_size': input feature size of data\n 'n_layers': Number of layers in rnn\n 'seq_len': Length of time dimension\n 'hidden_size': Number of hidden size in rnn\n 'is_bidirectional': True or False\n 'is_inference_softmax': True or False\n }\n :return:\n \"\"\"\n if len(cfg.input_size) == 2:\n cfg.input_size = cfg.input_size[0]\n\n return RNNClassifier(cfg.input_size, out_time_feature=cfg.seq_len,\n rnn_type=supported_rnns[cfg.rnn_type.value], output_size=output_size,\n rnn_hidden_size=cfg.rnn_hidden_size, n_layers=cfg.rnn_n_layers,\n bidirectional=cfg.bidirectional, batch_norm_size=cfg.get('batch_norm_size'))\n\n\nclass SequenceWise(nn.Module):\n def __init__(self, module):\n \"\"\"\n Collapses input of dim T*N*H to (T*N)*H, and applies to a module.\n Allows handling of variable sequence lengths and minibatch sizes.\n :param module: Module to apply input to.\n \"\"\"\n super(SequenceWise, self).__init__()\n self.module = module\n\n def forward(self, x):\n t, n = x.size(0), x.size(1)\n # if sequence-wise, normalize at last dimension, should be n x t direction\n x = x.reshape(t * n, -1) # t x n x h -> (t x n) x h\n x = self.module(x)\n x = x.view(t, n, -1)\n return x\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + ' (\\n'\n tmpstr += self.module.__repr__()\n tmpstr += ')'\n return tmpstr\n\n\nclass BatchRNN(nn.Module):\n def __init__(self, input_size, hidden_size, batch_norm_size, sequence_wise=False, rnn_type=nn.LSTM,\n bidirectional=False):\n super(BatchRNN, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bidirectional = bidirectional\n self.batch_norm = SequenceWise(nn.BatchNorm1d(batch_norm_size)) if sequence_wise else nn.BatchNorm1d(batch_norm_size)\n self.rnn = initialize_weights(\n rnn_type(input_size=input_size, hidden_size=hidden_size, bidirectional=bidirectional, bias=True))\n self.num_directions = 2 if bidirectional else 1\n\n def forward(self, x):\n x = x.transpose(0, 1).transpose(1, 2) # l x n x c -> n x c x l\n x = self.batch_norm(x.to(torch.float))\n x = x.transpose(1, 2).transpose(0, 1) # n x c x l -> l x n x h\n x, _ = self.rnn(x)\n\n if self.bidirectional:\n x = x.view(x.size(0), x.size(1), 2, -1).sum(dim=2).view(x.size(0), x.size(1), -1) # (TxNxH*2) -> (TxNxH) by sum\n\n return x\n\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, batch_norm_size, sequence_wise=False, rnn_type=nn.LSTM,\n bidirectional=False):\n super(RNN, 
self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bidirectional = bidirectional\n self.rnn = initialize_weights(\n rnn_type(input_size=input_size, hidden_size=hidden_size, bidirectional=bidirectional, bias=True))\n self.num_directions = 2 if bidirectional else 1\n\n def forward(self, x):\n x, _ = self.rnn(x.to(torch.float))\n\n if self.bidirectional:\n x = x.view(x.size(0), x.size(1), 2, -1).sum(dim=2).view(x.size(0), x.size(1), -1) # (TxNxH*2) -> (TxNxH) by sum\n return x\n\n\nclass InferenceBatchSoftmax(nn.Module):\n def forward(self, input_):\n if not self.training:\n return torch.exp(nn.LogSoftmax(dim=-1)(input_))\n else:\n return input_\n\n\nclass RNNClassifier(nn.Module):\n def __init__(self, input_size, out_time_feature, output_size, batch_norm_size=None, sequence_wise=False,\n rnn_type=nn.LSTM, rnn_hidden_size=768, n_layers=5, bidirectional=True):\n super(RNNClassifier, self).__init__()\n\n rnns = []\n rnn_cls = BatchRNN if batch_norm_size else RNN\n rnn = rnn_cls(input_size=input_size, hidden_size=rnn_hidden_size, rnn_type=rnn_type,\n bidirectional=bidirectional, batch_norm_size=batch_norm_size, sequence_wise=sequence_wise)\n rnns.append(('0', rnn))\n for x in range(n_layers - 1):\n rnn = rnn_cls(input_size=rnn_hidden_size, hidden_size=rnn_hidden_size,\n rnn_type=rnn_type, bidirectional=bidirectional, batch_norm_size=rnn_hidden_size,\n sequence_wise=sequence_wise)\n rnns.append(('%d' % (x + 1), rnn))\n self.rnns = nn.Sequential(OrderedDict(rnns))\n self.fc = nn.Sequential(\n nn.BatchNorm1d(rnn_hidden_size * out_time_feature),\n # nn.BatchNorm1d(26500),\n # nn.BatchNorm1d(rnn_hidden_size),\n initialize_weights(nn.Linear(rnn_hidden_size * out_time_feature, output_size, bias=False))\n # initialize_weights(nn.Linear(26500, output_size, bias=False))\n # initialize_weights(nn.Linear(rnn_hidden_size, output_size, bias=False))\n )\n self.classify = True if output_size != 1 else False\n\n def forward(self, x):\n x = x.transpose(0, 2).transpose(1, 2) # batch x feature x time -> # time x batch x feature\n\n for rnn in self.rnns:\n x = rnn(x)\n\n x = x.transpose(0, 1) # time x batch x freq -> batch x time x freq\n\n x = x.transpose(1, 2) # batch x sequence x freq -> batch x freq x sequence\n # x = nn.AvgPool1d(kernel_size=x.size(2))(x) # average within sequence, outputs batch x freq x 1\n x = x.reshape(x.size(0), -1)\n\n x = self.fc(x)\n\n if not self.classify:\n return x\n\n x = torch.exp(nn.LogSoftmax(dim=-1)(x))\n\n return x\n\n\nclass DeepSpeech(RNNClassifier):\n def __init__(self, conv, input_size, out_time_feature, rnn_type=nn.LSTM, rnn_hidden_size=768, n_layers=5,\n bidirectional=True, output_size=2):\n super(DeepSpeech, self).__init__(input_size=input_size, out_time_feature=out_time_feature, rnn_type=nn.LSTM,\n rnn_hidden_size=rnn_hidden_size, n_layers=n_layers,\n bidirectional=bidirectional, output_size=output_size, batch_norm_size=input_size)\n\n self.hidden_size = rnn_hidden_size\n self.hidden_layers = n_layers\n self.rnn_type = rnn_type\n self.bidirectional = bidirectional\n\n self.conv = conv\n print(f'Number of parameters\\tconv: {get_param_size(self.conv)}\\trnn: {get_param_size(super())}')\n\n def forward(self, x):\n x = self.conv(x.to(torch.float)) # batch x channel x freq x time\n\n sizes = x.size() # batch x channel x freq_feature x time_feature\n if len(sizes) == 4:\n x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # Collapse feature dimension batch x feature x time\n x = super().forward(x)\n return x\n\n def 
change_last_layer(self, n_classes):\n self.fc[1] = initialize_weights(nn.Linear(self.fc[1].in_features, n_classes, bias=False))\n # print(self.fc[1].in_features)\n # self.fc[1] = nn.Linear(self.fc[1].in_features, n_classes, bias=False)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.nn.LogSoftmax"
]
] |
sujanabasnet/Hackflight
|
[
"98d0f0dee07dbddd81c63bd1997e845ca244106f"
] |
[
"extras/python/hackflight.py"
] |
[
"\"\"\"\nHackflight class in python\n\nCopyright (C) 2021 S.Basnet, N. Manaye, N. Nguyen, S.D. Levy\n\nMIT License\n\"\"\"\n\n\nimport numpy as np\n# from debugging import debug\n\n\nclass Hackflight(object):\n\n def __init__(self, board, receiver, actuator):\n self.board = board\n self.receiver = receiver\n self.actuator = actuator\n\n self.sensors = []\n self.closedloops = []\n\n def addSensor(self, sensor):\n self.sensors.append(sensor)\n\n def addClosedLoopController(self, controller):\n self.closedloops.append(controller)\n\n def begin(self):\n \"\"\"Set up 12 state values, and initializes other classes\n like board and receiver\"\"\"\n # See Bouabdallah (2004)\n self.state = np.zeros(12)\n\n # Start the board\n self.board.begin()\n\n # Initialize the sensors\n self._startSensors()\n\n # Initialize the receiver\n self.receiver.begin()\n\n def update(self):\n \"\"\"Get demands from the receiver and state values from\n the sensors\"\"\"\n # Grab control signal if available and\n # Run closed loop controllers\n demands = self._runClosedLoop(self.receiver.getDemands())\n\n # debug(\"T: %+3.3f R: %+3.3f P: %+3.3f Y: %+3.3f \" %\n # tuple(demands))\n\n # Check Sensors\n self._checkSensors()\n\n return demands\n\n def _startSensors(self):\n return\n\n def _checkReceiver(self):\n \"\"\"Run a receiver class method to get the\n data values from the controller.\"\"\"\n\n self.receiver.getData()\n\n def _checkSensors(self):\n return\n\n def _runClosedLoop(self, demands):\n\n # debug(\"T: %+3.3f R: %+3.3f P: %+3.3f Y: %+3.3f \" %\n # tuple(demands))\n\n for clc in self.closedloops:\n demands = clc.modifyDemands(demands)\n\n return demands\n"
] |
[
[
"numpy.zeros"
]
] |
BrisaDavis/visclaw
|
[
"2ad7217502ce4df2b2452dcbb3eb010d567a2eca"
] |
[
"src/python/visclaw/animation_tools.py"
] |
[
"\"\"\"\nThis animation_tools module contains tools to create animations in Python and\nJupyter notebooks.\n\nThree types of animations are supported: \n - using the ipywidget interact to create a figure with a slider bar, \n - using JSAnimation to create Javascript code that loops over a set of \n images and adds controls to play as an animation.\n - creation of mp4 files using ffmpeg (provided this package is installed).\n\nThe set of images to combine in an animation can be specified as a\nlist of images, a list of `matplotlib` figures, or a directory of\n`png` or other image files.\n\nUtilities are provided to convert between these.\n\nFunctions are provided to create inline animations in Jupyter notebooks or \nstand-alone files that can be viewed in other ways, including \n - An html file with the JSAnimation version,\n - A mp4 file,\n - A reStructured text file with the JSAnimation for inclusion in Sphinx docs.\n\nThe utility function make_anim_from_plotdir can be used to convert the png \nfiles in a Clawpack _plots directory into standalone animations of the types\nlisted above. See the file make_anim.py for an example of how this can be\ninvoked from an applications directory.\n\nSee also:\n https://ipywidgets.readthedocs.io/en/latest/#ipywidgets\n https://github.com/jakevdp/JSAnimation\n\nMore documentation of these functions is needed and they can probably be\nimproved.\n\n\"\"\"\n\n# use Python 3 style print function rather than Python 2 print statements:\nfrom __future__ import print_function \n\nfrom IPython.display import display\nfrom matplotlib import image, animation\nfrom matplotlib import pyplot as plt\nfrom ipywidgets import interact, interact_manual\nimport ipywidgets\nimport io\nfrom matplotlib import pyplot as plt\n\nfrom JSAnimation import IPython_display\n\n\ndef make_plotdir(plotdir='_plots', clobber=True):\n \"\"\"\n Utility function to create a directory for storing a sequence of plot\n files, or if the directory already exists, clear out any old plots. \n If clobber==False then it will abort instead of deleting existing files.\n \"\"\"\n\n import os\n if os.path.isdir(plotdir):\n if clobber:\n os.system(\"rm %s/*\" % plotdir)\n else:\n raise IOError('*** Cannot clobber existing directory %s' % plotdir)\n else:\n os.system(\"mkdir %s\" % plotdir)\n print(\"Figure files for each frame will be stored in \", plotdir)\n\n\ndef save_frame(frameno, plotdir='_plots', fname_base='frame', format='png',\n verbose=False, **kwargs):\n \"\"\"\n After giving matplotlib commands to create the plot for a single frame \n of the desired animation, this can be called to save the figure with\n the appropriate file name such as _plots/frame00001.png.\n \"\"\"\n\n plt.draw()\n filename = '%s/%s%s.%s' % (plotdir, fname_base, str(frameno).zfill(5), format)\n plt.savefig(filename, **kwargs)\n if verbose:\n print(\"Saved \",filename)\n\n\ndef make_anim(plotdir, fname_pattern='frame*.png', figsize=(10,6), dpi=None):\n \"\"\"\n Assumes that a set of frames are available as png files in directory _plots,\n numbered consecutively, e.g. 
frame0000.png, frame0001.png, etc.\n\n Creates an animation based display each frame in turn, and returns anim.\n\n You can then display anim in an IPython notebook, or\n call make_html(anim) to create a stand-alone webpage.\n \"\"\"\n\n import matplotlib\n\n if matplotlib.backends.backend in ['MacOSX']:\n print(\"*** animation.FuncAnimation doesn't work with backend %s\" \\\n % matplotlib.backends.backend)\n print(\"*** Suggest using 'Agg'\")\n return\n \n\n import glob # for finding all files matching a pattern\n\n # Find all frame files:\n filenames = glob.glob('%s/%s' % (plotdir, fname_pattern))\n\n # sort them into increasing order:\n filenames=sorted(filenames)\n\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off') # so there's not a second set of axes\n im = plt.imshow(image.imread(filenames[0]))\n\n def init():\n im.set_data(image.imread(filenames[0]))\n return im,\n\n def animate(i):\n image_i=image.imread(filenames[i])\n im.set_data(image_i)\n return im,\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(filenames), interval=200, blit=True)\n\n return anim\n\n\ndef JSAnimate_images(images, figsize=(10,6), dpi=None):\n\n import matplotlib\n\n if matplotlib.backends.backend in ['MacOSX']:\n print(\"*** animation.FuncAnimation doesn't work with backend %s\" \\\n % matplotlib.backends.backend)\n print(\"*** Suggest using 'Agg'\")\n return\n \n\n fig = plt.figure(figsize=figsize, dpi=None)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off') # so there's not a second set of axes\n\n im = plt.imshow(images[0])\n\n def init():\n im.set_data(images[0])\n return im,\n\n def animate(i):\n im.set_data(images[i])\n return im,\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(images), interval=200, blit=True)\n\n plt.close(fig)\n return anim\n\n\ndef make_html(anim, file_name='anim.html', title=None, raw_html='', \\\n fps=None, embed_frames=True, default_mode='once'):\n \"\"\"\n Take an animation created by make_anim and convert it into a stand-alone\n html file.\n \"\"\"\n\n from JSAnimation.IPython_display import anim_to_html\n\n\n html_body = anim_to_html(anim, fps=fps, embed_frames=embed_frames, \\\n default_mode=default_mode)\n\n html_file = open(file_name,'w')\n html_file.write(\"<html>\\n <h1>%s</h1>\\n\" % title)\n html_file.write(raw_html)\n html_file.write(html_body)\n html_file.close()\n print(\"Created %s\" % file_name)\n\n\ndef make_rst(anim, file_name='anim.rst',\n fps=None, embed_frames=True, default_mode='once'):\n \"\"\"\n Take an animation created by make_anim and convert it into an rst file \n (reStructuredText, for inclusion in Sphinx documentation, for example).\n \"\"\"\n\n from JSAnimation.IPython_display import anim_to_html\n\n\n rst_body = anim_to_html(anim, fps=fps, embed_frames=embed_frames, \\\n default_mode=default_mode)\n\n rst_body = rst_body.split('\\n')\n\n rst_file = open(file_name,'w')\n rst_file.write(\".. raw:: html\\n\")\n for line in rst_body:\n rst_file.write(\" %s\\n\" % line)\n rst_file.close()\n print(\"Created %s\" % file_name)\n print(\"Imbed this in another rst file using:\")\n print(\".. 
include:: %s\" % file_name)\n\n\ndef make_mp4(anim, file_name='anim.mp4',\n fps=None, embed_frames=True, default_mode='once'):\n \"\"\"\n Take an animation and covert to mp4 file using ffmpeg, which must be\n installed.\n \"\"\"\n import os\n\n if not animation.writers.is_available('ffmpeg'):\n print(\"** ffmpeg must be installed to create mp4 file\")\n return\n\n if os.path.splitext(file_name)[1] != '.mp4':\n print(\"*** Might not work if file extension is not .mp4\")\n if fps is None:\n fps = 3\n writer = animation.writers['ffmpeg'](fps=fps)\n anim.save(file_name, writer=writer)\n print(\"Created %s\" % file_name)\n\n\ndef read_images(plotdir, fname_pattern='*.png'):\n\n import glob, os\n images = []\n files = glob.glob(os.path.join(plotdir, fname_pattern))\n for file in files:\n im = plt.imread(file)\n images.append(im)\n return images\n\ndef save_images(images, figsize=(8,6), plotdir='_plots', clobber=True, \\\n fname_base='frame', format='png', verbose=False, **kwargs):\n\n make_plotdir(plotdir=plotdir, clobber=clobber)\n for frameno,image in enumerate(images):\n fig = imshow_noaxes(image, figsize)\n filename = '%s/%s%s.%s' % (plotdir, fname_base, str(frameno).zfill(5), format)\n plt.savefig(filename, format=format, **kwargs)\n plt.close(fig)\n if verbose:\n print(\"Saved \",filename)\n\ndef save_figs(figs, plotdir='_plots', clobber=True, \\\n fname_base='frame', format='png', verbose=False, **kwargs):\n\n make_plotdir(plotdir=plotdir, clobber=clobber)\n for frameno,fig in enumerate(figs):\n filename = '%s/%s%s.%s' % (plotdir, fname_base, str(frameno).zfill(5), format)\n fig.savefig(filename, format=format, **kwargs)\n plt.close(fig)\n if verbose:\n print(\"Saved \",filename)\n\n\ndef make_image(fig, **kwargs):\n \"\"\"\n Take a matplotlib figure *fig* and convert it to an image *im* that \n can be viewed with imshow.\n \"\"\"\n\n import io\n png = io.BytesIO()\n fig.savefig(png,format='png', **kwargs)\n png.seek(0)\n im = plt.imread(png)\n return im\n\ndef make_images(figs, **kwargs):\n \"\"\"\n Take a list of matplotlib figures *figs* and convert to list of images.\n \"\"\"\n\n images = []\n for fig in figs:\n im = make_image(fig, **kwargs)\n images.append(im)\n return images\n\ndef imshow_noaxes(im, figsize=(8,6)):\n fig = plt.figure(figsize=figsize)\n ax = plt.axes()\n plt.imshow(im)\n ax.axis('off')\n return fig\n \ndef interact_animate_images(images, figsize=(10,6), manual=False, TextInput=False):\n\n def display_frame(frameno): \n imshow_noaxes(images[frameno], figsize=figsize)\n\n if TextInput:\n if TextInput:\n print(\"Valid frameno values: from %i to %i\" % (0,len(images)-1))\n widget = ipywidgets.IntText(min=0,max=len(images)-1, value=0)\n else:\n widget = ipywidgets.IntSlider(min=0,max=len(images)-1, value=0)\n\n if manual:\n interact_manual(display_frame, frameno=widget)\n else:\n interact(display_frame, frameno=widget)\n\ndef interact_animate_figs(figs, manual=False, TextInput=False):\n\n def display_frame(frameno): \n display(figs[frameno])\n\n if TextInput:\n widget = ipywidgets.IntText(min=0,max=len(figs)-1, value=0)\n else:\n widget = ipywidgets.IntSlider(min=0,max=len(figs)-1, value=0)\n\n if manual:\n if TextInput:\n print(\"Valid frameno values: from %i to %i\" % (0,len(figs)-1))\n interact_manual(display_frame, frameno=widget)\n else:\n interact(display_frame, frameno=widget)\n\n\ndef make_anim_from_plotdir(plotdir='_plots', fignos='all',\n outputs=['mp4','html','rst'], file_name_prefix=None,\n figsize=(5,4), dpi=None, fps=5):\n\n \"\"\"\n After running `make 
plots` using VisClaw, convert the png files in \n the plots directory into stand-alone files that can be embedded in\n webpages or Sphinx documentation.\n\n Call this from a script that starts with:\n import matplotlib\n matplotlib.use('Agg')\n \"\"\"\n import glob, re\n\n if fignos == 'all':\n # determine what fignos are used in the plotdir\n movie_files = glob.glob(plotdir + '/movie*html')\n if len(movie_files) == 0:\n print('No movie files found in %s' % plotdir)\n return\n \n fignos = []\n regexp = re.compile(r\"movie[^ ]*fig(?P<figno>[0-9]*)\\.html\")\n for f in movie_files:\n result = regexp.search(f)\n fignos.append(result.group('figno'))\n\n print(\"Found these figures: %s\" % fignos)\n \n\n for figno in fignos:\n\n fname_pattern = 'frame*fig%s.png' % figno\n anim = make_anim(plotdir, fname_pattern, figsize, dpi)\n\n if 'mp4' in outputs:\n file_name = file_name_prefix + 'fig%s.mp4' % figno\n make_mp4(anim, file_name, fps=fps, \\\n embed_frames=True, default_mode='once')\n\n if 'html' in outputs:\n file_name = file_name_prefix + 'fig%s.html' % figno\n make_html(anim, file_name, fps=fps, \\\n embed_frames=True, default_mode='once')\n\n if 'rst' in outputs:\n file_name = file_name_prefix + 'fig%s.rst' % figno\n make_rst(anim, file_name, fps=fps, \\\n embed_frames=True, default_mode='once')\n\n\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.animation.writers.is_available",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.axes",
"matplotlib.image.imread",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] |
lionelxhub/electra_japanese
|
[
"105d3e42edf84e66d5288939cd73546748eec73a"
] |
[
"pretrain/pretrain_data.py"
] |
[
"# coding=utf-8\r\n# Copyright 2020 The Google Research Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Helpers for preparing pre-training data and supplying them to the model.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\n\r\nimport numpy as np\r\nimport tensorflow.compat.v1 as tf\r\n\r\nimport configure_pretraining\r\nfrom model import tokenization\r\nfrom util import utils\r\n\r\n\r\ndef get_input_fn(config: configure_pretraining.PretrainingConfig, is_training,\r\n num_cpu_threads=4):\r\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\r\n\r\n input_files = []\r\n for input_pattern in config.pretrain_tfrecords.split(\",\"):\r\n input_files.extend(tf.io.gfile.glob(input_pattern))\r\n\r\n def input_fn(params):\r\n \"\"\"The actual input function.\"\"\"\r\n batch_size = params[\"batch_size\"]\r\n\r\n name_to_features = {\r\n \"input_ids\": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),\r\n \"input_mask\": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),\r\n \"segment_ids\": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),\r\n }\r\n\r\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\r\n d = d.repeat()\r\n d = d.shuffle(buffer_size=len(input_files))\r\n\r\n # `cycle_length` is the number of parallel files that get read.\r\n cycle_length = min(num_cpu_threads, len(input_files))\r\n\r\n # `sloppy` mode means that the interleaving is not exact. This adds\r\n # even more randomness to the training pipeline.\r\n d = d.apply(\r\n tf.data.experimental.parallel_interleave(\r\n tf.data.TFRecordDataset,\r\n sloppy=is_training,\r\n cycle_length=cycle_length))\r\n d = d.shuffle(buffer_size=100)\r\n\r\n # We must `drop_remainder` on training because the TPU requires fixed\r\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\r\n # and we *don\"t* want to drop the remainder, otherwise we wont cover\r\n # every sample.\r\n d = d.apply(\r\n tf.data.experimental.map_and_batch(\r\n lambda record: _decode_record(record, name_to_features),\r\n batch_size=batch_size,\r\n num_parallel_batches=num_cpu_threads,\r\n drop_remainder=True))\r\n return d\r\n\r\n return input_fn\r\n\r\n\r\ndef _decode_record(record, name_to_features):\r\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\r\n example = tf.io.parse_single_example(record, name_to_features)\r\n\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for name in list(example.keys()):\r\n t = example[name]\r\n if t.dtype == tf.int64:\r\n t = tf.cast(t, tf.int32)\r\n example[name] = t\r\n\r\n return example\r\n\r\n\r\n# model inputs - it's a bit nicer to use a namedtuple rather than keep the\r\n# features as a dict\r\nInputs = collections.namedtuple(\r\n \"Inputs\", [\"input_ids\", \"input_mask\", \"segment_ids\", \"masked_lm_positions\",\r\n \"masked_lm_ids\", \"masked_lm_weights\"])\r\n\r\n\r\ndef features_to_inputs(features):\r\n return Inputs(\r\n input_ids=features[\"input_ids\"],\r\n input_mask=features[\"input_mask\"],\r\n segment_ids=features[\"segment_ids\"],\r\n masked_lm_positions=(features[\"masked_lm_positions\"]\r\n if \"masked_lm_positions\" in features else None),\r\n masked_lm_ids=(features[\"masked_lm_ids\"]\r\n if \"masked_lm_ids\" in features else None),\r\n masked_lm_weights=(features[\"masked_lm_weights\"]\r\n if \"masked_lm_weights\" in features else None),\r\n )\r\n\r\n\r\ndef get_updated_inputs(inputs, **kwargs):\r\n features = inputs._asdict()\r\n for k, v in kwargs.items():\r\n features[k] = v\r\n return features_to_inputs(features)\r\n\r\n\r\nENDC = \"\\033[0m\"\r\nCOLORS = [\"\\033[\" + str(n) + \"m\" for n in list(range(91, 97)) + [90]]\r\nRED = COLORS[0]\r\nBLUE = COLORS[3]\r\nCYAN = COLORS[5]\r\nGREEN = COLORS[1]\r\n\r\n\r\ndef print_tokens(inputs: Inputs, inv_vocab, updates_mask=None):\r\n \"\"\"Pretty-print model inputs.\"\"\"\r\n pos_to_tokid = {}\r\n for tokid, pos, weight in zip(\r\n inputs.masked_lm_ids[0], inputs.masked_lm_positions[0],\r\n inputs.masked_lm_weights[0]):\r\n if weight == 0:\r\n pass\r\n else:\r\n pos_to_tokid[pos] = tokid\r\n\r\n text = \"\"\r\n provided_update_mask = (updates_mask is not None)\r\n if not provided_update_mask:\r\n updates_mask = np.zeros_like(inputs.input_ids)\r\n for pos, (tokid, um) in enumerate(\r\n zip(inputs.input_ids[0], updates_mask[0])):\r\n token = inv_vocab[tokid]\r\n if token == \"[PAD]\":\r\n break\r\n if pos in pos_to_tokid:\r\n token = RED + token + \\\r\n \" (\" + inv_vocab[pos_to_tokid[pos]] + \")\" + ENDC\r\n if provided_update_mask:\r\n assert um == 1\r\n else:\r\n if provided_update_mask:\r\n assert um == 0\r\n text += token + \" \"\r\n utils.log(tokenization.printable_text(text))\r\n"
] |
[
[
"tensorflow.compat.v1.io.FixedLenFeature",
"tensorflow.compat.v1.io.parse_single_example",
"tensorflow.compat.v1.data.experimental.parallel_interleave",
"tensorflow.compat.v1.io.gfile.glob",
"numpy.zeros_like",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.constant"
]
] |
aced-differentiate/dft-input-gen
|
[
"14bee323517714c433682bad2dcb897b223dd5ec"
] |
[
"src/dftinputgen/utils.py"
] |
[
"import re\nimport six\nimport numpy as np\n\nfrom ase import io as ase_io\n\nfrom dftinputgen.data import STANDARD_ATOMIC_WEIGHTS\n\n\nclass DftInputGeneratorUtilsError(Exception):\n \"\"\"Base class for errors associated with the helper utilities.\"\"\"\n\n pass\n\n\ndef get_elem_symbol(species_label):\n \"\"\"Get element symbol from species label, e.g. \"Fe\" from \"Fe1\", \"Fe-2\".\n\n NB: Returns the first valid element symbol encountered.\n\n Raises `DftInputGeneratorError` if no valid element symbol was found.\n \"\"\"\n re_formula = re.compile(\"([A-Z][a-z]?)\")\n symbols = re_formula.findall(species_label)\n for symbol in symbols:\n if symbol in STANDARD_ATOMIC_WEIGHTS:\n return symbol\n msg = \"No valid element symbol found\"\n raise DftInputGeneratorUtilsError(msg)\n\n\ndef read_crystal_structure(crystal_structure, **kwargs):\n \"\"\"Use `ase.io.read` to from crystal structure file specified.\"\"\"\n if isinstance(crystal_structure, six.string_types):\n return ase_io.read(crystal_structure, **kwargs)\n else:\n msg = \"Expected type str; found {}\".format(type(crystal_structure))\n raise TypeError(msg)\n\n\ndef get_kpoint_grid_from_spacing(crystal_structure, spacing):\n \"\"\"Get k-point grid for an input crystal structure and k-spacing.\n\n Returns a list [k1, k2, k3] with the dimensions of a uniform\n k-point grid corresponding to the input `spacing`.\n\n Parameters\n ----------\n crystal_structure: `ase.Atoms` object\n Crystal structure for which to calculate k-point grid\n\n spacing: float\n Maximum distance between two k-points on a uniform grid in reciprocal\n space.\n\n Returns\n -------\n k-point grid as a 3 x 1 list of integers.\n\n \"\"\"\n rcell = 2 * np.pi * (np.linalg.inv(crystal_structure.cell).T)\n return list(map(int, np.ceil(np.linalg.norm(rcell, axis=1) / spacing)))\n"
] |
[
[
"numpy.linalg.inv",
"numpy.linalg.norm"
]
] |
Daikon-Sun/AdjustAutocorrelation
|
[
"e57264d5d13a471ad5873062d88be7d81912a229"
] |
[
"main.py"
] |
[
"import logging\nimport numpy as np\nimport sys\nimport os\nimport importlib\n\n\nimport torch\nimport torch.nn as nn\n\n\nfrom args import args\nfrom utils import create_dir, ForecastingData\n\n\ndef run():\n\n if args.task_type == 'forecasting':\n data = ForecastingData()\n\n # datasets = []\n # for i in range(3):\n # datasets.append(data.get_dataset(i))\n\n def model_decay(epoch):\n return args.model_decay_rate**epoch\n def rho_decay(epoch):\n return args.rho_decay_rate**epoch\n\n model_package = importlib.import_module(f'models.{args.task_type}.{args.model_type}')\n org_model = getattr(model_package, args.model_type)().to(args.device)\n model = org_model\n\n if args.model_type == 'AGCRN':\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n # for name, param in model.named_parameters():\n # logging.info(name, param.shape, param.requires_grad)\n\n total_num = sum([param.nelement() for param in model.parameters()])\n logging.info('total num of parameters: {}'.format(total_num))\n\n if args.task_type == 'forecasting':\n n_rho = 1 if args.one_rho else args.n_series\n\n if n_rho == 1:\n rho = torch.tensor(args.init_rho, device=args.device, requires_grad=not args.fix_rho)\n else:\n init_rho = np.ones(n_rho, dtype=np.float32) * args.init_rho\n rho = torch.tensor(init_rho, device=args.device, requires_grad=not args.fix_rho)\n\n runner_package = importlib.import_module(f'runner.{args.task_type}_runner')\n runner = getattr(runner_package, f'{args.task_type}Runner')(model, rho, data)\n runner.run()\n\n\nif __name__ == '__main__':\n\n # torch.backends.cudnn.benchmark = True\n\n if not os.path.isdir(args.output_dir):\n create_dir(args.output_dir)\n\n # FORMAT = '%(asctime)s %(levelname)s: %(message)s'\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n output_file_handler = logging.FileHandler(os.path.join(args.output_dir, 'log.txt'), mode='w')\n stdout_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(output_file_handler)\n logger.addHandler(stdout_handler)\n\n logging.info(args)\n\n run()\n"
] |
[
[
"torch.nn.init.xavier_uniform_",
"torch.nn.init.uniform_",
"numpy.ones",
"torch.tensor"
]
] |
MainRo/deep-speaker
|
[
"48ff6c503bd15eab23ef46911fe48c88cab4058d",
"48ff6c503bd15eab23ef46911fe48c88cab4058d"
] |
[
"deep_speaker/do_train.py",
"deep_speaker/toolbox/audio_features.py"
] |
[
"import asyncio\nimport argparse\nimport pickle\nimport yaml\nfrom deep_speaker.toolbox import import_function\nfrom makinage.data import pull\n\n\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef load_config(config_file):\n with open(config_file, 'r') as f:\n return yaml.load(f)\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"deep-speaker train\")\n parser.add_argument(\"--config\", required=True)\n return parser.parse_args()\n\n\ndef label_to_array(label):\n a = np.zeros(9300, dtype=np.float)\n a[label] = 1.0\n return a\n\n\nasync def train(config):\n loop = asyncio.get_running_loop()\n #device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_count = config['config']['train']['epoch_count']\n batch_size = config['config']['train']['batch_size']\n\n criterion = import_function(config['config']['train']['loss'])()\n model = import_function(config['config']['train']['model'])()\n #model.to(device)\n optimizer = import_function(config['config']['train']['optimizer'])(model.parameters())\n\n writer = SummaryWriter(config['config']['train']['summary_path'])\n print(\"train...\")\n\n #writer.add_graph(model, verbose=True)\n #writer.close()\n\n step = 0\n for epoch in range(epoch_count):\n print(\"on epoch {}\".format(epoch))\n train_data = pull(\n loop,\n config['kafka']['endpoint'],\n \"feature_utterance\",\n \"train_set\",\n batch_size=batch_size)\n\n running_loss = 0.0\n i = 0\n async for batch in train_data:\n # get the inputs; data is a list of [inputs, labels]\n batch = [pickle.loads(i.value) for i in batch]\n inputs = [np.expand_dims(i.data, axis=0) for i in batch]\n inputs = np.stack(inputs)\n inputs = torch.from_numpy(inputs)\n\n labels_class = [i.label for i in batch]\n labels_class = np.stack(labels_class)\n labels_class = torch.from_numpy(labels_class)\n\n labels = [label_to_array(i.label) for i in batch]\n labels = np.stack(labels)\n labels = torch.from_numpy(labels)\n\n #inputs, labels = data[0].to(device), data[1].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs.float())\n loss = criterion(outputs, labels_class)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 20 == 0: # print every 2000 mini-batches\n writer.add_scalar('training loss',\n running_loss / 1000,\n step)\n\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n\n i += 1\n step += 1\n\n writer.close()\n print('Finished Training')\n\n\ndef main():\n args = parse_arguments()\n config = load_config(args.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(train(config))\n",
"import numpy as np\nfrom scipy.fftpack import dct\n\n\ndef pre_emphasis(data, pre_emphasis=0.97):\n return np.append(data[0], data[1:] - pre_emphasis * data[:-1])\n\n\ndef frame(data, sample_rate=16000, frame_size=0.025, frame_stride=0.01):\n frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate # Convert from seconds to samples\n signal_length = len(data)\n frame_length = int(round(frame_length))\n frame_step = int(round(frame_step))\n num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) # Make sure that we have at least 1 frame\n\n pad_signal_length = num_frames * frame_step + frame_length\n z = np.zeros((pad_signal_length - signal_length))\n pad_signal = np.append(data, z) # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal\n\n indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T\n frames = pad_signal[indices.astype(np.int32, copy=False)]\n return frames, frame_length\n\n\ndef filter_banks(data, sample_rate=16000, nfilt=40, nfft=512):\n low_freq_mel = 0\n high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel\n mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale\n hz_points = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz\n bin = np.floor((nfft + 1) * hz_points / sample_rate)\n\n fbank = np.zeros((nfilt, int(np.floor(nfft / 2 + 1))))\n for m in range(1, nfilt + 1):\n f_m_minus = int(bin[m - 1]) # left\n f_m = int(bin[m]) # center\n f_m_plus = int(bin[m + 1]) # right\n\n for k in range(f_m_minus, f_m):\n fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\n for k in range(f_m, f_m_plus):\n fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\n filter_banks = np.dot(data, fbank.T)\n filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * np.log10(filter_banks) # dB\n return filter_banks\n\n\ndef compute_filter_bank(data, nfft=512, nfilt=64):\n data = pre_emphasis(data)\n data, frame_length = frame(data)\n data *= np.hamming(frame_length)\n data = np.absolute(np.fft.rfft(data, nfft)) # Magnitude of the FFT\n data = ((1.0 / nfft) * ((data) ** 2)) # Power Spectrum\n data = filter_banks(data, nfilt=nfilt, nfft=nfft)\n return data\n\n\ndef compute_mfcc(data, nfft=512, nfilt=64, num_ceps=20, cep_lifter=22):\n data = pre_emphasis(data)\n data, frame_length = frame(data)\n data *= np.hamming(frame_length)\n data = np.absolute(np.fft.rfft(data, nfft)) # Magnitude of the FFT\n data = ((1.0 / nfft) * ((data) ** 2)) # Power Spectrum\n data = filter_banks(data, nfilt=nfilt, nfft=nfft)\n data = dct(data, type=2, axis=1, norm='ortho')[:, 1:(num_ceps + 1)] # mfcc\n\n # liftering\n (nframes, ncoeff) = data.shape\n n = np.arange(ncoeff)\n lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\n data *= lift\n\n return data\n\n"
] |
[
[
"numpy.expand_dims",
"torch.from_numpy",
"numpy.stack",
"torch.utils.tensorboard.SummaryWriter",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.abs",
"numpy.linspace",
"numpy.fft.rfft",
"numpy.arange",
"numpy.finfo",
"numpy.sin",
"scipy.fftpack.dct",
"numpy.append",
"numpy.log10",
"numpy.hamming",
"numpy.floor",
"numpy.zeros"
]
] |
Nightwalker495/SiamFC
|
[
"3813f80c907c87f67f6937e489d3cf452f7d96db"
] |
[
"src/test_tracking.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Author: Milan Ondrasovic <milan.ondrasovic@gmail.com>\n\nimport sys\nimport pathlib\nfrom typing import Iterable, Optional, cast\n\nimport click\nimport cv2 as cv\nimport numpy as np\nimport torch\n\nfrom sot.cfg import TrackerConfig\nfrom sot.tracker import TrackerSiamFC\nfrom sot.utils import cv_to_pil_img, ImageT, pil_to_cv_img\nfrom sot.visual import SiameseTrackingVisualizer\n\n\ndef iter_video_capture() -> Iterable[np.ndarray]:\n cap = cv.VideoCapture(0)\n \n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n yield frame\n\n\ndef is_image_file(file: pathlib.Path) -> bool:\n return file.suffix.lower() in (\".jpg\", \".jpeg\", \".png\")\n\n\ndef iter_dir_imgs(dir_path: str) -> Iterable[np.ndarray]:\n for file in filter(is_image_file, pathlib.Path(dir_path).iterdir()):\n img = cv.imread(str(file), cv.IMREAD_COLOR)\n yield img\n\n\n@click.command()\n@click.option(\"-i\", \"--imgs-dir-path\", help=\"directory path with images\")\n@click.option(\"-m\", \"--model-file-path\", help=\"a pre-trained model file path\")\n@click.option(\n \"-o\", \"--output-dir-path\",\n help=\"directory path for frame tracking previews\")\ndef main(\n imgs_dir_path: Optional[str], model_file_path: Optional[str],\n output_dir_path: Optional[str]) -> int:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n cfg = TrackerConfig()\n tracker = TrackerSiamFC(cfg, device, model_file_path)\n \n curr_exemplar_img = None\n curr_instance_img = None\n curr_response_map = None\n \n def retrieve_exemplar_img(exemplar_img: ImageT) -> None:\n nonlocal curr_exemplar_img\n curr_exemplar_img = pil_to_cv_img(exemplar_img)\n \n def retrieve_instance_img(instance_img: ImageT) -> None:\n nonlocal curr_instance_img\n curr_instance_img = pil_to_cv_img(instance_img)\n \n def retrieve_response_map(response_map: np.ndarray) -> None:\n nonlocal curr_response_map\n curr_response_map = response_map\n \n tracker.on_exemplar_img_extract = retrieve_exemplar_img\n tracker.on_instance_img_extract = retrieve_instance_img\n tracker.on_response_map_calc = retrieve_response_map\n \n if imgs_dir_path is None:\n imgs_iter = iter_video_capture()\n else:\n imgs_iter = iter_dir_imgs(imgs_dir_path)\n is_first = True\n \n visualizer = None\n \n for frame in imgs_iter:\n if is_first:\n # bbox = np.asarray(cv.selectROI(\"tracker initialization\", frame))\n bbox = np.asarray((262, 127, 386, 66))\n tracker.init(cv_to_pil_img(frame), bbox)\n visualizer = SiameseTrackingVisualizer(\n cast(np.ndarray, curr_exemplar_img), border_value=(32, 32, 32),\n output_dir_path=output_dir_path, wait_key=1)\n is_first = False\n else:\n bbox_pred = tracker.update(cv_to_pil_img(frame))\n if not visualizer.show_curr_state(\n frame, cast(np.ndarray, curr_instance_img),\n cast(np.ndarray, curr_response_map), bbox_pred):\n break\n \n visualizer.close()\n \n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n"
] |
[
[
"numpy.asarray",
"torch.cuda.is_available"
]
] |
irwanmazlin/Object-Detection
|
[
"943a08cd142ebe6b841910cac1a0ae85a6e4b23a"
] |
[
"vehicle detection/tensorflow-yolov4-tflite/core/utils.py"
] |
[
"import cv2\nimport random\nimport colorsys\nimport numpy as np\nimport tensorflow as tf\nfrom core.config import cfg\n\ndef load_freeze_layer(model='yolov4', tiny=False):\n if tiny:\n if model == 'yolov3':\n freeze_layouts = ['conv2d_9', 'conv2d_12']\n else:\n freeze_layouts = ['conv2d_17', 'conv2d_20']\n else:\n if model == 'yolov3':\n freeze_layouts = ['conv2d_58', 'conv2d_66', 'conv2d_74']\n else:\n freeze_layouts = ['conv2d_93', 'conv2d_101', 'conv2d_109']\n return freeze_layouts\n\ndef load_weights(model, weights_file, model_name='yolov4', is_tiny=False):\n if is_tiny:\n if model_name == 'yolov3':\n layer_size = 13\n output_pos = [9, 12]\n else:\n layer_size = 21\n output_pos = [17, 20]\n else:\n if model_name == 'yolov3':\n layer_size = 75\n output_pos = [58, 66, 74]\n else:\n layer_size = 110\n output_pos = [93, 101, 109]\n wf = open(weights_file, 'rb')\n major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)\n\n j = 0\n for i in range(layer_size):\n conv_layer_name = 'conv2d_%d' %i if i > 0 else 'conv2d'\n bn_layer_name = 'batch_normalization_%d' %j if j > 0 else 'batch_normalization'\n\n conv_layer = model.get_layer(conv_layer_name)\n filters = conv_layer.filters\n k_size = conv_layer.kernel_size[0]\n in_dim = conv_layer.input_shape[-1]\n\n if i not in output_pos:\n # darknet weights: [beta, gamma, mean, variance]\n bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)\n # tf weights: [gamma, beta, mean, variance]\n bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]\n bn_layer = model.get_layer(bn_layer_name)\n j += 1\n else:\n conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)\n\n # darknet shape (out_dim, in_dim, height, width)\n conv_shape = (filters, in_dim, k_size, k_size)\n conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))\n # tf shape (height, width, in_dim, out_dim)\n conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])\n\n if i not in output_pos:\n conv_layer.set_weights([conv_weights])\n bn_layer.set_weights(bn_weights)\n else:\n conv_layer.set_weights([conv_weights, conv_bias])\n\n # assert len(wf.read()) == 0, 'failed to read all data'\n wf.close()\n\n\ndef read_class_names(class_file_name):\n names = {}\n with open(class_file_name, 'r') as data:\n for ID, name in enumerate(data):\n names[ID] = name.strip('\\n')\n return names\n\ndef load_config(FLAGS):\n if FLAGS.tiny:\n STRIDES = np.array(cfg.YOLO.STRIDES_TINY)\n ANCHORS = get_anchors(cfg.YOLO.ANCHORS_TINY, FLAGS.tiny)\n XYSCALE = cfg.YOLO.XYSCALE_TINY if FLAGS.model == 'yolov4' else [1, 1]\n else:\n STRIDES = np.array(cfg.YOLO.STRIDES)\n if FLAGS.model == 'yolov4':\n ANCHORS = get_anchors(cfg.YOLO.ANCHORS, FLAGS.tiny)\n elif FLAGS.model == 'yolov3':\n ANCHORS = get_anchors(cfg.YOLO.ANCHORS_V3, FLAGS.tiny)\n XYSCALE = cfg.YOLO.XYSCALE if FLAGS.model == 'yolov4' else [1, 1, 1]\n NUM_CLASS = len(read_class_names(cfg.YOLO.CLASSES))\n\n return STRIDES, ANCHORS, NUM_CLASS, XYSCALE\n\ndef get_anchors(anchors_path, tiny=False):\n anchors = np.array(anchors_path)\n if tiny:\n return anchors.reshape(2, 3, 2)\n else:\n return anchors.reshape(3, 3, 2)\n\ndef image_preprocess(image, target_size, gt_boxes=None):\n\n ih, iw = target_size\n h, w, _ = image.shape\n\n scale = min(iw/w, ih/h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv2.resize(image, (nw, nh))\n\n image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)\n dw, dh = (iw - nw) // 2, (ih-nh) // 2\n image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized\n 
image_paded = image_paded / 255.\n\n if gt_boxes is None:\n return image_paded\n\n else:\n gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw\n gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh\n return image_paded, gt_boxes\n\ndef draw_bbox(image, bboxes, classes=read_class_names(cfg.YOLO.CLASSES), show_label=True):\n num_classes = len(classes)\n image_h, image_w, _ = image.shape\n hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n\n random.seed(0)\n random.shuffle(colors)\n random.seed(None)\n\n out_boxes, out_scores, out_classes, num_boxes = bboxes\n for i in range(num_boxes[0]):\n if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) > num_classes: continue\n coor = out_boxes[0][i]\n coor[0] = int(coor[0] * image_h)\n coor[2] = int(coor[2] * image_h)\n coor[1] = int(coor[1] * image_w)\n coor[3] = int(coor[3] * image_w)\n\n print(\"coordinate x_min\",int(coor[1]))\n\n image_crop = image[int(coor[0]):int(coor[2]), int(coor[1]):int(coor[3])]\n cv2.imwrite('./data/vehicle/vehiclecrop'+ str(i) + '.png', image_crop)\n\n\n fontScale = 0.5\n score = out_scores[0][i]\n class_ind = int(out_classes[0][i])\n bbox_color = colors[class_ind]\n bbox_thick = int(0.6 * (image_h + image_w) / 600)\n c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])\n cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)\n\n if show_label:\n bbox_mess = '%s: %.2f' % (classes[class_ind], score)\n t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]\n c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)\n cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1) #filled\n\n cv2.putText(image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)\n return image\n\ndef bbox_iou(bboxes1, bboxes2):\n \"\"\"\n @param bboxes1: (a, b, ..., 4)\n @param bboxes2: (A, B, ..., 4)\n x:X is 1:n or n:n or n:1\n @return (max(a,A), max(b,B), ...)\n ex) (4,):(3,4) -> (3,)\n (2,1,4):(2,3,4) -> (2,3)\n \"\"\"\n bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]\n bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]\n\n bboxes1_coor = tf.concat(\n [\n bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,\n bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n bboxes2_coor = tf.concat(\n [\n bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,\n bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n\n left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n\n union_area = bboxes1_area + bboxes2_area - inter_area\n\n iou = tf.math.divide_no_nan(inter_area, union_area)\n\n return iou\n\n\ndef bbox_giou(bboxes1, bboxes2):\n \"\"\"\n Generalized IoU\n @param bboxes1: (a, b, ..., 4)\n @param bboxes2: (A, B, ..., 4)\n x:X is 1:n or n:n or n:1\n @return (max(a,A), max(b,B), ...)\n ex) (4,):(3,4) -> (3,)\n (2,1,4):(2,3,4) -> (2,3)\n \"\"\"\n bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]\n bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]\n\n bboxes1_coor = tf.concat(\n [\n bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,\n bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n bboxes2_coor = tf.concat(\n [\n bboxes2[..., :2] - bboxes2[..., 2:] 
* 0.5,\n bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n\n left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n\n union_area = bboxes1_area + bboxes2_area - inter_area\n\n iou = tf.math.divide_no_nan(inter_area, union_area)\n\n enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n enclose_right_down = tf.maximum(\n bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]\n )\n\n enclose_section = enclose_right_down - enclose_left_up\n enclose_area = enclose_section[..., 0] * enclose_section[..., 1]\n\n giou = iou - tf.math.divide_no_nan(enclose_area - union_area, enclose_area)\n\n return giou\n\n\ndef bbox_ciou(bboxes1, bboxes2):\n \"\"\"\n Complete IoU\n @param bboxes1: (a, b, ..., 4)\n @param bboxes2: (A, B, ..., 4)\n x:X is 1:n or n:n or n:1\n @return (max(a,A), max(b,B), ...)\n ex) (4,):(3,4) -> (3,)\n (2,1,4):(2,3,4) -> (2,3)\n \"\"\"\n bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]\n bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]\n\n bboxes1_coor = tf.concat(\n [\n bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,\n bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n bboxes2_coor = tf.concat(\n [\n bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,\n bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n\n left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n\n union_area = bboxes1_area + bboxes2_area - inter_area\n\n iou = tf.math.divide_no_nan(inter_area, union_area)\n\n enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n enclose_right_down = tf.maximum(\n bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]\n )\n\n enclose_section = enclose_right_down - enclose_left_up\n\n c_2 = enclose_section[..., 0] ** 2 + enclose_section[..., 1] ** 2\n\n center_diagonal = bboxes2[..., :2] - bboxes1[..., :2]\n\n rho_2 = center_diagonal[..., 0] ** 2 + center_diagonal[..., 1] ** 2\n\n diou = iou - tf.math.divide_no_nan(rho_2, c_2)\n\n v = (\n (\n tf.math.atan(\n tf.math.divide_no_nan(bboxes1[..., 2], bboxes1[..., 3])\n )\n - tf.math.atan(\n tf.math.divide_no_nan(bboxes2[..., 2], bboxes2[..., 3])\n )\n )\n * 2\n / np.pi\n ) ** 2\n\n alpha = tf.math.divide_no_nan(v, 1 - iou + v)\n\n ciou = diou - alpha * v\n\n return ciou\n\ndef nms(bboxes, iou_threshold, sigma=0.3, method='nms'):\n \"\"\"\n :param bboxes: (xmin, ymin, xmax, ymax, score, class)\n\n Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf\n https://github.com/bharatsingh430/soft-nms\n \"\"\"\n classes_in_img = list(set(bboxes[:, 5]))\n best_bboxes = []\n\n for cls in classes_in_img:\n cls_mask = (bboxes[:, 5] == cls)\n cls_bboxes = bboxes[cls_mask]\n\n while len(cls_bboxes) > 0:\n max_ind = np.argmax(cls_bboxes[:, 4])\n best_bbox = cls_bboxes[max_ind]\n best_bboxes.append(best_bbox)\n cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])\n iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])\n weight = np.ones((len(iou),), dtype=np.float32)\n\n assert method in ['nms', 'soft-nms']\n\n if method == 'nms':\n iou_mask = iou > iou_threshold\n weight[iou_mask] = 0.0\n\n if method == 'soft-nms':\n weight = np.exp(-(1.0 * iou ** 2 / sigma))\n\n 
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight\n score_mask = cls_bboxes[:, 4] > 0.\n cls_bboxes = cls_bboxes[score_mask]\n\n return best_bboxes\n\ndef freeze_all(model, frozen=True):\n model.trainable = not frozen\n if isinstance(model, tf.keras.Model):\n for l in model.layers:\n freeze_all(l, frozen)\ndef unfreeze_all(model, frozen=False):\n model.trainable = not frozen\n if isinstance(model, tf.keras.Model):\n for l in model.layers:\n unfreeze_all(l, frozen)\n\n"
] |
[
[
"numpy.fromfile",
"tensorflow.concat",
"numpy.product",
"tensorflow.maximum",
"tensorflow.minimum",
"numpy.full",
"numpy.concatenate",
"numpy.argmax",
"numpy.float32",
"numpy.exp",
"numpy.array",
"tensorflow.math.divide_no_nan"
]
] |
wenbingl/keras-onnx
|
[
"2dd93301024b1e296a8d77be72483f35506478be"
] |
[
"keras2onnx/main.py"
] |
[
"###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport os\nfrom .proto import onnx, get_opset_number_from_onnx\nfrom .topology import convert_topology\nfrom .common import with_variable\nfrom .ke2onnx import *\nfrom .parser import *\nfrom ._builtin import *\n\n_TF_SESSION = None\n\n\n@with_variable('pb_visual_writer')\ndef get_tensorboard_writer():\n pb_visual_writer = None\n _tb_log_dir = os.environ.get('TB_LOG_DIR')\n if _tb_log_dir:\n from tensorflow.python.summary import summary\n pb_visual_writer = summary.FileWriter(_tb_log_dir)\n setattr(get_tensorboard_writer, 'pb_visual_writer', pb_visual_writer)\n return pb_visual_writer\n\n\ndef _build_opmap_from_keras(model):\n # type: (keras.Model) -> []\n\n static_set_ke2onnx_converters(set_converter)\n output_dict = {}\n for l_ in model.layers:\n if hasattr(l_, 'layers'):\n dict = _build_opmap_from_keras(l_)\n output_dict.update(dict)\n continue\n\n for node_ in extract_inbound_nodes(l_):\n for ts_ in node_.output_tensors:\n output_dict[GRAPH_OUTMOST_NAME + '/' + ts_.op.name] = l_\n\n return output_dict\n\n\ndef _convert_tf(name, tf_graph_def, keras_op_table, output_names, target_opset, doc_string, channel_first_inputs=None):\n # type: (str, tf.GraphDef, {}, [], int, str, []) -> onnx.ModelProto\n if target_opset is None:\n target_opset = get_opset_number_from_onnx()\n\n with tf.Graph().as_default() as tf_graph:\n tf.import_graph_def(tf_graph_def, name=GRAPH_OUTMOST_NAME)\n if get_tensorboard_writer() is not None:\n get_tensorboard_writer().add_graph(tf_graph)\n\n output_names = [GRAPH_OUTMOST_NAME + '/' + name for name in output_names]\n\n topology = parse_graph(tf_graph, keras_op_table, target_opset, output_names)\n topology.compile()\n\n return convert_topology(topology, name, doc_string, target_opset, channel_first_inputs)\n\n\ndef convert_keras(model, name=None, doc_string='', target_opset=None, channel_first_inputs=None):\n # type: (keras.Model, str, str, int, []) -> onnx.ModelProto\n \"\"\"\n :param model: keras model\n :param name: the converted onnx model internal name\n :param doc_string:\n :param target_opset:\n :param channel_first_inputs: A list of channel first input.\n :return:\n \"\"\"\n from keras import backend as K\n\n if name is None:\n name = model.name\n\n op_dict = _build_opmap_from_keras(model)\n output_names = [n.name for n in model.outputs]\n\n sess = K.get_session()\n out_node = [n_.replace(':0', '') for n_ in output_names]\n tf_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=out_node)\n return _convert_tf(name, tf_graph_def, op_dict, output_names, target_opset, doc_string, channel_first_inputs)\n\n\ndef convert_keras_tf(name, output_names, doc_string='', target_opset=None, channel_first_inputs=None):\n # type: (str, [], str, int, []) -> onnx.ModelProto\n \"\"\"\n Convert the frozen tensorflow model originally defined by Keras\n :param name:\n :param lstm_scope_name:\n :return:\n \"\"\"\n graph_def = tf.GraphDef()\n with tf.gfile.FastGFile(name, 'rb') as f:\n graph_def.ParseFromString(f.read())\n\n return _convert_tf(name, graph_def, None, output_names, target_opset, doc_string, channel_first_inputs)\n"
] |
[
[
"tensorflow.python.summary.summary.FileWriter"
]
] |
mvoofan/pct_code
|
[
"89bba63cc7c6c0f49a552a7d00ca87ef124f8bed"
] |
[
"train.py"
] |
[
"import time\nfrom options.train_options import TrainOptions\nfrom data.data_loader import CreateDataLoader\nfrom models.models import create_model\nfrom util.visualizer import Visualizer\nfrom tqdm import tqdm\nimport numpy as np\nimport os\nimport torch\n\nopt = TrainOptions().parse()\ndata_loader = CreateDataLoader(opt)\ndataset = data_loader.load_data()\ndataset_size = len(data_loader)\nprint('#training images = %d' % dataset_size)\n\nmodel = create_model(opt)\n#visualizer = Visualizer(opt)\ntotal_steps = 0\n\ndata_num=0\n\ntry:\n opt.load_G_net\nexcept NameError:\n print('Error in load_G_net')\nelse:\n if 'empty'==opt.load_G_net:\n print('emtpy in load_G_net')\n else:\n model.netG.load_state_dict(torch.load(opt.load_G_net))\n model.netG.eval()\n\n\ntry:\n opt.load_D_net\nexcept NameError:\n print('Error in load_D_net')\nelse:\n if 'empty'==opt.load_D_net:\n print('emtpy in load_D_net')\n else:\n model.netD.load_state_dict(torch.load(opt.load_D_net))\n model.netD.eval()\n\n\n\n\nfor epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n epoch_iter = 0\n\n if 'pbar' in vars():\n try:\n pbar.close()\n except:\n print('error!')\n \n pbar = tqdm(total=dataset_size)\n\n for i, data in enumerate(dataset):\n pbar.update(1)\n iter_start_time = time.time()\n total_steps += opt.batchSize\n epoch_iter += opt.batchSize\n model.set_input(data)\n\n if total_steps % 100 ==0:\n #show loss value\n print('loss_D: %f loss_G: %f D_learning_rate: %f G_learning_rate %f ' % (model.loss_D.data.cpu().numpy(), model.loss_G.data.cpu().numpy(), model.optimizer_D.param_groups[0]['lr'],model.optimizer_G.param_groups[0]['lr']) )\n #print('loss_G ', model.loss_G.data.cpu().numpy())\n\n\n\n model.optimize_parameters()\n\n '''\n if total_steps % opt.display_freq == 0:\n visualizer.display_current_results(model.get_current_visuals(), epoch)\n '''\n\n '''\n if total_steps % opt.print_freq == 0:\n errors = model.get_current_errors()\n t = (time.time() - iter_start_time) / opt.batchSize\n visualizer.print_current_errors(epoch, epoch_iter, errors, t)\n if opt.display_id > 0:\n visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)\n '''\n\n\n \n\n if total_steps % opt.save_latest_freq == 0:\n print('saving the latest model (epoch %d, total_steps %d)' %\n (epoch, total_steps))\n model.save('latest')\n if total_steps % (500* opt.batchSize) ==0:\n input_A_np=model.input_A.data.cpu().numpy()\n fake_B_np=model.fake_B.data.cpu().numpy()\n input_B_np=model.input_B.data.cpu().numpy()\n np.save(os.path.join(opt.checkpoints_dir, 'bp_step%d' % (data_num*100) ), input_A_np[:,0,:,:,:])\n np.save(os.path.join(opt.checkpoints_dir, 'output%d' % (data_num*100) ), fake_B_np[:,0,:,:,:])\n np.save(os.path.join(opt.checkpoints_dir, 'gt_ct%d' % (data_num*100) ), input_B_np[:,0,:,:,:])\n data_num+=1\n\n if epoch % opt.save_epoch_freq == 0:\n print('saving the model at the end of epoch %d, iters %d' %\n (epoch, total_steps))\n model.save('latest')\n model.save(epoch)\n\n print('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n\n if epoch > opt.niter:\n model.update_learning_rate()\n"
] |
[
[
"torch.load"
]
] |
xmnfw/pywim
|
[
"5d8b1ca467af80444551082588c2c713c3074e77",
"5d8b1ca467af80444551082588c2c713c3074e77"
] |
[
"pywim/utils/dsp/baseline.py",
"pywim/utils/stats/iqr.py"
] |
[
"\"\"\"\nNmrglue is a module for working with NMR data in Python.\nWhen used with the NumPy, SciPy, and matplotlib packages nmrglue provides a\nrobust environment for rapidly developing new methods for processing,\nanalyzing, and visualizing NMR data. Nmrglue also provides a\nframework for connecting existing NMR software packages.\n\nA collection of NMR processing functions for filtering, smoothing, and\ncorrecting spectral baselines.\n\nJ.J. Helmus, C.P. Jaroniec, Nmrglue:\nAn open source Python package for the analysis of multidimensional NMR data,\nJ. Biomol. NMR 2013, 55, 355-367. 10.1007/s10858-013-9718-x.\n\n@see: https://code.google.com/p/nmrglue/\n\n\"\"\"\nimport numpy as np\nimport scipy\nimport scipy.ndimage\nimport scipy.signal\n\n\ndef base(data, nl, nw=0):\n \"\"\"\n Linear (first-order) Baseline Correction based on node list.\n\n Parameters:\n\n * data Array of spectral data.\n * nl List of baseline nodes.\n * nw Node half-width in points.\n\n \"\"\"\n\n if data.ndim == 1:\n data -= calc_bl_linear(data, nl, nw)\n else: # for 2D array loop over traces\n for i, vec in enumerate(data):\n data[i] -= calc_bl_linear(vec, nl, nw)\n return data\n\n\ndef calc_bl_linear(x, nl, nw=0):\n \"\"\"\n Calculate a baseline using linear approximation between nodes\n\n Parameters:\n\n * x 1D data\n * nl List of baseline nodes\n * nw Node half-width in points\n\n \"\"\"\n bl = np.zeros_like(x)\n for i in range(len(nl)-1):\n # minimum and maximum index\n _min = nl[i]\n _max = nl[i+1]\n\n # linspace s1 and s2\n s1 = x[_min-nw:_min+nw+1].mean()\n s2 = x[_max-nw:_max+nw+1].mean()\n bl[_min:_max+1] = np.linspace(s1, s2, _max-_min+1)\n return bl\n\n\ndef cbf(data, last=10, apply=slice(None)):\n \"\"\"\n Constant Baseline correction\n\n Parameters:\n\n * data Array of spectral data.\n * last Percent of -1 axis used to calculate correction.\n * apply Slice describing 0th-axis region(s) to apply correction to.\n Ignored in 1D data.\n\n \"\"\"\n # calculate the correction\n n = data.shape[-1]*last/100.+1.\n corr = data[..., int(-n):].sum(axis=-1)/n\n\n # apply correction\n if data.ndim == 2:\n data[apply] = data[apply] - np.array([corr]).transpose()[apply]\n return data\n else:\n return data-corr\n\n\ndef cbf_explicit(data, calc=slice(None), apply=slice(None)):\n \"\"\"\n Constant Baseline - explicit region\n\n Parameters:\n\n * data Array of spectral data.\n * calc Slice describing region to use for calculating correction.\n * apply Slice describing 0th-axis region(s) to apply correction to.\n Ignored in 1D data.\n\n \"\"\"\n # calculate correction\n n = len(range(data.shape[-1])[calc])\n corr = data[..., calc].sum(axis=-1)/n\n\n # apply correction\n if data.ndim == 2:\n data[apply] = data[apply] - np.array([corr]).transpose()[apply]\n return data\n else:\n return data-corr\n\n\ndef med(data, mw=24, sf=16, sigma=5.0):\n \"\"\"\n Median baseline correction\n\n Algorith described in:\n Friedrichs, M.S. JBNMR 1995 5 147-153.\n\n Parameters:\n\n * data Array of spectral data.\n * mw Median Window size in pts.\n * sf Smooth window size in pts.\n * sigma Standard-deviation of Gaussian in convolution\n\n \"\"\"\n if data.ndim == 1:\n data -= calc_bl_med(data, mw, sf, sigma)\n else:\n for i, vec in enumerate(data):\n data[i] = vec - calc_bl_med(vec, mw, sf, sigma)\n return data\n\n\ndef calc_bl_med(x, mw, sf, sigma):\n \"\"\"\n Calculate a baseline using median baseline correction.\n\n Algorithm described in:\n Friedrichs, M.S. 
JBNMR 1995 5 147-153\n\n Parameter:\n\n x 1D data\n mw Median Window size in pts.\n sf Smooth window size in pts.\n sigma Standard-deviation of Gaussian in convolution.\n\n \"\"\"\n\n # create extrema array (non extrema values are masked out)\n mask = x == scipy.ndimage.median_filter(x, size=3)\n mask[0] = False # first pt always extrema\n mask[-1] = False # last pt always extrema\n e = np.ma.masked_array(x, mask)\n\n # fill in the median vector\n # half_mw = mw/2\n m = scipy.ndimage.median_filter(e, mw+1, mode=\"mirror\")\n # using the median_filter might give slightly different results than\n # described algorithm but is MUCH faster\n\n # convolve with a gaussian\n g = scipy.signal.gaussian(sf, sigma)\n g = g/g.sum()\n\n return scipy.signal.convolve(m, g, mode='same')\n\n\ndef sol_general(data, _filter, w=16, mode='same'):\n \"\"\"\n Solvent filter with generic filter.\n\n Algorithm described in:\n Marion et al. JMR 1989 84 425-430\n\n Parameters:\n\n * data Array of spectral data.\n * filter filter to convolve with data\n * mode mode for output ('valid','same', or 'full')\n\n \"\"\"\n A = _filter.sum()\n if data.ndim == 2:\n _filter = np.atleast_2d(_filter)\n return data-scipy.signal.convolve(data, _filter, mode=mode)/A\n\n\ndef sol_boxcar(data, w=16, mode='same'):\n \"\"\"\n Solvent filter with boxcar filter.\n\n Parameters:\n\n * data Array of spectral data.\n * w Width of convolution window.\n * mode mode for output ('valid','same', or 'full')\n\n \"\"\"\n _filter = scipy.signal.boxcar(w)\n return sol_general(data, _filter, w=w, mode=mode)\n\n\ndef sol_sine(data, w=16, mode='same'):\n \"\"\"\n Solvent filter with sine-bell filter.\n\n Parameters:\n\n * data Array of spectral data.\n * w Width of convolution window.\n * mode mode for output ('valid','same', or 'full')\n\n \"\"\"\n _filter = np.cos(np.pi*np.linspace(-0.5, 0.5, w))\n return sol_general(data, _filter, w=w, mode=mode)\n\n\ndef sol_sine2(data, w=16, mode='same'):\n \"\"\"\n Solvent filter with square sine-bell filter.\n\n Parameters:\n\n * data Array of spectral data.\n * w Width of convolution window.\n * mode mode for output ('valid','same', or 'full')\n\n \"\"\"\n _filter = np.cos(np.pi*np.linspace(-0.5, 0.5, w))**2\n return sol_general(data, _filter, w=w, mode=mode)\n\n\ndef sol_gaussian(data, w=16, mode='same'):\n \"\"\"\n Solvent filter with square gaussian filter.\n\n Parameters:\n\n * data Array of spectral data.\n * w Width of convolution window.\n * mode mode for output ('valid','same', or 'full')\n\n \"\"\"\n _filter = scipy.signal.gaussian(w, w/2.)\n return sol_general(data, _filter, w=w, mode=mode)\n",
"from scipy.stats import scoreatpercentile\nimport pandas as pd\nimport numpy as np\n\n\ndef iqr(data: pd.Series, min_value: float=None) -> (float, float):\n \"\"\"\n IQR: Q3-Q1\n Outlier: Q3 + 1.5*(IQR) > DATA < Q1 - 1.5*IQR\n\n :param data: pd.Series\n :param min_value: float\n :return: mask considering lower bound and upper bound\n\n \"\"\"\n _data = data.copy()\n\n if min_value is not None:\n _data = data[data >= min_value]\n\n q1 = scoreatpercentile(_data, 25)\n q3 = scoreatpercentile(_data, 75)\n _iqr = q3-q1\n\n lb = q1 - _iqr*1.5\n ub = q3 + _iqr*1.5\n\n return np.all(((lb <= data.values), (data.values <= ub)), 0)\n\n\ndef reject_outliers(data: pd.Series) -> pd.Series:\n \"\"\"\n\n :param data:\n :return:\n \"\"\"\n return data[iqr(data)].reset_index(drop=True)\n"
] |
[
[
"numpy.linspace",
"scipy.ndimage.median_filter",
"numpy.atleast_2d",
"numpy.zeros_like",
"numpy.ma.masked_array",
"scipy.signal.boxcar",
"numpy.array",
"scipy.signal.convolve",
"scipy.signal.gaussian"
],
[
"numpy.all",
"scipy.stats.scoreatpercentile"
]
] |
mkduong-ai/grAdapt
|
[
"94c2659b0f6ff9a2984a9dc58e3c83213313bf90"
] |
[
"grAdapt/utils/math/linalg.py"
] |
[
"import scipy.linalg\nimport numpy as np\n\n\ndef inv_stable(A):\n \"\"\"Inverts a quadratic matrix with non-negative eigenvalues\n Inverts using cholesky decomposition if matrix is positive semi-definite.\n If not, inverts matrix using eigenvalue decomposition.\n If eigendecomposition does not succeed then use numpy.linalg.inv\n\n Parameters\n ----------\n A : array-like (n, n)\n Matrix A has non-negative eigenvalues\n\n Returns\n -------\n array-like (n, n)\n \"\"\"\n try:\n return inv_chol(A, symmetric=True)\n except:\n try:\n return inv_eig(A)\n except:\n return np.linalg.inv(A)\n\n\ndef inv_eig(A):\n \"\"\"Inverts a quadratic matrix with non-negative eigenvalues\n Uses eigendecomposition of a matrix.\n\n Parameters\n ----------\n A : array-like (n, n)\n\n Returns\n -------\n The inverse array-like (n, n)\n\n Notes\n -----\n :math:`A = Q V Q^{-1}`\n :math: `A^_{-1} = (Q V Q^{-1})^{-1} = Q^{-1} V^{-1} Q`\n \"\"\"\n try:\n V, Q = np.linalg.eig(A)\n V = np.diag(1/V)\n\n # test symmetry\n symmetric = np.allclose(A, A.T, rtol=1e-3, atol=1e-4)\n\n if symmetric:\n return Q @ V @ Q.T\n else:\n return Q @ V @ np.linalg.inv(Q)\n except:\n raise Exception('Finding inverse of matrix with eigendecomposition failed.')\n\n\ndef inv_chol(A, symmetric=None):\n \"\"\"Inverts a positive semi-definite matrix A (stable)\n Adds a small positive definite matrix and uses cholesky decomposition.\n\n Parameters\n ----------\n A : positive semi-definite matrix (n, n)\n symmetric : boolean\n set True if matrix is symmetric. Else false.\n\n Returns\n -------\n An approximate inverse positive definite matrix (n, n)\n \"\"\"\n not_2d = len(A.shape) != 2\n try:\n not_quadratic = A.shape[0] != A.shape[1]\n except:\n not_quadratic = True\n\n # test symmetry\n if symmetric is None:\n symmetric = np.allclose(A, A.T, rtol=1e-3, atol=1e-4)\n\n if not_2d or not_quadratic:\n raise Exception('Given Matrix is either not 2D or quadratic.')\n elif not symmetric:\n raise Exception('Given matrix is not symmetric.')\n else:\n # make A positive definite for the triangulation\n Id = np.eye(A.shape[0])\n P = Id * 1e-7\n A_tilde = A + P\n # triangulate matrix with cholesky\n L = scipy.linalg.cholesky(A_tilde, lower=True)\n L_inv = scipy.linalg.solve_triangular(L, Id, lower=True)\n A_tilde_inv = L_inv.T @ L_inv\n\n return A_tilde_inv\n"
] |
[
[
"numpy.diag",
"numpy.allclose",
"numpy.linalg.inv",
"numpy.linalg.eig",
"numpy.eye"
]
] |
theshanky/Fingerprint-Liveness
|
[
"4a303cc5043e8aef1bea59b581cf2943b773b97f"
] |
[
"config.py"
] |
[
"# coding: utf8\nimport torch\nimport warnings\n\nclass DefaultConfig(object):\n model = 'MyMobilenet' # 使用的模型,名字必须与models/__init__.py中的名字一致\n env = model #\n ATTACK = 1\n GENUINE = 0\n # train_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset/colored','/home/shashank/Desktop/Fingerprint/dataset/colored/BiometrikaTrain/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset/colored','/home/shashank/Desktop/Fingerprint/dataset/colored/BiometrikaTrain/Spooftest.txt',ATTACK]\n # ]\n # test_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset/colored','/home/shashank/Desktop/Fingerprint/dataset/colored/BiometrikaTest/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset/colored','/home/shashank/Desktop/Fingerprint/dataset/colored/BiometrikaTest/Spooftest.txt',ATTACK]\n # ]\n\n # train_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/BiometrikaTrain/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/BiometrikaTrain/Spooftest.txt',ATTACK]\n # ]\n # test_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/BiometrikaTest/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/BiometrikaTest/Spooftest.txt',ATTACK]\n # ]\n\n #GRAY\n # train_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/gray/train/alivetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/gray/train/faketest.txt',ATTACK]\n # ]\n # test_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/gray/Test/alivetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/gray/Test/faketest.txt',ATTACK]\n # ]\n\n\n #CrossMatch\n # train_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/CrossMatch/Training/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/CrossMatch/Training/Faketest.txt',ATTACK]\n # ]\n # test_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/CrossMatch/Testing/Livetest.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/CrossMatch/Testing/Faketest.txt',ATTACK]\n # ]\n \n #resized\n #/home/shashank/Desktop/Fingerprint/dataset\n # train_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/1test.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/1test.txt',ATTACK]\n # ]\n # test_filelists=[\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/1test.txt',GENUINE],\n # ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/1test.txt',ATTACK]\n # ]\n\n #ultimate\n train_filelists=[\n ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/ultimate/Train/Livetest.txt',GENUINE],\n ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/ultimate/Train/Spooftest.txt',ATTACK]\n ]\n test_filelists=[\n 
['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/ultimate/Test/Livetest.txt',GENUINE],\n        ['/home/shashank/Desktop/Fingerprint/dataset','/home/shashank/Desktop/Fingerprint/dataset/ultimate/Test/Spooftest.txt',ATTACK]\n    ]\n\n    #load_model_path = 'checkpoints/model.pth' # path of the pre-trained model to load; None means do not load\n    load_model_path = None # path of the pre-trained model to load; None means do not load\n\n    batch_size = 8 # batch size\n    use_gpu = torch.cuda.is_available() # use GPU or not\n    #use_gpu = True # use GPU or not\n    num_workers = 8 # how many workers for loading data\n    print_freq = 20 # print info every N batch\n    debug_file = '/tmp/debug' # if os.path.exists(debug_file): enter ipdb\n    result_name = 'result'\n\n    max_epoch = 5\n    lr = 0.01 # initial learning rate\n    lr_decay = 0.5 # when val_loss increase, lr = lr*lr_decay\n    lr_stepsize = 3 # learning rate step size\n    weight_decay = 1e-5 # weight decay (L2 regularization)\n    cropscale = 3.5\n    image_size = 224\ndef parse(self, kwargs):\n    '''\n    Update config parameters according to the kwargs dict\n    '''\n    # update configuration parameters\n    for k, v in kwargs.items():\n        if not hasattr(self, k):\n            # warn or raise an error, depending on your preference\n            warnings.warn(\"Warning: opt has no attribute %s\" %k)\n        setattr(self, k, v)\n\n    # print configuration info\n    print('user config:')\n    for k, v in self.__class__.__dict__.items():\n        if not k.startswith('__'):\n            print(k, getattr(self, k))\n\nDefaultConfig.parse = parse\nopt = DefaultConfig()\n"
] |
[
[
"torch.cuda.is_available"
]
] |
mikeludemann/python-data-visualization
|
[
"e5317505d41ae79389f6eec61cefeca1690935b0"
] |
[
"src/default/slider/index.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\nt = np.arange(0.0, 1.0, 0.001)\na0 = 5\nf0 = 3\ndelta_f = 5.0\ns = a0 * np.sin(2 * np.pi * f0 * t)\nl, = plt.plot(t, s, lw=2)\nax.margins(x=0)\n\naxcolor = 'lightgoldenrodyellow'\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\naxamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\nsfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0, valstep=delta_f)\nsamp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)\n\n\ndef update(val):\n\tamp = samp.val\n\tfreq = sfreq.val\n\tl.set_ydata(amp*np.sin(2*np.pi*freq*t))\n\tfig.canvas.draw_idle()\n\n\nsfreq.on_changed(update)\nsamp.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef reset(event):\n\tsfreq.reset()\n\tsamp.reset()\nbutton.on_clicked(reset)\n\nrax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\nradio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)\n\n\ndef colorfunc(label):\n\tl.set_color(label)\n\tfig.canvas.draw_idle()\nradio.on_clicked(colorfunc)\n\nplt.show()\n"
] |
[
[
"matplotlib.widgets.Button",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.widgets.RadioButtons"
]
] |
semaphoreP/EXOSIMS
|
[
"9b2c25ff0bf1e6378af08a95b04c9e51ef4f1340"
] |
[
"EXOSIMS/SurveySimulation/SLSQPSchedulerF.py"
] |
[
"from EXOSIMS.Prototypes.SurveySimulation import SurveySimulation\nimport astropy.units as u\nimport numpy as np\nfrom ortools.linear_solver import pywraplp\nfrom scipy.optimize import minimize,minimize_scalar\nimport os\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nclass SLSQPSchedulerF(SurveySimulation):\n \"\"\"SLSQPScheduler\n \n This class implements a continuous optimization of integration times\n using the scipy minimize function with method SLSQP. ortools with the CBC \n linear solver is used to find an initial solution consistent with the constraints.\n For details see Savransky et al. 2017 (SPIE).\n\n Args: \n \\*\\*specs:\n user specified values\n\n Notes:\n Due to the time costs of the current comp_per_inttime calculation in GarrettCompleteness\n this should be used with BrownCompleteness.\n\n Requires ortools\n \n \"\"\"\n\n def __init__(self, cacheOptTimes=False, staticOptTimes=False, **specs):\n \n #initialize the prototype survey\n SurveySimulation.__init__(self, **specs)\n\n #Calculate fZmax\n self.valfZmax, self.absTimefZmax = self.ZodiacalLight.calcfZmax(np.arange(self.TargetList.nStars), self.Observatory, self.TargetList, self.TimeKeeping, filter(lambda mode: mode['detectionMode'] == True, self.OpticalSystem.observingModes)[0], self.cachefname)\n\n assert isinstance(staticOptTimes, bool), 'staticOptTimes must be boolean.'\n self.staticOptTimes = staticOptTimes\n self._outspec['staticOptTimes'] = self.staticOptTimes\n\n assert isinstance(cacheOptTimes, bool), 'cacheOptTimes must be boolean.'\n self._outspec['cacheOptTimes'] = cacheOptTimes\n\n\n #some global defs\n self.detmode = filter(lambda mode: mode['detectionMode'] == True, self.OpticalSystem.observingModes)[0]\n self.ohTimeTot = self.Observatory.settlingTime + self.detmode['syst']['ohTime']\n self.maxTime = self.TimeKeeping.missionLife*self.TimeKeeping.missionPortion\n\n self.constraints = {'type':'ineq',\n 'fun': lambda x: self.maxTime.to(u.d).value - np.sum(x[x*u.d > 0.1*u.s]) - \n np.sum(x*u.d > 0.1*u.s).astype(float)*self.ohTimeTot.to(u.d).value,\n 'jac':lambda x: np.ones(len(x))*-1.}\n\n self.t0 = None\n if cacheOptTimes:\n #Generate cache Name########################################################################\n cachefname = self.cachefname + 't0'\n \n if os.path.isfile(cachefname):\n self.vprint(\"Loading cached t0 from %s\"%cachefname)\n with open(cachefname, 'rb') as f:\n self.t0 = pickle.load(f)\n sInds = np.arange(self.TargetList.nStars)\n fZ = np.array([self.ZodiacalLight.fZ0.value]*len(sInds))*self.ZodiacalLight.fZ0.unit\n self.scomp0 = -self.objfun(self.t0.to(u.d).value,sInds,fZ)\n\n\n if self.t0 is None:\n #find nominal background counts for all targets in list\n _, Cbs, Csps = self.OpticalSystem.Cp_Cb_Csp(self.TargetList, range(self.TargetList.nStars), \n self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, 25.0, self.WAint, self.detmode)\n\n #find baseline solution with dMagLim-based integration times\n self.vprint('Finding baseline fixed-time optimal target set.')\n t0 = self.OpticalSystem.calc_intTime(self.TargetList, range(self.TargetList.nStars), \n self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, self.dMagint, self.WAint, self.detmode)\n comp0 = self.Completeness.comp_per_intTime(t0, self.TargetList, range(self.TargetList.nStars), \n self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, self.WAint, self.detmode, C_b=Cbs, C_sp=Csps)\n\n \n solver = pywraplp.Solver('SolveIntegerProblem',pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n xs = [ solver.IntVar(0.0,1.0, 'x'+str(j)) for j in 
range(len(comp0)) ]\n\n #constraint is x_i*t_i < maxtime\n constraint = solver.Constraint(-solver.infinity(),self.maxTime.to(u.day).value)\n for j,x in enumerate(xs):\n constraint.SetCoefficient(x, t0[j].to(u.day).value + self.ohTimeTot.to(u.day).value)\n\n #objective is max x_i*comp_i\n objective = solver.Objective()\n for j,x in enumerate(xs):\n objective.SetCoefficient(x, comp0[j])\n objective.SetMaximization()\n\n cpres = solver.Solve()\n x0 = np.array([x.solution_value() for x in xs])\n self.scomp0 = np.sum(comp0*x0)\n self.t0 = t0\n\n #now find the optimal eps baseline and use whichever gives you the highest starting completeness\n self.vprint('Finding baseline fixed-eps optimal target set.')\n def totCompfeps(eps):\n compstars,tstars,x = self.inttimesfeps(eps, Cbs.to('1/d').value, Csps.to('1/d').value)\n return -np.sum(compstars*x)\n epsres = minimize_scalar(totCompfeps,method='bounded',bounds = [0,1],options = {'disp':True})\n comp_epsmax,t_epsmax,x_epsmax = self.inttimesfeps(epsres['x'],Cbs.to('1/d').value, Csps.to('1/d').value)\n if np.sum(comp_epsmax*x_epsmax) > self.scomp0:\n x0 = x_epsmax\n self.scomp0 = np.sum(comp_epsmax*x_epsmax) \n self.t0 = t_epsmax*u.day\n\n #now optimize the solution\n self.vprint('Optimizing baseline integration times.')\n sInds = np.arange(self.TargetList.nStars)\n fZ = self.valfZmin#np.array([self.ZodiacalLight.fZ0.value]*len(sInds))*self.ZodiacalLight.fZ0.unit\n bounds = [(0,self.maxTime.to(u.d).value) for i in range(len(sInds))]\n initguess = x0*self.t0.to(u.d).value\n ires = minimize(self.objfun, initguess, jac=self.objfun_deriv, args=(sInds,fZ), \n constraints=self.constraints, method='SLSQP', bounds=bounds, options={'maxiter':300,'ftol':1e-3})\n\n assert ires['success'], \"Initial time optimization failed.\"\n\n self.t0 = ires['x']*u.d\n self.scomp0 = -ires['fun']\n\n if cacheOptTimes:\n with open(cachefname,'wb') as f:\n pickle.dump(self.t0, f)\n self.vprint(\"Saved cached optimized t0 to %s\"%cachefname)\n\n\n def inttimesfeps(self,eps,Cb,Csp):\n \"\"\"\n Compute the optimal subset of targets for a given epsilon value\n where epsilon is the maximum completeness gradient.\n\n Everything is in units of days\n \"\"\"\n\n tstars = (-Cb*eps*np.sqrt(np.log(10)) + np.sqrt((Cb*eps)**2.*np.log(10) + \n 5*Cb*Csp**2.*eps))/(2.0*Csp**2.*eps*np.log(10))\n compstars = self.Completeness.comp_per_intTime(tstars*u.day, self.TargetList, \n np.arange(self.TargetList.nStars), self.ZodiacalLight.fZ0, \n self.ZodiacalLight.fEZ0, self.WAint, self.detmode, C_b=Cb/u.d, C_sp=Csp/u.d)\n\n \n solver = pywraplp.Solver('SolveIntegerProblem',pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n xs = [ solver.IntVar(0.0,1.0, 'x'+str(j)) for j in range(len(compstars)) ]\n constraint = solver.Constraint(-solver.infinity(), self.maxTime.to(u.d).value)\n\n for j,x in enumerate(xs):\n constraint.SetCoefficient(x, tstars[j] + self.ohTimeTot.to(u.day).value)\n\n objective = solver.Objective()\n for j,x in enumerate(xs):\n objective.SetCoefficient(x, compstars[j])\n objective.SetMaximization()\n\n cpres = solver.Solve()\n\n x = np.array([x.solution_value() for x in xs])\n\n return compstars,tstars,x\n\n\n def objfun(self,t,sInds,fZ):\n \"\"\"\n Objective Function for SLSQP minimization. Purpose is to maximize summed completeness\n\n Args:\n t (ndarray):\n Integration times in days. 
NB: NOT an astropy quantity.\n sInds (ndarray):\n Target star indices (of same size as t)\n fZ (astropy Quantity):\n Surface brightness of local zodiacal light in units of 1/arcsec2\n Same size as t\n\n \"\"\"\n good = t*u.d >= 0.1*u.s\n\n comp = self.Completeness.comp_per_intTime(t[good]*u.d, self.TargetList, sInds[good], fZ[good], \n self.ZodiacalLight.fEZ0, self.WAint[sInds][good], self.detmode)\n\n return -comp.sum()\n\n\n def objfun_deriv(self,t,sInds,fZ):\n \"\"\"\n Jacobian of objective Function for SLSQP minimization. \n\n Args:\n t (astropy Quantity):\n Integration times in days. NB: NOT an astropy quantity.\n sInds (ndarray):\n Target star indices (of same size as t)\n fZ (astropy Quantity):\n Surface brightness of local zodiacal light in units of 1/arcsec2\n Same size as t\n\n \"\"\"\n good = t*u.d >= 0.1*u.s\n\n tmp = self.Completeness.dcomp_dt(t[good]*u.d, self.TargetList, sInds[good], fZ[good], \n self.ZodiacalLight.fEZ0, self.WAint[sInds][good], self.detmode).to(\"1/d\").value\n\n jac = np.zeros(len(t))\n jac[good] = tmp\n return -jac\n\n\n\n def calc_targ_intTime(self, sInds, startTimes, mode):\n \"\"\"\n Given a subset of targets, calculate their integration times given the\n start of observation time.\n\n This implementation updates the optimized times based on current conditions and \n mission time left.\n\n Note: next_target filter will discard targets with zero integration times.\n \n Args:\n sInds (integer array):\n Indices of available targets\n startTimes (astropy quantity array):\n absolute start times of observations. \n must be of the same size as sInds \n mode (dict):\n Selected observing mode for detection\n\n Returns:\n intTimes (astropy Quantity array):\n Integration times for detection \n same dimension as sInds\n \"\"\"\n \n if self.staticOptTimes:\n intTimes = self.t0[sInds]\n else:\n # assumed values for detection\n #fZ = self.ZodiacalLight.fZ(self.Observatory, self.TargetList, sInds, startTimes, mode)\n fZ = self.valfZmin[sInds]\n\n\n #### instead of actual time left, try bounding by maxTime - detection time used\n #need to update time used in choose_next_target\n \n timeLeft = (self.TimeKeeping.missionLife - self.TimeKeeping.currentTimeNorm)*self.TimeKeeping.missionPortion\n bounds = [(0,timeLeft.to(u.d).value) for i in range(len(sInds))]\n\n initguess = self.t0[sInds].to(u.d).value\n ires = minimize(self.objfun, initguess, jac=self.objfun_deriv, args=(sInds,fZ), constraints=self.constraints,\n method='SLSQP', bounds=bounds, options={'disp':True,'maxiter':300,'ftol':1e-3})\n \n #update default times for these targets\n self.t0[sInds] = ires['x']*u.d\n\n intTimes = ires['x']*u.d\n \n intTimes[intTimes < 0.1*u.s] = 0.0*u.d\n \n return intTimes\n\n def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):\n \"\"\"\n \n Given a subset of targets (pre-filtered by method next_target or some \n other means), select the best next one. 
\n\n Args:\n old_sInd (integer):\n Index of the previous target star\n sInds (integer array):\n Indices of available targets\n slewTimes (astropy quantity array):\n slew times to all stars (must be indexed by sInds)\n intTimes (astropy Quantity array):\n Integration times for detection in units of day\n \n Returns:\n sInd (integer):\n Index of next target star\n waitTime (astropy Quantity):\n the amount of time to wait (this method returns None)\n \n \"\"\"\n \n # calcualte completeness values for current intTimes\n tmpsInds = sInds\n sInds = sInds[np.where(intTimes.value > 1e-15)]#filter out any intTimes that are essentially 0\n if len(sInds) == 0:#If there are no stars... arbitrarily assign 1 day for observation length...\n sInds = tmpsInds #revert to the saved sInds\n intTimes = (np.zeros(len(sInds)) + 1.)*u.d \n \n fZ = self.ZodiacalLight.fZ(self.Observatory, self.TargetList, sInds, \n self.TimeKeeping.currentTimeAbs + slewTimes[sInds], self.detmode)\n comps = self.Completeness.comp_per_intTime(intTimes[np.where(intTimes.value > 1e-15)], self.TargetList, sInds, fZ, \n self.ZodiacalLight.fEZ0, self.WAint[sInds], self.detmode)\n\n # choose target with maximum completeness\n valfZmax = self.valfZmax[sInds]\n valfZmin = self.valfZmin[sInds]\n selectInd = np.argmin((fZ - valfZmin)/(valfZmin - valfZmax)*(1/comps))\n sInd = sInds[selectInd]\n \n return sInd, None\n\n"
] |
[
[
"numpy.log",
"numpy.arange",
"scipy.optimize.minimize_scalar",
"scipy.optimize.minimize",
"numpy.argmin",
"numpy.where",
"numpy.sum"
]
] |
AjayThorve/cuxfilter
|
[
"537ff67de80439a43e0bad7373558f5e25dcb112"
] |
[
"python/cuXfilter/assets/numba_kernels/gpu_histogram.py"
] |
[
"import numpy as np\nfrom numba import cuda\nimport cudf\nimport numba\nimport pyarrow as pa\nimport pandas as pd\nimport io\nimport gc\nfrom typing import Type\n\nfrom ...charts.core.core_chart import BaseChart\n\n@numba.jit(nopython=True,parallel=True)\ndef compute_bin(x, n, xmin, xmax):\n '''\n description:\n compute actual bin number\n input:\n - x: ndarray\n - n: number of bins\n - xmin: min value in x ndarray\n - xmax: max value in x ndarray\n '''\n # special case to mirror NumPy behavior for last bin\n if x == xmax:\n return n - 1 # a_max always in last bin\n\n # SPEEDTIP: Remove the float64 casts if you don't need to exactly reproduce NumPy\n bin = np.int32(n * (np.float64(x) - np.float64(xmin)) / (np.float64(xmax) - np.float64(xmin)))\n\n if bin < 0 or bin >= n:\n return None\n else:\n return bin\n\n@cuda.jit\ndef min_max(x, min_max_array):\n '''\n description:\n cuda jit to calculate the min and max values for the ndarray\n input:\n - x: ndarray\n - min_max_array: cuda.to_device(np.array([dtype_max, dtype_min], dtype=np.float32))\n '''\n nelements = x.shape[0]\n\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n # Array already seeded with starting values appropriate for x's dtype\n # Not a problem if this array has already been updated\n local_min = min_max_array[0]\n local_max = min_max_array[1]\n\n for i in range(start, x.shape[0], stride):\n element = x[i]\n local_min = min(element, local_min)\n local_max = max(element, local_max)\n\n # Now combine each thread local min and max\n cuda.atomic.min(min_max_array, 0, local_min)\n cuda.atomic.max(min_max_array, 1, local_max)\n\n@cuda.jit\ndef histogram(x, x_range, histogram_out):\n '''\n description:\n calculate histogram using cuda.jit\n input:\n x -> ndarray(1-col)\n x_range -> (min,max)\n histogram_out -> cuda.to_device array(np.zeros) that will store the frequencies\n '''\n nbins = histogram_out.shape[0]\n xmin, xmax = x_range\n bin_width = (xmax - xmin) / nbins\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n for i in range(start, x.shape[0], stride):\n # note that calling a numba.jit function from CUDA automatically\n # compiles an equivalent CUDA device function!\n bin_number = compute_bin(x[i], nbins, xmin, xmax)\n # counter[0] = counter[0] + 1\n if bin_number >= 0 and bin_number < histogram_out.shape[0]:\n cuda.atomic.add(histogram_out, bin_number, 1)\n\n\ndef dtype_min_max(dtype):\n '''\n description:\n Get the min and max value for a numeric dtype\n input:\n dtype\n '''\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n else:\n info = np.finfo(dtype)\n return info.min, info.max\n\n@cuda.jit\ndef get_bin_edges(a_range, bin_edges):\n '''\n description:\n cuda jit function calculate the bin edge values\n input:\n - a_range: ndarray containin min and max values of the array\n - bin_edges: result ndarray of shape (binsize,)\n\n '''\n a_min,a_max = a_range\n nbins = bin_edges.shape[0]\n delta = (a_max - a_min) / nbins\n for i in range(bin_edges.shape[0]):\n bin_edges[i] = a_min + i * delta\n\n bin_edges[-1] = a_max # Avoid roundoff error on last point\n\n@cuda.jit\ndef calc_binwise_reduced_column(x, stride, a_range):\n '''\n description:\n cuda jit for creating a full-lenth column with only binned values\n input:\n - x -> single col nd-array\n - stride -> stride value\n - a_range -> min-max values (ndarray => shape(2,))\n '''\n a_min= a_range[0]\n a_max = a_range[1]\n _balancer = 1\n if a_max <= 1:\n _balancer = 100\n start = cuda.grid(1)\n s = cuda.gridsize(1)\n for i in range(start, x.shape[0],s):\n 
if x[i]>= a_min and x[i]<=a_max:\n x[i] = stride*np.int32((x[i])/stride)/_balancer\n else:\n x[i] = -1/_balancer\n\ndef get_binwise_reduced_column(a_gpu, stride, a_range):\n '''\n description:\n calls the cuda jit function calc_binwise_reduced_column and resturns the result\n input:\n - a_gpu -> single col nd-array\n - stride -> stride value\n - a_range -> min-max values (ndarray => shape(2,))\n output:\n - a_gpu -> single col resulting nd-array\n '''\n calc_binwise_reduced_column[64,64](a_gpu,np.float32(stride),a_range)\n return a_gpu\n\ndef calc_value_counts(a_gpu, bins):\n '''\n description:\n main function to calculate histograms\n input:\n - a_gpu: gpu array(cuda ndarray) -> 1-column only\n - bins: number of bins\n output:\n frequencies(ndarray), bin_edge_values(ndarray)\n '''\n ### Find min and max value in array\n dtype_min, dtype_max = dtype_min_max(a_gpu.dtype)\n # Put them in the array in reverse order so that they will be replaced by the first element in the array\n min_max_array_gpu = cuda.to_device(np.array([dtype_max, dtype_min], dtype=np.float32))\n # min_max[64, 64](a_gpu,index_gpu, min_max_array_gpu)\n min_max[64, 64](a_gpu, min_max_array_gpu)\n bin_edges = cuda.to_device(np.zeros(shape=(bins,), dtype=np.float64))\n\n get_bin_edges[64,64](min_max_array_gpu,bin_edges)\n\n ### Bin the data into a histogram\n histogram_out = cuda.to_device(np.zeros(shape=(bins,), dtype=np.int32))\n histogram[64, 64](a_gpu, min_max_array_gpu, histogram_out)\n return bin_edges.copy_to_host(), histogram_out.copy_to_host()\n\ndef calc_groupby(chart: Type[BaseChart], data):\n '''\n description:\n main function to calculate histograms\n input:\n - chart\n - data\n output:\n frequencies(ndarray), bin_edge_values(ndarray)\n '''\n\n y_min, y_max = data[chart.y].min(), data[chart.y].max()\n a_x_range = cuda.to_device(np.asarray([chart.min_value, chart.max_value], dtype=np.float32))\n a_y_range = cuda.to_device(np.asarray([y_min, y_max], dtype=np.float32))\n\n if y_max < 1:\n stride_y = (data[chart.y].max() - data[chart.y].min())/chart.data_points\n else:\n stride_y = chart.stride_type((data[chart.y].max() - data[chart.y].min())/chart.data_points)\n\n temp_df = cudf.DataFrame()\n temp_df.add_column(chart.x, get_binwise_reduced_column(data[chart.x].copy().to_gpu_array(), chart.stride, a_x_range))\n temp_df.add_column(chart.y, data[chart.y].copy().to_gpu_array())\n \n \n groupby_res = temp_df.groupby(by=[chart.x], as_index=False).agg({chart.y:chart.aggregate_fn}).to_pandas()\n\n del(temp_df)\n gc.collect()\n\n return groupby_res.to_numpy().transpose()\n\ndef aggregated_column_unique(chart: Type[BaseChart], data):\n '''\n description:\n main function to calculate histograms\n input:\n - chart\n - data\n output:\n list_of_unique_values\n '''\n\n a_range = cuda.to_device(np.array([chart.min_value, chart.max_value]))\n temp_df = cudf.DataFrame()\n temp_df.add_column(chart.x, get_binwise_reduced_column(data[chart.x].copy().to_gpu_array(), chart.stride, a_range))\n return temp_df[chart.x].unique().to_pandas().tolist()"
] |
[
[
"numpy.asarray",
"numpy.issubdtype",
"numpy.int32",
"numpy.finfo",
"numpy.iinfo",
"numpy.float32",
"numpy.float64",
"numpy.array",
"numpy.zeros"
]
] |
BiomedicalMachineLearning/HEMnet
|
[
"e9b49f09055f8af8f1a46e6645585eb42d296631"
] |
[
"HEMnet/train.py"
] |
[
"# Example command:\n# python /scratch/imb/Xiao/HEMnet/HEMnet/train.py -b /scratch/imb/Xiao/HE_test/10x/ -t train_dataset_10x_19_12_19_strict_Reinhard/tiles_10x/ -l valid_Reinhard/tiles_10x -o HEMnet_14_01_2020 -g 2 -e 10 -s -m vgg16 -a 64 -v\nimport argparse\nimport numpy as np\nimport os\nimport time\nfrom pathlib import Path\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.utils import class_weight\nfrom model import HEMnetModel\n\n\ndef get_class_weights(generator):\n class_weights = class_weight.compute_class_weight(\n 'balanced',\n np.unique(generator.classes),\n generator.classes)\n return class_weights\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--base_dir', type = Path,\n help = 'Base Directory')\n parser.add_argument('-t', '--train_dir', type = Path,\n help = 'Directory containing training input tiles - relative to base directory')\n parser.add_argument('-l', '--valid_dir', type=Path,\n help='Directory containing validation input tiles - relative to base directory')\n parser.add_argument('-o', '--out_dir', type = Path, default=Path(),\n help = 'Output Directory - relative to base directory')\n parser.add_argument('-m', '--cnn_base', type=str, default=\"xception\",\n help='pre-trained convolutional neural network base')\n parser.add_argument('-g', '--num_gpus', type=int, default=2,\n help='Number of GPUs for training model')\n parser.add_argument('-e', '--epochs', type=int, default=100,\n help='Number of epochs for training model')\n parser.add_argument('-a', '--batch_size', type=int, default=32,\n help='Number of tiles for each batch')\n parser.add_argument('-s', '--save_model', action = 'store_true',\n help = 'save model weights')\n parser.add_argument('-v', '--verbosity', action = 'store_true',\n help = 'Increase output verbosity')\n parser.add_argument('-w', '--transfer_learning', action='store_true',\n help='Use CNN base pre-trained from ImageNet')\n parser.add_argument('-f', '--fine_tuning', action='store_true',\n help='Fine-tuning pre-trained model')\n\n\n args = parser.parse_args()\n\n ####################\n # Paths and Inputs #\n ####################\n # Paths\n BASE_PATH = args.base_dir\n TRAIN_INPUT_PATH = BASE_PATH.joinpath(args.train_dir)\n VALID_INPUT_PATH = BASE_PATH.joinpath(args.valid_dir)\n OUTPUT_PATH = BASE_PATH.joinpath(args.out_dir)\n\n # User selectable parameters\n SAVE_MODEL = args.save_model\n CNN_BASE = args.cnn_base\n NUM_GPUS = args.num_gpus\n EPOCHS = args.epochs\n BATCH_SIZE = args.batch_size\n TRANSFER_LEARNING = args.transfer_learning\n FINE_TUNING = args.fine_tuning\n VERBOSE = args.verbosity\n\n\n # Verbose functions\n if VERBOSE:\n verbose_print = lambda *args: print(*args)\n verbose_save_img = lambda img, path, img_type: img.save(path, img_type)\n verbose_save_fig = lambda fig, path, dpi=300: fig.savefig(path, dpi=dpi)\n else:\n verbose_print = lambda *args: None\n verbose_save_img = lambda *args: None\n verbose_save_fig = lambda *args: None\n\n HEMnet = HEMnetModel(cnn_base=CNN_BASE,\n num_gpus=NUM_GPUS,\n transfer_learning=TRANSFER_LEARNING,\n fine_tuning=FINE_TUNING)\n input_size = (HEMnet.get_input_shape()[0], HEMnet.get_input_shape()[1])\n\n train_datagen = ImageDataGenerator(rescale=1. 
/ 255,\n rotation_range=360,\n horizontal_flip=True,\n vertical_flip=True,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2)\n train_generator = train_datagen.flow_from_directory(TRAIN_INPUT_PATH,\n classes=['cancer', 'non-cancer'],\n target_size=input_size,\n batch_size=BATCH_SIZE,\n class_mode='binary',\n shuffle=True)\n valid_datagen = ImageDataGenerator(rescale=1./255)\n valid_generator = valid_datagen.flow_from_directory(VALID_INPUT_PATH,\n classes=['cancer', 'non-cancer'],\n target_size=input_size,\n batch_size=BATCH_SIZE,\n class_mode='binary',\n shuffle=True)\n\n HEMnet.train(train_generator, valid_generator, EPOCHS)\n\n OUTPUT_PATH = OUTPUT_PATH.joinpath('training_results')\n os.makedirs(OUTPUT_PATH, exist_ok=True)\n HEMnet.save_training_results(OUTPUT_PATH)\n if SAVE_MODEL:\n model_save_path = OUTPUT_PATH.joinpath(\"trained_model.h5\")\n HEMnet.save_model(model_save_path)\n\n\n\n\n"
] |
[
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.unique"
]
] |
mouradbelo/pytorch_connectomics
|
[
"bbed3a879ba2b4bcfa215eb2aba04b59533e180a"
] |
[
"connectomics/data/dataset/__init__.py"
] |
[
"import os,sys\nimport numpy as np\nfrom scipy.ndimage import zoom\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torchvision.utils as vutils\n\nfrom .dataset_volume import VolumeDataset\nfrom .dataset_tile import TileDataset\nfrom ..utils import collate_fn_target, collate_fn_test, seg_widen_border, readvol\nfrom ..augmentation import *\n\n__all__ = ['VolumeDataset',\n 'TileDataset']\n\ndef _get_input(cfg, mode='train'):\n dir_name = cfg.DATASET.INPUT_PATH.split('@')\n img_name = cfg.DATASET.IMAGE_NAME.split('@')\n img_name = [dir_name[0] + x for x in img_name]\n\n label = None\n volume = [None]*len(img_name)\n if mode=='train':\n label_name = cfg.DATASET.LABEL_NAME.split('@')\n label_name = [dir_name[0] + x for x in label_name]\n assert len(img_name)==len(label_name)\n label = [None]*len(label_name)\n\n for i in range(len(img_name)):\n volume[i] = readvol(img_name[i])\n if (np.array(cfg.DATASET.DATA_SCALE)!=1).any():\n volume[i] = zoom(volume[i], cfg.DATASET.DATA_SCALE, order=1) \n volume[i] = np.pad(volume[i], ((cfg.DATASET.PAD_SIZE[0],cfg.DATASET.PAD_SIZE[0]), \n (cfg.DATASET.PAD_SIZE[1],cfg.DATASET.PAD_SIZE[1]), \n (cfg.DATASET.PAD_SIZE[2],cfg.DATASET.PAD_SIZE[2])), 'reflect')\n print(f\"volume shape: {volume[i].shape}\")\n\n if mode=='train':\n label[i] = readvol(label_name[i])\n if (np.array(cfg.DATASET.DATA_SCALE)!=1).any():\n label[i] = zoom(label[i], cfg.DATASET.DATA_SCALE, order=0) \n if cfg.DATASET.LABEL_EROSION!=0:\n label[i] = seg_widen_border(label[i],DATASET.LABEL_EROSION)\n if cfg.DATASET.LABEL_BINARY and label[i].max()>1:\n label[i] = label[i]//255\n if cfg.DATASET.LABEL_MAG !=0:\n label[i] = (label[i]/cfg.DATASET.LABEL_MAG).astype(np.float32)\n \n label[i] = np.pad(label[i], ((cfg.DATASET.PAD_SIZE[0],cfg.DATASET.PAD_SIZE[0]), \n (cfg.DATASET.PAD_SIZE[1],cfg.DATASET.PAD_SIZE[1]), \n (cfg.DATASET.PAD_SIZE[2],cfg.DATASET.PAD_SIZE[2])), 'reflect')\n print(f\"label shape: {label[i].shape}\")\n \n #assert volume[i].shape == label[i].shape !MB\n \n \n return volume, label\n\n\ndef get_dataset(cfg, augmentor, mode='train'):\n \"\"\"Prepare dataset for training and inference.\n \"\"\"\n assert mode in ['train', 'test']\n\n label_erosion = 0\n sample_label_size = cfg.MODEL.OUTPUT_SIZE\n sample_invalid_thres = cfg.DATASET.DATA_INVALID_THRES\n augmentor = augmentor\n topt,wopt = -1,-1\n if mode=='train':\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n sample_volume_size = augmentor.sample_size\n sample_label_size = sample_volume_size\n label_erosion = cfg.DATASET.LABEL_EROSION\n sample_stride = (1,1,1)\n topt, wopt = cfg.MODEL.TARGET_OPT, cfg.MODEL.WEIGHT_OPT\n elif mode=='test':\n sample_stride = cfg.INFERENCE.STRIDE\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n \n # dataset\n if cfg.DATASET.DO_CHUNK_TITLE==1:\n label_json = cfg.DATASET.INPUT_PATH+cfg.DATASET.LABEL_NAME if mode=='train' else ''\n dataset = TileDataset(chunk_num=cfg.DATASET.DATA_CHUNK_NUM, chunk_num_ind=cfg.DATASET.DATA_CHUNK_NUM_IND, chunk_iter=cfg.DATASET.DATA_CHUNK_ITER, chunk_stride=cfg.DATASET.DATA_CHUNK_STRIDE,\n volume_json=cfg.DATASET.INPUT_PATH+cfg.DATASET.IMAGE_NAME, label_json=label_json,\n sample_volume_size=sample_volume_size, sample_label_size=sample_label_size,\n sample_stride=sample_stride, sample_invalid_thres = sample_invalid_thres,\n augmentor=augmentor, target_opt = topt, weight_opt = wopt, mode = mode, \n label_erosion = label_erosion, pad_size=cfg.DATASET.PAD_SIZE)\n else:\n if cfg.DATASET.PRE_LOAD_DATA[0] is None: # load from cfg\n volume, label = _get_input(cfg, 
mode=mode)\n else:\n volume, label = cfg.DATASET.PRE_LOAD_DATA\n dataset = VolumeDataset(volume=volume, label=label, \n sample_volume_size=sample_volume_size, sample_label_size=sample_label_size,\n sample_stride=sample_stride, sample_invalid_thres=sample_invalid_thres, \n augmentor=augmentor, target_opt= topt, weight_opt= wopt, mode= mode,\n reject_size_thres= cfg.DATASET.REJECT_SIZE_THRES, reject_p= cfg.DATASET.REJECT_P)\n\n return dataset\n\ndef build_dataloader(cfg, augmentor, mode='train', dataset=None):\n \"\"\"Prepare dataloader for training and inference.\n \"\"\"\n print('Mode: ', mode)\n assert mode in ['train', 'test']\n SHUFFLE = (mode == 'train')\n cf = collate_fn_test \n if mode=='train':\n cf = collate_fn_target\n\n if dataset == None:\n dataset = get_dataset(cfg, augmentor, mode)\n \n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=cfg.SOLVER.SAMPLES_PER_BATCH, shuffle=SHUFFLE, collate_fn = cf,\n num_workers=cfg.SYSTEM.NUM_CPUS, pin_memory=True)\n return img_loader\n"
] |
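A minimal sketch of the scale-then-pad pattern that _get_input above applies to every volume: rescale with scipy.ndimage.zoom only when DATA_SCALE differs from 1, then reflect-pad each axis. The scale factor and pad sizes below are made-up illustration values, not values from any real connectomics config.

import numpy as np
from scipy.ndimage import zoom

vol = np.random.rand(8, 64, 64).astype(np.float32)  # toy (z, y, x) volume
data_scale = (1.0, 0.5, 0.5)  # hypothetical DATA_SCALE
pad = (2, 16, 16)             # hypothetical PAD_SIZE

if (np.array(data_scale) != 1).any():
    vol = zoom(vol, data_scale, order=1)  # order=1: linear interpolation, as for images
vol = np.pad(vol, tuple((p, p) for p in pad), 'reflect')
print(vol.shape)  # (12, 64, 64): each axis grows by 2 * pad after scaling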
[
[
"scipy.ndimage.zoom",
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.pad"
]
] |
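The apis column above lists torch.utils.data.DataLoader; the sketch below mirrors how build_dataloader wires it up, with hypothetical stand-ins for the cfg values and for the collate_fn_target / collate_fn_test helpers (none of these constants come from a real config).

import torch
import torch.utils.data

class ToyDataset(torch.utils.data.Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return torch.full((3,), float(idx)), idx % 2

def collate(batch):  # stand-in for collate_fn_target / collate_fn_test
    xs, ys = zip(*batch)
    return torch.stack(xs), torch.tensor(ys)

mode = 'train'
loader = torch.utils.data.DataLoader(
    ToyDataset(),
    batch_size=4,               # cfg.SOLVER.SAMPLES_PER_BATCH
    shuffle=(mode == 'train'),  # SHUFFLE in build_dataloader
    collate_fn=collate,
    num_workers=0,              # cfg.SYSTEM.NUM_CPUS
    pin_memory=True)
for x, y in loader:
    print(x.shape, y)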
YyongXin/tf-mets
|
[
"dacd9398170f5135feb7135b635d4cc3f6869369",
"dacd9398170f5135feb7135b635d4cc3f6869369"
] |
[
"machinelearning-benchmark/dl/bert/gpu_movie_reviews.py",
"benchmark/mlp/swap-module.py"
] |
[
"import math\nimport datetime\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\n\nimport tensorflow as tf\n\nimport bert\nfrom bert import BertModelLayer\nfrom bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights\nfrom bert.tokenization.bert_tokenization import FullTokenizer\n\n## Load Data!!!\nfrom tensorflow import keras\nimport os\nimport re\n\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--epochs\", type=int,\n default=2,\n help='Number of epochs to run. (Default 2)')\nparser.add_argument(\"--steps\", type=int,\n default=5,\n help='Number of steps per epoch. (Default 5)')\nparser.add_argument(\"--batch_size\", type=int,\n default=48,\n help='Batch size. (Default 48)')\nargs = parser.parse_args()\n\n# Load all files from a directory in a DataFrame.\ndef load_directory_data(directory):\n data = {}\n data[\"sentence\"] = []\n data[\"sentiment\"] = []\n for file_path in tqdm(os.listdir(directory), desc=os.path.basename(directory)):\n with tf.io.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\n data[\"sentence\"].append(f.read())\n data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\n return pd.DataFrame.from_dict(data)\n\n# Merge positive and negative examples, add a polarity column and shuffle.\ndef load_dataset(directory):\n pos_df = load_directory_data(os.path.join(directory, \"pos\"))\n neg_df = load_directory_data(os.path.join(directory, \"neg\"))\n pos_df[\"polarity\"] = 1\n neg_df[\"polarity\"] = 0\n return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\n\n# Download and process the dataset files.\ndef download_and_load_datasets(force_download=False):\n dataset = tf.keras.utils.get_file(\n fname=\"aclImdb.tar.gz\",\n origin=\"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\",\n extract=True)\n\n train_df = load_dataset(os.path.join(os.path.dirname(dataset),\n \"aclImdb\", \"train\"))\n test_df = load_dataset(os.path.join(os.path.dirname(dataset),\n \"aclImdb\", \"test\"))\n\n return train_df, test_df\n\nimport bert\nfrom bert import BertModelLayer\nfrom bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights\n\nclass MovieReviewData:\n DATA_COLUMN = \"sentence\"\n LABEL_COLUMN = \"polarity\"\n\n def __init__(self, tokenizer: FullTokenizer, sample_size=None, max_seq_len=1024):\n self.tokenizer = tokenizer\n self.sample_size = sample_size\n self.max_seq_len = 0\n train, test = download_and_load_datasets()\n\n train, test = map(lambda df: df.reindex(df[MovieReviewData.DATA_COLUMN].str.len().sort_values().index),\n [train, test])\n\n if sample_size is not None:\n assert sample_size % 128 == 0\n train, test = train.head(sample_size), test.head(sample_size)\n # train, test = map(lambda df: df.sample(sample_size), [train, test])\n\n ((self.train_x, self.train_y),\n (self.test_x, self.test_y)) = map(self._prepare, [train, test])\n\n print(\"max seq_len:\", self.max_seq_len)\n self.max_seq_len = min(self.max_seq_len, max_seq_len)\n ((self.train_x, self.train_x_token_types),\n (self.test_x, self.test_x_token_types)) = map(self._pad,\n [self.train_x, self.test_x])\n\n def _prepare(self, df):\n x, y = [], []\n with tqdm(total=df.shape[0], unit_scale=True) as pbar:\n for ndx, row in df.iterrows():\n text, label = row[MovieReviewData.DATA_COLUMN], row[MovieReviewData.LABEL_COLUMN]\n tokens = self.tokenizer.tokenize(text)\n tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n token_ids = 
self.tokenizer.convert_tokens_to_ids(tokens)\n                self.max_seq_len = max(self.max_seq_len, len(token_ids))\n                x.append(token_ids)\n                y.append(int(label))\n                pbar.update()\n        return np.array(x), np.array(y)\n\n    def _pad(self, ids):\n        x, t = [], []\n        token_type_ids = [0] * self.max_seq_len\n        for input_ids in ids:\n            input_ids = input_ids[:min(len(input_ids), self.max_seq_len - 2)]\n            input_ids = input_ids + [0] * (self.max_seq_len - len(input_ids))\n            x.append(np.array(input_ids))\n            t.append(token_type_ids)\n        return np.array(x), np.array(t)\n\nbert_model_name=\"uncased_L-12_H-768_A-12\"\nbert_ckpt_dir = os.path.join(\".model/\",bert_model_name)\nbert_ckpt_file = os.path.join(bert_ckpt_dir, \"bert_model.ckpt\")\nbert_config_file = os.path.join(bert_ckpt_dir, \"bert_config.json\")\n\n# preparing data\ntokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, \"vocab.txt\"))\ndata = MovieReviewData(tokenizer,\n                       sample_size=10*128*2,#5000,\n                       max_seq_len=128)\n\n\nprint(\"            train_x:\", data.train_x.shape)\nprint(\"train_x_token_types:\", data.train_x_token_types.shape)\nprint(\"            train_y:\", data.train_y.shape)\nprint(\"             test_x:\", data.test_x.shape)\nprint(\"        max_seq_len:\", data.max_seq_len)\n\n#             train_x: (2560, 128)\n#train_x_token_types: (2560, 128)\n#             train_y: (2560,)\n#              test_x: (2560, 128)\n#         max_seq_len: 128\n\ndef flatten_layers(root_layer):\n    if isinstance(root_layer, keras.layers.Layer):\n        yield root_layer\n    for layer in root_layer._layers:\n        for sub_layer in flatten_layers(layer):\n            yield sub_layer\n\ndef freeze_bert_layers(l_bert):\n    \"\"\"\n    Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751.\n    \"\"\"\n    for layer in flatten_layers(l_bert):\n        if layer.name in [\"LayerNorm\", \"adapter-down\", \"adapter-up\"]:\n            layer.trainable = True\n        elif len(layer._layers) == 0:\n            layer.trainable = False\n        l_bert.embeddings_layer.trainable = False\n\ndef create_learning_rate_scheduler(max_learn_rate=5e-5,\n                                   end_learn_rate=1e-7,\n                                   warmup_epoch_count=10,\n                                   total_epoch_count=90):\n    def lr_scheduler(epoch):\n        if epoch < warmup_epoch_count:\n            res = (max_learn_rate/warmup_epoch_count) * (epoch + 1)\n        else:\n            res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1))\n        return float(res)\n    learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)\n    return learning_rate_scheduler\n\ndef create_model(max_seq_len, adapter_size=64):\n    \"\"\"Creates a classification model.\"\"\"\n    #adapter_size = 64 # see - arXiv:1902.00751\n    # create the bert layer\n    with tf.io.gfile.GFile(bert_config_file, \"r\") as reader:\n        bc = StockBertConfig.from_json_string(reader.read())\n        bert_params = map_stock_config_to_params(bc)\n        bert_params.adapter_size = adapter_size\n        bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n    input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"input_ids\")\n    # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"token_type_ids\")\n    # output = bert([input_ids, token_type_ids])\n    output = bert(input_ids)\n\n    print(\"bert shape:\", output.shape)\n    cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)\n    cls_out = keras.layers.Dropout(0.5)(cls_out)\n    logits = keras.layers.Dense(units=768, activation=\"tanh\")(cls_out)\n    logits = keras.layers.Dropout(0.5)(logits)\n    logits = keras.layers.Dense(units=2, activation=\"softmax\")(logits)\n\n    # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits)\n    # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])\n    model = keras.Model(inputs=input_ids, outputs=logits)\n    model.build(input_shape=(None, max_seq_len))\n\n    # load the pre-trained model weights\n    load_stock_weights(bert, bert_ckpt_file)\n\n    # freeze weights if adapter-BERT is used\n    if adapter_size is not None:\n        freeze_bert_layers(bert)\n\n    model.compile(optimizer=keras.optimizers.Adam(),\n                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # the last layer applies softmax, so outputs are probabilities, not logits\n                  metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n    model.summary()\n    return model\n\nadapter_size = None # use None to fine-tune all of BERT\nmodel = create_model(data.max_seq_len, adapter_size=adapter_size)\n\nlog_dir = \".log/movie_reviews/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir)\n\ntotal_epoch_count = args.epochs\n# model.fit(x=(data.train_x, data.train_x_token_types), y=data.train_y,\nstart_time = int(round(time.time()*1000))\nmodel.fit(x=data.train_x, y=data.train_y,\n          validation_split=0.1,\n          batch_size=args.batch_size,\n          steps_per_epoch=args.steps,\n          shuffle=True,\n          epochs=total_epoch_count,\n          callbacks=[create_learning_rate_scheduler(max_learn_rate=1e-5,\n                                                    end_learn_rate=1e-7,\n                                                    warmup_epoch_count=20,\n                                                    total_epoch_count=total_epoch_count),\n                     keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True),\n                     tensorboard_callback])\nend_time = int(round(time.time()*1000))\nthroughput = args.epochs * args.steps * args.batch_size / (end_time - start_time) * 1000  # samples/sec: each epoch runs args.steps batches\nprint('\\n')\nprint('training throughput: {}'.format(throughput))\n#print('peak active bytes(MB): {}'.format(tf.experimental.get_peak_bytes_active(0)/1024.0/1024.0))\n#model.save_weights('./movie_reviews.h5', overwrite=True)\n\n#_, train_acc = model.evaluate(data.train_x, data.train_y)\n#print(\"train acc:\", train_acc)\n",
"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport sys\n\nfrom singa import module\nfrom singa import autograd\nfrom singa import tensor\nfrom singa.tensor import Tensor\n\n\nclass MLP(module.Module):\n\n def __init__(self, data_size=10, perceptron_size=100, num_classes=10):\n super(MLP, self).__init__()\n self.num_classes = num_classes\n self.dimension = 2\n\n self.w0 = Tensor(shape=(data_size, perceptron_size),\n requires_grad=True,\n stores_grad=True)\n self.w0.gaussian(0.0, 0.1)\n self.b0 = Tensor(shape=(perceptron_size,),\n requires_grad=True,\n stores_grad=True)\n self.b0.set_value(0.0)\n\n self.w1 = Tensor(shape=(perceptron_size, num_classes),\n requires_grad=True,\n stores_grad=True)\n self.w1.gaussian(0.0, 0.1)\n self.b1 = Tensor(shape=(num_classes,),\n requires_grad=True,\n stores_grad=True)\n self.b1.set_value(0.0)\n\n def forward(self, inputs):\n x = autograd.matmul(inputs, self.w0)\n x = autograd.add_bias(x, self.b0)\n x = autograd.relu(x)\n x = autograd.matmul(x, self.w1)\n x = autograd.add_bias(x, self.b1)\n return x\n\n def loss(self, out, ty):\n return autograd.softmax_cross_entropy(out, ty)\n\n def optim(self, loss, dist_option, spars):\n if dist_option == 'fp32':\n self.optimizer.backward_and_update(loss)\n elif dist_option == 'fp16':\n self.optimizer.backward_and_update_half(loss)\n elif dist_option == 'partialUpdate':\n self.optimizer.backward_and_partial_update(loss)\n elif dist_option == 'sparseTopK':\n self.optimizer.backward_and_sparse_update(loss,\n topK=True,\n spars=spars)\n elif dist_option == 'sparseThreshold':\n self.optimizer.backward_and_sparse_update(loss,\n topK=False,\n spars=spars)\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n\ndef create_model(pretrained=False, **kwargs):\n \"\"\"Constructs a CNN model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained\n \"\"\"\n model = MLP(**kwargs)\n\n return model\n\n\n__all__ = ['MLP', 'create_model']\n\nif __name__ == \"__main__\":\n\n import numpy as np\n from singa import opt\n from singa import device\n\n # generate the boundary\n f = lambda x: (5 * x + 1)\n bd_x = np.linspace(-1.0, 1, 200)\n bd_y = f(bd_x)\n # generate the training data\n x = np.random.uniform(-1, 1, 400)\n y = f(x) + 2 * np.random.randn(len(x))\n # convert training data to 2d space\n label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)\n data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)\n\n dev = device.create_swap_cuda_gpu_on(0)\n sgd = opt.SGD(0.05)\n tx = tensor.Tensor((400, 2), dev, tensor.float32)\n ty = tensor.Tensor((400,), dev, tensor.int32)\n model = MLP(data_size=2, perceptron_size=3, num_classes=2)\n\n # attached model to graph\n model.on_device(dev)\n model.set_optimizer(sgd)\n model.graph(True, 
False)\n model.train()\n\n for i in range(1001):\n tx.copy_from_numpy(data)\n ty.copy_from_numpy(label)\n out = model(tx)\n loss = model.loss(out, ty)\n model.optim(loss, 'fp32', spars=None)\n\n if i % 100 == 0:\n print(\"training loss = \", tensor.to_numpy(loss)[0])\n"
] |
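The gpu_movie_reviews.py file above schedules its learning rate with create_learning_rate_scheduler, which does linear warmup followed by exponential decay toward end_learn_rate. The standalone function below reproduces that curve with the helper's own default constants, so it can be inspected without TensorFlow; it is a sketch, not part of the benchmark.

import math

def lr_at(epoch, max_lr=5e-5, end_lr=1e-7, warmup=10, total=90):
    if epoch < warmup:
        return (max_lr / warmup) * (epoch + 1)  # linear warmup
    # exponential decay from max_lr toward end_lr over the remaining epochs
    return max_lr * math.exp(
        math.log(end_lr / max_lr) * (epoch - warmup + 1) / (total - warmup + 1))

for e in (0, 9, 10, 50, 89):
    print(e, lr_at(e))  # rises to max_lr by epoch 9, then decays toward end_lr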
[
[
"pandas.concat",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.io.gfile.GFile",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.Model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.utils.get_file",
"pandas.DataFrame.from_dict",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.layers.Input"
],
[
"numpy.random.uniform",
"numpy.linspace"
]
] |
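The __main__ block of swap-module.py above builds its toy two-class dataset by labeling noisy samples against the line y = 5x + 1 (hence the numpy.random.uniform entry in the apis column). The numpy-only sketch below isolates that data generation so it can be run without SINGA.

import numpy as np

f = lambda x: 5 * x + 1                 # the separating line
x = np.random.uniform(-1, 1, 400)
y = f(x) + 2 * np.random.randn(len(x))  # noisy points around the line
label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)
data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
print(data.shape, label.mean())         # (400, 2); classes roughly balanced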
aloctavodia/Theano-PyMC
|
[
"6d39faafdc3182ee232461b36ed66407b08b5c05"
] |
[
"theano/tensor/basic.py"
] |
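The basic.py source quoted below includes a MaxAndArgmax op whose perform method works around np.argmax supporting only a single axis: it transposes the kept axes to the front, flattens all reduced axes into one, and takes the argmax over that merged axis. A numpy-only sketch of the same trick (argmax_over_axes is a hypothetical name, not a Theano API):

import numpy as np

def argmax_over_axes(x, axes):
    keep = tuple(i for i in range(x.ndim) if i not in axes)
    t = np.transpose(x, keep + tuple(axes))        # kept axes first, reduced last
    flat = t.reshape(t.shape[:len(keep)] + (-1,))  # merge reduced axes into one
    return np.argmax(flat, axis=-1)                # flat index into merged block

x = np.arange(24).reshape(2, 3, 4)
print(argmax_over_axes(x, (1, 2)))  # [11 11]: last position of each 3x4 block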
[
"\"\"\"A `Type` and `Op` classes to work with numpy.ndarrays symbolically.\"\"\"\n\nimport builtins\nimport logging\nimport numbers\nimport warnings\nfrom collections.abc import Sequence\nfrom functools import partial\n\nimport numpy as np\n\nimport theano\nimport theano.scalar.sharedvar\nfrom theano import compile, config, gof, printing\nfrom theano import scalar as scal\n\n# For history\nfrom theano.compile import Rebroadcast, Shape, shape\nfrom theano.gof import Apply, Constant, Op, ParamsType, Variable\nfrom theano.gof.type import Generic\n\n# We use these exceptions as well.\nfrom theano.gradient import DisconnectedType, grad_not_implemented, grad_undefined\nfrom theano.printing import min_informative_str, pprint\nfrom theano.scalar import int32\nfrom theano.tensor import elemwise\n\n# set up the external interface\nfrom theano.tensor.elemwise import CAReduce, DimShuffle, Elemwise, Sum\nfrom theano.tensor.type import TensorType, values_eq_approx_always_true\nfrom theano.tensor.type_other import NoneConst\nfrom theano.tensor.var import TensorConstant, TensorVariable, _tensor_py_operators\n\n\n_logger = logging.getLogger(\"theano.tensor.basic\")\n\n__docformat__ = \"restructuredtext en\"\n\n# This is needed as we will hide it later\npython_complex = complex\npython_any = any\npython_all = all\n\n# Define common subsets of dtypes (as strings).\ncomplex_dtypes = list(map(str, scal.complex_types))\ncontinuous_dtypes = list(map(str, scal.continuous_types))\nfloat_dtypes = list(map(str, scal.float_types))\ninteger_dtypes = list(map(str, scal.integer_types))\ndiscrete_dtypes = list(map(str, scal.discrete_types))\nall_dtypes = list(map(str, scal.all_types))\nint_dtypes = list(map(str, scal.int_types))\nuint_dtypes = list(map(str, scal.uint_types))\n\n\nclass ShapeError(Exception):\n \"\"\"Raised when the shape cannot be computed.\"\"\"\n\n\ndef check_equal_numpy(x, y):\n \"\"\"\n Return True iff x and y are equal.\n\n Checks the dtype and shape if x and y are numpy.ndarray instances.\n\n \"\"\"\n if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):\n return x.dtype == y.dtype and x.shape == y.shape and np.all(abs(x - y) < 1e-10)\n elif isinstance(x, np.random.RandomState) and isinstance(y, np.random.RandomState):\n return python_all(\n np.all(a == b) for a, b in zip(x.__getstate__(), y.__getstate__())\n )\n else:\n return x == y\n\n\ncompile.register_checker(check_equal_numpy)\n\n\n__oplist_constructor_list = []\n\"\"\"List of functions to be listed as op constructors in the oplist\n(`gen_oplist`, doc/oplist.txt).\"\"\"\n\n\ndef constructor(f):\n \"\"\"Add `f` to :doc:`oplist`.\n\n Make `f` appear as a constructor in the oplist (`gen_oplist`,\n doc/oplist.txt).\n\n \"\"\"\n __oplist_constructor_list.append(f)\n return f\n\n\ndef __oplist_tag(thing, tag):\n tags = getattr(thing, \"__oplist_tags\", [])\n tags.append(tag)\n thing.__oplist_tags = tags\n\n\ndef as_tensor_variable(x, name=None, ndim=None):\n \"\"\"Convert `x` into the appropriate `TensorType`.\n\n This function is often used by `make_node` methods of `Op` subclasses to\n turn ndarrays, numbers, `Scalar` instances, `Apply` instances and\n `TensorType` instances into valid input list elements.\n\n Parameters\n ----------\n x : Apply instance, Variable instance, numpy.ndarray, or number\n This thing will be transformed into a `Variable` in a sensible way. 
An\n ndarray argument will not be copied, but a list of numbers will be\n copied to make an ndarray.\n name : str or None\n If a new `Variable` instance is created, it will be named with this\n string.\n ndim : None or integer\n Return a Variable with this many dimensions.\n\n Raises\n ------\n TypeError\n If `x` cannot be converted to a TensorType Variable.\n\n \"\"\"\n if (\n isinstance(getattr(x, \"type\", None), TensorType)\n and (name is None or x.name == name)\n and (ndim is None or x.ndim == ndim)\n ):\n return x\n\n if hasattr(x, \"_as_TensorVariable\"):\n return x._as_TensorVariable() # TODO: pass name and ndim arguments\n\n if isinstance(x, gof.Apply):\n # use Apply's default output mechanism\n if (x.op.default_output is None) and (len(x.outputs) != 1):\n raise TypeError(\n \"Multi-output Op encountered. \"\n \"Retry using only one of the outputs directly.\"\n )\n\n x = x.default_output()\n\n if isinstance(x, Variable):\n\n if isinstance(x, Constant):\n return as_tensor_variable(x.data, name=name, ndim=ndim)\n\n if isinstance(x.type, scal.Scalar):\n x = tensor_from_scalar(x)\n\n if not isinstance(x.type, TensorType):\n raise TypeError(\n \"Tensor type field must be a TensorType; found {}.\".format(type(x.type))\n )\n\n if ndim is None:\n return x\n else:\n if x.type.ndim > ndim:\n # strip off leading broadcastable dimensions\n first_non_broadcastable = [\n idx for idx in range(x.ndim) if not x.broadcastable[idx]\n ][0]\n x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])\n if x.ndim > ndim:\n raise ValueError(\n \"Tensor of type {} could not be cast to have {} dimensions\".format(\n x.type, ndim\n )\n )\n return x\n elif x.type.ndim < ndim:\n return shape_padleft(x, n_ones=(ndim - x.type.ndim))\n else:\n return x\n\n elif isinstance(x, Sequence):\n\n def extract_constants(i):\n if isinstance(i, Variable):\n if isinstance(i, Constant):\n return i.data\n else:\n raise TypeError\n else:\n return i\n\n try:\n x = [extract_constants(i) for i in x]\n except TypeError:\n return stack(x)\n\n elif isinstance(x, bool):\n raise TypeError(\n \"Cannot cast True or False as a tensor variable. Please use \"\n \"np.array(True) or np.array(False) if you need these constants. \"\n \"This error might be caused by using the == operator on \"\n \"Variables. v == w does not do what you think it does, \"\n \"use theano.tensor.eq(v, w) instead.\"\n )\n\n return constant(x, name=name, ndim=ndim)\n\n\n# this has a different name, because _as_tensor_variable is the\n# function which ops use to upcast their arguments... 
this\n# internal-use function is a good place to put debugging stuff, better\n# than the global astensor.\n_as_tensor_variable = as_tensor_variable\n\nas_tensor = as_tensor_variable\n\n\ndef constant(x, name=None, ndim=None, dtype=None):\n \"\"\"Return a `TensorConstant` with value `x`.\n\n Raises\n ------\n TypeError\n `x` could not be converted to a numpy.ndarray.\n ValueError\n `x` could not be expanded to have ndim dimensions.\n\n \"\"\"\n if isinstance(x, TensorConstant):\n if (\n (name is None or x.name == name)\n and (ndim is None or x.ndim == ndim)\n and (dtype is None or x.dtype == dtype)\n ):\n return x\n else:\n x = x.data\n\n x_ = scal.convert(x, dtype=dtype)\n\n if ndim is not None:\n if x_.ndim < ndim:\n x_ = np.expand_dims(x_, axis=tuple(range(ndim - x_.ndim)))\n elif x_.ndim > ndim:\n try:\n x_ = np.squeeze(x_, axis=tuple(range(x_.ndim - ndim)))\n except np.AxisError:\n raise ValueError(\n \"ndarray could not be cast to constant with %i dimensions\" % ndim\n )\n\n assert x_.ndim == ndim\n\n ttype = TensorType(dtype=x_.dtype, broadcastable=[s == 1 for s in x_.shape])\n\n try:\n return TensorConstant(ttype, x_, name=name)\n except Exception:\n raise TypeError(\"Could not convert %s to TensorType\" % x, type(x))\n\n\ndef _obj_is_wrappable_as_tensor(x):\n try:\n constant(x)\n return True\n except TypeError:\n return False\n\n\nif int(config.tensor.cmp_sloppy) > 1:\n # This config variable is a quick-and-dirty way to get low-precision\n # comparisons. For a more precise setting of these tolerances set\n # them explicitly in your user code by assigning, for example,\n # \"theano.tensor.basic.float32_atol = ...\"\n\n # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is\n # useful to test the GPU as they don't use extended precision and\n # this cause some difference bigger then the normal sloppy.\n float16_atol = 1e-2\n float16_rtol = 5e-2\n\n float32_atol = 5e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelif int(config.tensor.cmp_sloppy):\n float16_atol = 5e-3\n float16_rtol = 1e-2\n\n float32_atol = 1e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelse:\n # If you change those value in test don't forget to put them back\n # when the test end. 
Don't forget the case when the test fail.\n float16_atol = 1e-3\n float16_rtol = 1e-3\n\n float32_atol = 1e-5\n float32_rtol = 1e-5\n\n # defaults in numpy.allclose\n # Don't be more strict then numpy rtol\n # It cause useless error.\n float64_rtol = 1.0000000000000001e-05\n float64_atol = 1e-8\n\n\ndef _get_atol_rtol(a, b):\n tiny = (\"float16\",)\n narrow = (\"float32\", \"complex64\")\n if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):\n atol = float16_atol\n rtol = float16_rtol\n elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):\n atol = float32_atol\n rtol = float32_rtol\n else:\n atol = float64_atol\n rtol = float64_rtol\n return atol, rtol\n\n\ndef _allclose(a, b, rtol=None, atol=None):\n a = np.asarray(a)\n b = np.asarray(b)\n atol_, rtol_ = _get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n\n return np.allclose(a, b, atol=atol_, rtol=rtol_)\n\n\nclass NotScalarConstantError(Exception):\n \"\"\"\n Raised by get_scalar_constant_value if called on something that is\n not a scalar constant.\n \"\"\"\n\n\nclass EmptyConstantError(NotScalarConstantError):\n \"\"\"\n Raised by get_scalar_const_value if called on something that is a\n zero dimensional constant.\n \"\"\"\n\n\ndef numpy_scalar(data):\n \"\"\"Return a scalar stored in a numpy ndarray.\n\n Raises\n ------\n NotScalarConstantError\n If the numpy ndarray is not a scalar.\n\n \"\"\"\n\n # handle case where data is numpy.array([])\n if data.ndim > 0 and (len(data.shape) == 0 or builtins.max(data.shape) == 0):\n assert np.all(np.array([]) == data)\n raise EmptyConstantError()\n try:\n np.complex(data) # works for all numeric scalars\n return data\n except Exception:\n raise NotScalarConstantError(\n \"v.data is non-numeric, non-scalar, or has more than one\" \" unique value\",\n data,\n )\n\n\nget_scalar_constant_value_elemwises = (\n scal.Cast,\n scal.Switch,\n scal.NEQ,\n scal.EQ,\n scal.LT,\n scal.GT,\n scal.LE,\n scal.GE,\n scal.Sub,\n scal.Add,\n scal.Mod,\n scal.Mul,\n scal.IntDiv,\n scal.TrueDiv,\n scal.Minimum,\n scal.Maximum,\n)\n\n\ndef get_scalar_constant_value(\n orig_v, elemwise=True, only_process_constants=False, max_recur=10\n):\n \"\"\"Return the constant scalar(0-D) value underlying variable `v`.\n\n If `v` is the output of dimshuffles, fills, allocs, rebroadcasts,\n cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp, Elemwise\n and some pattern with Subtensor, this function digs through them.\n\n If `v` is not some view of constant scalar data, then raise a\n NotScalarConstantError.\n\n Parameters\n ----------\n elemwise : bool\n If False, we won't try to go into elemwise. 
So this call is faster.\n But we still investigate in Second Elemwise (as this is a substitute\n for Alloc)\n only_process_constants : bool\n If True, we only attempt to obtain the value of `orig_v` if it's\n directly constant and don't try to dig through dimshuffles, fills,\n allocs, and other to figure out its value.\n max_recur : int\n The maximum number of recursion.\n\n Notes\n -----\n There may be another function similar to this one in the code,\n but I'm not sure where it is.\n\n \"\"\"\n v = orig_v\n while True:\n if v is None:\n # None is not a scalar (and many uses of this function seem\n # to depend on passing it None)\n raise NotScalarConstantError()\n\n if isinstance(v, (np.integer, int, float)):\n return np.asarray(v)\n\n if isinstance(v, np.ndarray):\n return numpy_scalar(v).copy()\n\n if isinstance(v, Constant):\n if getattr(v.tag, \"unique_value\", None) is not None:\n data = v.tag.unique_value\n else:\n data = v.data\n if isinstance(data, np.ndarray):\n return numpy_scalar(data).copy()\n else:\n return data\n\n if not only_process_constants and getattr(v, \"owner\", None) and max_recur > 0:\n max_recur -= 1\n if isinstance(\n v.owner.op,\n (\n Alloc,\n DimShuffle,\n Rebroadcast,\n # outputguard is only used in debugmode but we\n # keep it here to avoid problems with old pickels.\n compile.ops.OutputGuard,\n compile.DeepCopyOp,\n ),\n ):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, theano.compile.ops.Shape_i):\n i = v.owner.op.i\n inp = v.owner.inputs[0]\n if isinstance(inp, Constant):\n return np.asarray(inp.data.shape[i])\n # The shape of a broadcastable dimension is 1\n if hasattr(inp.type, \"broadcastable\") and inp.type.broadcastable[i]:\n return np.asarray(1)\n\n # Don't act as the constant_folding optimization here as this\n # fct is used too early in the optimization phase. This would\n # mess with the stabilization optimization and be too slow.\n # We put all the scalar Ops used by get_canonical_form_slice()\n # to allow it to determine the broadcast pattern correctly.\n elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, theano.tensor.opt.Assert):\n # check if all conditions are constant and true\n cond = [\n get_scalar_constant_value(c, max_recur=max_recur)\n for c in v.owner.inputs[1:]\n ]\n if builtins.all([0 == c.ndim and c != 0 for c in cond]):\n v = v.owner.inputs[0]\n continue\n elif isinstance(v.owner.op, scal.ScalarOp):\n if isinstance(v.owner.op, scal.Second):\n # We don't need both input to be constant for second\n shp, val = v.owner.inputs\n v = val\n continue\n if isinstance(v.owner.op, get_scalar_constant_value_elemwises):\n const = [\n get_scalar_constant_value(i, max_recur=max_recur)\n for i in v.owner.inputs\n ]\n ret = [[None]]\n v.owner.op.perform(v.owner, const, ret)\n return ret[0][0].copy()\n # In fast_compile, we don't enable local_fill_to_alloc, so\n # we need to investigate Second as Alloc. 
So elemwise\n # don't disable the check for Second.\n elif isinstance(v.owner.op, Elemwise):\n if isinstance(v.owner.op.scalar_op, scal.Second):\n # We don't need both input to be constant for second\n shp, val = v.owner.inputs\n v = val\n continue\n elif elemwise and isinstance(\n v.owner.op.scalar_op, get_scalar_constant_value_elemwises\n ):\n const = [\n get_scalar_constant_value(i, max_recur=max_recur)\n for i in v.owner.inputs\n ]\n ret = [[None]]\n v.owner.op.perform(v.owner, const, ret)\n return ret[0][0].copy()\n elif (\n isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)\n and v.ndim == 0\n ):\n if isinstance(v.owner.inputs[0], TensorConstant):\n cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))\n try:\n return v.owner.inputs[0].data.__getitem__(cdata).copy()\n except IndexError:\n raise IndexError(\n str(tuple(v.owner.op.idx_list))\n + \" is not a valid index into \"\n + str(v.owner.inputs[0].data)\n )\n\n # The index list 'idx_list' should have length the same\n # shape as the input.\n # TODO: implement the case where we take a scalar in a matrix\n assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim\n\n # Needed to make better graph in this test in\n # theano/tensor/tests/test_sharedvar.py:\n # test_shared_options.test_specify_shape_partial\n if (\n v.owner.inputs[0].owner\n and isinstance(v.owner.inputs[0].owner.op, Join)\n and len(v.owner.op.idx_list) == 1\n ):\n # Ensure the Join is joining only scalar variables (so that\n # the constant value can be found at the same index as the\n # one used in the sub-tensor).\n if python_all(\n var.ndim == 0 for var in v.owner.inputs[0].owner.inputs[1:]\n ):\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n # Note the '+ 1' is because the first argument to Join\n # is the axis.\n ret = v.owner.inputs[0].owner.inputs[idx + 1]\n ret = get_scalar_constant_value(ret, max_recur=max_recur)\n # join can cast implicitly its input in some case.\n return theano._asarray(ret, dtype=v.type.dtype)\n if python_all(\n var.ndim == 1 for var in v.owner.inputs[0].owner.inputs[1:]\n ):\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n try:\n # TODO: assert joined axis is 0.\n length = 0\n loop = False\n for joined in v.owner.inputs[0].owner.inputs[1:]:\n ll = get_vector_length(joined)\n if idx < length + ll:\n v = joined[idx - length]\n loop = True\n break\n length += ll\n if loop:\n continue\n except TypeError:\n pass\n except ValueError:\n pass\n\n elif (\n v.owner.inputs[0].owner\n and isinstance(\n v.owner.inputs[0].owner.op, theano.tensor.opt.MakeVector\n )\n and\n # MakeVector normally accept only scalar as input.\n # We put this check in case there is change in the future\n python_all(var.ndim == 0 for var in v.owner.inputs[0].owner.inputs)\n and len(v.owner.op.idx_list) == 1\n ):\n\n idx = v.owner.op.idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n v.owner.inputs[1], max_recur=max_recur\n )\n # Python 2.4 does not support indexing with numpy.integer\n # So we cast it.\n idx = int(idx)\n ret = v.owner.inputs[0].owner.inputs[idx]\n ret = get_scalar_constant_value(ret, max_recur=max_recur)\n # MakeVector can cast implicitly its input in some case.\n return theano._asarray(ret, dtype=v.type.dtype)\n\n # This is needed when we take the grad as the Shape op\n # are not already changed into MakeVector\n owner = 
v.owner\n leftmost_parent = owner.inputs[0]\n if leftmost_parent.owner and isinstance(\n leftmost_parent.owner.op, theano.tensor.Shape\n ):\n op = owner.op\n idx_list = op.idx_list\n idx = idx_list[0]\n if isinstance(idx, gof.Type):\n idx = get_scalar_constant_value(\n owner.inputs[1], max_recur=max_recur\n )\n grandparent = leftmost_parent.owner.inputs[0]\n gp_broadcastable = grandparent.type.broadcastable\n ndim = grandparent.type.ndim\n if grandparent.owner and isinstance(\n grandparent.owner.op, Rebroadcast\n ):\n ggp_broadcastable = grandparent.owner.inputs[0].broadcastable\n l = [\n b1 or b2\n for b1, b2 in zip(ggp_broadcastable, gp_broadcastable)\n ]\n gp_broadcastable = tuple(l)\n\n assert ndim == len(gp_broadcastable)\n\n if not (idx < len(gp_broadcastable)):\n msg = (\n \"get_scalar_constant_value detected \"\n + \"deterministic IndexError: x.shape[%d] \"\n + \"when x.ndim=%d.\"\n ) % (idx, ndim)\n if config.exception_verbosity == \"high\":\n msg += \" x=%s\" % min_informative_str(v)\n else:\n msg += \" x=%s\" % str(v)\n raise ValueError(msg)\n\n if gp_broadcastable[idx]:\n return np.asarray(1)\n\n raise NotScalarConstantError(v)\n\n\n# Easy constructors\n\n\ndef tensor(*args, **kwargs):\n name = kwargs.pop(\"name\", None)\n return TensorType(*args, **kwargs)(name=name)\n\n\ndef _multi(*fns):\n def f2(f, *names):\n if names and isinstance(names[0], int):\n if names == 1:\n return f()\n else:\n return [f() for i in range(names[0])]\n if isinstance(names, tuple):\n if len(names) == 1:\n names = names[0]\n if len(names) == 1:\n return f(names)\n else:\n return [f(name) for name in names]\n\n if len(fns) == 1:\n return partial(f2, fns)\n else:\n return [partial(f2, f) for f in fns]\n\n\ncscalar = TensorType(\"complex64\", ())\nzscalar = TensorType(\"complex128\", ())\nfscalar = TensorType(\"float32\", ())\ndscalar = TensorType(\"float64\", ())\nbscalar = TensorType(\"int8\", ())\nwscalar = TensorType(\"int16\", ())\niscalar = TensorType(\"int32\", ())\nlscalar = TensorType(\"int64\", ())\n\n\ndef scalar(name=None, dtype=None):\n \"\"\"Return a symbolic scalar variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, ())\n return type(name)\n\n\nscalars, fscalars, dscalars, iscalars, lscalars = _multi(\n scalar, fscalar, dscalar, iscalar, lscalar\n)\n\nint_types = bscalar, wscalar, iscalar, lscalar\nfloat_types = fscalar, dscalar\ncomplex_types = cscalar, zscalar\nint_scalar_types = int_types\nfloat_scalar_types = float_types\ncomplex_scalar_types = complex_types\n\ncvector = TensorType(\"complex64\", (False,))\nzvector = TensorType(\"complex128\", (False,))\nfvector = TensorType(\"float32\", (False,))\ndvector = TensorType(\"float64\", (False,))\nbvector = TensorType(\"int8\", (False,))\nwvector = TensorType(\"int16\", (False,))\nivector = TensorType(\"int32\", (False,))\nlvector = TensorType(\"int64\", (False,))\n\n\ndef vector(name=None, dtype=None):\n \"\"\"Return a symbolic vector variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,))\n return type(name)\n\n\nvectors, fvectors, dvectors, ivectors, lvectors = _multi(\n vector, fvector, dvector, ivector, lvector\n)\n\nint_vector_types = bvector, wvector, ivector, lvector\nfloat_vector_types = 
fvector, dvector\ncomplex_vector_types = cvector, zvector\n\ncmatrix = TensorType(\"complex64\", (False, False))\nzmatrix = TensorType(\"complex128\", (False, False))\nfmatrix = TensorType(\"float32\", (False, False))\ndmatrix = TensorType(\"float64\", (False, False))\nbmatrix = TensorType(\"int8\", (False, False))\nwmatrix = TensorType(\"int16\", (False, False))\nimatrix = TensorType(\"int32\", (False, False))\nlmatrix = TensorType(\"int64\", (False, False))\n\n\ndef matrix(name=None, dtype=None):\n \"\"\"Return a symbolic matrix variable.\n\n Parameters\n ----------\n dtype: numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False))\n return type(name)\n\n\nmatrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(\n matrix, fmatrix, dmatrix, imatrix, lmatrix\n)\n\nint_matrix_types = bmatrix, wmatrix, imatrix, lmatrix\nfloat_matrix_types = fmatrix, dmatrix\ncomplex_matrix_types = cmatrix, zmatrix\n\ncrow = TensorType(\"complex64\", (True, False))\nzrow = TensorType(\"complex128\", (True, False))\nfrow = TensorType(\"float32\", (True, False))\ndrow = TensorType(\"float64\", (True, False))\nbrow = TensorType(\"int8\", (True, False))\nwrow = TensorType(\"int16\", (True, False))\nirow = TensorType(\"int32\", (True, False))\nlrow = TensorType(\"int64\", (True, False))\n\n\ndef row(name=None, dtype=None):\n \"\"\"Return a symbolic row variable (ndim=2, broadcastable=[True,False]).\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (True, False))\n return type(name)\n\n\nrows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)\n\nccol = TensorType(\"complex64\", (False, True))\nzcol = TensorType(\"complex128\", (False, True))\nfcol = TensorType(\"float32\", (False, True))\ndcol = TensorType(\"float64\", (False, True))\nbcol = TensorType(\"int8\", (False, True))\nwcol = TensorType(\"int16\", (False, True))\nicol = TensorType(\"int32\", (False, True))\nlcol = TensorType(\"int64\", (False, True))\n\n\ndef col(name=None, dtype=None):\n \"\"\"Return a symbolic column variable (ndim=2, broadcastable=[False,True]).\n\n Parameters\n ----------\n dtype : numeric\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, True))\n return type(name)\n\n\ncols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)\n\nctensor3 = TensorType(\"complex64\", ((False,) * 3))\nztensor3 = TensorType(\"complex128\", ((False,) * 3))\nftensor3 = TensorType(\"float32\", ((False,) * 3))\ndtensor3 = TensorType(\"float64\", ((False,) * 3))\nbtensor3 = TensorType(\"int8\", ((False,) * 3))\nwtensor3 = TensorType(\"int16\", ((False,) * 3))\nitensor3 = TensorType(\"int32\", ((False,) * 3))\nltensor3 = TensorType(\"int64\", ((False,) * 3))\n\n\ndef tensor3(name=None, dtype=None):\n \"\"\"Return a symbolic 3-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False))\n return type(name)\n\n\ntensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(\n tensor3, ftensor3, dtensor3, itensor3, 
ltensor3\n)\n\nctensor4 = TensorType(\"complex64\", ((False,) * 4))\nztensor4 = TensorType(\"complex128\", ((False,) * 4))\nftensor4 = TensorType(\"float32\", ((False,) * 4))\ndtensor4 = TensorType(\"float64\", ((False,) * 4))\nbtensor4 = TensorType(\"int8\", ((False,) * 4))\nwtensor4 = TensorType(\"int16\", ((False,) * 4))\nitensor4 = TensorType(\"int32\", ((False,) * 4))\nltensor4 = TensorType(\"int64\", ((False,) * 4))\n\n\ndef tensor4(name=None, dtype=None):\n \"\"\"Return a symbolic 4-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False, False))\n return type(name)\n\n\ntensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(\n tensor4, ftensor4, dtensor4, itensor4, ltensor4\n)\n\nctensor5 = TensorType(\"complex64\", ((False,) * 5))\nztensor5 = TensorType(\"complex128\", ((False,) * 5))\nftensor5 = TensorType(\"float32\", ((False,) * 5))\ndtensor5 = TensorType(\"float64\", ((False,) * 5))\nbtensor5 = TensorType(\"int8\", ((False,) * 5))\nwtensor5 = TensorType(\"int16\", ((False,) * 5))\nitensor5 = TensorType(\"int32\", ((False,) * 5))\nltensor5 = TensorType(\"int64\", ((False,) * 5))\n\n\ndef tensor5(name=None, dtype=None):\n \"\"\"Return a symbolic 5-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False, False, False, False, False))\n return type(name)\n\n\ntensor5s, ftensor5s, dtensor5s, itensor5s, ltensor5s = _multi(\n tensor5, ftensor5, dtensor5, itensor5, ltensor5\n)\n\nctensor6 = TensorType(\"complex64\", ((False,) * 6))\nztensor6 = TensorType(\"complex128\", ((False,) * 6))\nftensor6 = TensorType(\"float32\", ((False,) * 6))\ndtensor6 = TensorType(\"float64\", ((False,) * 6))\nbtensor6 = TensorType(\"int8\", ((False,) * 6))\nwtensor6 = TensorType(\"int16\", ((False,) * 6))\nitensor6 = TensorType(\"int32\", ((False,) * 6))\nltensor6 = TensorType(\"int64\", ((False,) * 6))\n\n\ndef tensor6(name=None, dtype=None):\n \"\"\"Return a symbolic 6-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,) * 6)\n return type(name)\n\n\ntensor6s, ftensor6s, dtensor6s, itensor6s, ltensor6s = _multi(\n tensor6, ftensor6, dtensor6, itensor6, ltensor6\n)\n\nctensor7 = TensorType(\"complex64\", ((False,) * 7))\nztensor7 = TensorType(\"complex128\", ((False,) * 7))\nftensor7 = TensorType(\"float32\", ((False,) * 7))\ndtensor7 = TensorType(\"float64\", ((False,) * 7))\nbtensor7 = TensorType(\"int8\", ((False,) * 7))\nwtensor7 = TensorType(\"int16\", ((False,) * 7))\nitensor7 = TensorType(\"int32\", ((False,) * 7))\nltensor7 = TensorType(\"int64\", ((False,) * 7))\n\n\ndef tensor7(name=None, dtype=None):\n \"\"\"Return a symbolic 7-D variable.\n\n Parameters\n ----------\n dtype: numeric type\n None means to use theano.config.floatX.\n name\n A name to attach to this variable.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n type = TensorType(dtype, (False,) * 7)\n return type(name)\n\n\ntensor7s, ftensor7s, dtensor7s, itensor7s, ltensor7s = _multi(\n tensor7, ftensor7, dtensor7, itensor7, ltensor7\n)\n\n\nTensor = TensorType\n\n\n# 
This bizarre push-import avoids a circular dependency.\nelemwise.as_tensor_variable = as_tensor_variable\nelemwise.TensorType = TensorType\nelemwise.TensorVariable = TensorVariable\nelemwise.TensorConstant = TensorConstant\n\n#########################\n# Utilities\n#########################\n\n\ndef _scal_elemwise_with_nfunc(nfunc, nin, nout):\n \"\"\"\n Replace a symbol definition with an elementwise version of the\n corresponding scalar Op. If it is not None, the nfunc argument\n should be a string such that getattr(numpy, nfunc) implements\n a vectorized version of the elemwise operation. nin is the number\n of inputs expected by that function, and nout is the number of\n **destination** inputs it takes. That is, the function should\n take nin+nout inputs. nout == 0 means that the numpy function\n does not take a numpy array argument to put its result in.\n\n \"\"\"\n\n def construct(symbol):\n symbolname = symbol.__name__\n inplace = symbolname.endswith(\"_inplace\")\n if inplace:\n msg = \"inplace\"\n else:\n msg = \"no_inplace\"\n\n n = \"Elemwise{{{},{}}}\".format(symbolname, msg)\n\n if inplace:\n scalar_op = getattr(scal, symbolname[: -len(\"_inplace\")])\n inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))\n rval = elemwise.Elemwise(\n inplace_scalar_op,\n {0: 0},\n name=n,\n nfunc_spec=(nfunc and (nfunc, nin, nout)),\n )\n else:\n scalar_op = getattr(scal, symbolname)\n rval = elemwise.Elemwise(\n scalar_op, name=n, nfunc_spec=(nfunc and (nfunc, nin, nout))\n )\n\n if getattr(symbol, \"__doc__\", False):\n rval.__doc__ = symbol.__doc__ + \"\\n\" + rval.__doc__\n\n # for the meaning of this see the ./epydoc script\n # it makes epydoc display rval as if it were a function, not an object\n rval.__epydoc_asRoutine = symbol\n rval.__module__ = \"tensor\"\n\n pprint.assign(rval, printing.FunctionPrinter(symbolname))\n\n return rval\n\n return construct\n\n\n_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)\n\n\ndef _pack(x):\n \"\"\"\n Convert x to a list if it is an iterable, otherwise wrap it in a list.\n \"\"\"\n try:\n return list(x)\n except TypeError:\n return [x]\n\n\ndef check_and_normalize_axes(x, axis):\n \"\"\"\n Check axes, normalize and convert them to a Python list of integers.\n Return an empty list if argument is None.\n\n Parameters\n ----------\n x: Tensor variable\n axis = Integer, tuple or list of integers\n\n Returns\n -------\n axis: list of integers\n \"\"\"\n x = as_tensor_variable(x)\n if axis is None:\n axis = []\n elif isinstance(axis, (int, np.integer)) or (\n isinstance(axis, np.ndarray) and axis.ndim == 0\n ):\n axis = [int(axis)]\n elif isinstance(axis, (tuple, list, np.ndarray)):\n axis = [int(i) for i in axis]\n elif isinstance(axis, Variable):\n if NoneConst.equals(axis):\n axis = []\n elif not isinstance(axis, TensorConstant):\n raise TypeError(\"Computation needs a constant axis. Got %s\" % axis)\n else:\n assert axis.dtype in integer_dtypes\n if isinstance(axis.data, (int, np.integer)) or (\n isinstance(axis.data, np.ndarray) and axis.data.ndim == 0\n ):\n axis = [int(axis.data)]\n elif isinstance(axis.data, (list, np.ndarray)):\n axis = [int(i) for i in axis.data]\n else:\n raise TypeError(\n \"Axis must be an integer, tuple, list of integers or a TensorVariable. Got %s\"\n % axis\n )\n if len(axis) > 0:\n for i in range(len(axis)):\n if axis[i] < 0:\n axis[i] += x.type.ndim\n if axis[i] < 0 or axis[i] >= x.type.ndim:\n raise ValueError(\n \"Computation needs a valid axis number for %d-D tensor. 
Got %d\"\n % (x.type.ndim, axis[i])\n )\n axis = list(set(axis))\n axis.sort()\n return axis\n\n\n#########################\n# Casting Operations\n#########################\n\n\nclass TensorFromScalar(Op):\n\n __props__ = ()\n\n def make_node(self, s):\n assert isinstance(s.type, scal.Scalar)\n return Apply(self, [s], [tensor(dtype=s.type.dtype, broadcastable=())])\n\n def perform(self, node, inp, out_):\n (s,) = inp\n (out,) = out_\n out[0] = np.asarray(s)\n\n def infer_shape(self, node, in_shapes):\n return [()]\n\n def grad(self, inp, grads):\n (s,) = inp\n (dt,) = grads\n if s.type.dtype in float_dtypes:\n assert dt.type.dtype in float_dtypes\n return [scalar_from_tensor(dt)]\n\n # If the input dtype is an integer, then so is the output dtype,\n # and the \"zero\" gradient can be represented in that int dtype.\n # Currently, theano.grad insists that the dtype of the returned\n # gradient has a float dtype, so we use floatX.\n if s.type.dtype in discrete_dtypes:\n return [s.zeros_like().astype(theano.config.floatX)]\n\n raise NotImplementedError(\"grad not implemented for complex dtypes\")\n\n\ntensor_from_scalar = TensorFromScalar()\n\n\nclass ScalarFromTensor(Op):\n\n __props__ = ()\n\n def make_node(self, t):\n assert isinstance(t.type, TensorType)\n assert t.type.broadcastable == ()\n return Apply(\n self, [t], [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]\n )\n\n def perform(self, node, inp, out_):\n (s,) = inp\n (out,) = out_\n out[0] = s.flatten()[0]\n\n def infer_shape(self, node, in_shapes):\n return [()]\n\n def grad(self, inp, grads):\n (s,) = inp\n (dt,) = grads\n return [tensor_from_scalar(dt)]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (z,) = outputs\n fail = sub[\"fail\"]\n return (\n \"\"\"\n %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];\n \"\"\"\n % locals()\n )\n\n def c_code_cache_version(self):\n return (1,)\n\n\nscalar_from_tensor = ScalarFromTensor()\n\n\n# to be removed as we get the epydoc routine-documenting thing going\n# -JB 20080924\ndef _conversion(real_value, name):\n __oplist_tag(real_value, \"casting\")\n real_value.__module__ = \"tensor.basic\"\n pprint.assign(real_value, printing.FunctionPrinter(name))\n return real_value\n\n\n# These _conver_to_<type> functions have leading underscores to indicate that\n# they should not be called directly. They do not perform sanity checks about\n# what types you are casting to what. 
That logic is implemented by the\n# `cast()` function below.\n\n_convert_to_bool = _conversion(elemwise.Elemwise(scal.convert_to_bool), \"bool\")\n\"\"\"Cast to boolean\"\"\"\n\n_convert_to_int8 = _conversion(elemwise.Elemwise(scal.convert_to_int8), \"int8\")\n\"\"\"Cast to 8-bit integer\"\"\"\n\n_convert_to_int16 = _conversion(elemwise.Elemwise(scal.convert_to_int16), \"int16\")\n\"\"\"Cast to 16-bit integer\"\"\"\n\n_convert_to_int32 = _conversion(elemwise.Elemwise(scal.convert_to_int32), \"int32\")\n\"\"\"Cast to 32-bit integer\"\"\"\n\n_convert_to_int64 = _conversion(elemwise.Elemwise(scal.convert_to_int64), \"int64\")\n\"\"\"Cast to 64-bit integer\"\"\"\n\n_convert_to_uint8 = _conversion(elemwise.Elemwise(scal.convert_to_uint8), \"uint8\")\n\"\"\"Cast to unsigned 8-bit integer\"\"\"\n\n_convert_to_uint16 = _conversion(elemwise.Elemwise(scal.convert_to_uint16), \"uint16\")\n\"\"\"Cast to unsigned 16-bit integer\"\"\"\n\n_convert_to_uint32 = _conversion(elemwise.Elemwise(scal.convert_to_uint32), \"uint32\")\n\"\"\"Cast to unsigned 32-bit integer\"\"\"\n\n_convert_to_uint64 = _conversion(elemwise.Elemwise(scal.convert_to_uint64), \"uint64\")\n\"\"\"Cast to unsigned 64-bit integer\"\"\"\n\n_convert_to_float16 = _conversion(elemwise.Elemwise(scal.convert_to_float16), \"float16\")\n\"\"\"Cast to half-precision floating point\"\"\"\n\n_convert_to_float32 = _conversion(elemwise.Elemwise(scal.convert_to_float32), \"float32\")\n\"\"\"Cast to single-precision floating point\"\"\"\n\n_convert_to_float64 = _conversion(elemwise.Elemwise(scal.convert_to_float64), \"float64\")\n\"\"\"Cast to double-precision floating point\"\"\"\n\n_convert_to_complex64 = _conversion(\n elemwise.Elemwise(scal.convert_to_complex64), \"complex64\"\n)\n\"\"\"Cast to single-precision complex\"\"\"\n\n_convert_to_complex128 = _conversion(\n elemwise.Elemwise(scal.convert_to_complex128), \"complex128\"\n)\n\"\"\"Cast to double-precision complex\"\"\"\n\n_cast_mapping = {\n \"bool\": _convert_to_bool,\n \"int8\": _convert_to_int8,\n \"int16\": _convert_to_int16,\n \"int32\": _convert_to_int32,\n \"int64\": _convert_to_int64,\n \"uint8\": _convert_to_uint8,\n \"uint16\": _convert_to_uint16,\n \"uint32\": _convert_to_uint32,\n \"uint64\": _convert_to_uint64,\n \"float16\": _convert_to_float16,\n \"float32\": _convert_to_float32,\n \"float64\": _convert_to_float64,\n \"complex64\": _convert_to_complex64,\n \"complex128\": _convert_to_complex128,\n}\n\n\n@constructor\ndef cast(x, dtype):\n \"\"\"Symbolically cast `x` to a Tensor of type `dtype`.\"\"\"\n if dtype == \"floatX\":\n dtype = config.floatX\n\n _x = as_tensor_variable(x)\n if _x.type.dtype == dtype:\n return _x\n if _x.type.dtype.startswith(\"complex\") and not dtype.startswith(\"complex\"):\n raise TypeError(\n \"Casting from complex to real is ambiguous: consider real(), \"\n \"imag(), angle() or abs()\"\n )\n return _cast_mapping[dtype](x)\n\n\n##########################\n# Unary Operations\n##########################\n\n\nclass MaxAndArgmax(Op):\n \"\"\"\n Calculate the max and argmax over a given axis or over all axes.\n\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 2 # max val, max idx\n E_axis = \"invalid axis\"\n params_type = Generic()\n __props__ = (\"axis\",)\n _f16_ok = True\n\n def __init__(self, axis):\n assert isinstance(axis, list)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n return self.axis\n\n def make_node(self, x):\n x = _as_tensor_variable(x)\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not 
perform the max / argmax.\n all_axes = set(self.axis)\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n inputs = [x]\n outputs = [\n tensor(x.type.dtype, broadcastable, name=\"max\"),\n tensor(\"int64\", broadcastable, name=\"argmax\"),\n ]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, outs, params):\n x = inp[0]\n axes = params\n max, max_idx = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n else:\n axes = tuple(int(ax) for ax in axes)\n max[0] = theano._asarray(np.max(x, axes), dtype=node.outputs[0].dtype)\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n\n # Numpy.prod returns 1.0 when arg is empty, so we cast it to int64\n # Otherwise reshape would complain citing float arg\n new_shape = kept_shape + (np.prod(reduced_shape, dtype=\"int64\"),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n if len(self.axis) != 1 and len(self.axis) != node.inputs[0].ndim:\n raise NotImplementedError(\n \"NumPy C-API can compute max and argmax only for 1 axis or for all axes.\"\n )\n x = inp[0]\n axis = sub[\"params\"]\n max, argmax = out\n fail = sub[\"fail\"]\n ret = \"\"\"\n #if PY_MAJOR_VERSION >= 3\n #ifndef PyInt_AS_LONG\n #define PyInt_AS_LONG PyLong_AS_LONG\n #endif\n #endif\n\n int axis;\n\n if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {\n axis = NPY_MAXDIMS;\n } else if(PyTuple_GET_SIZE(%(axis)s) == 1) {\n PyObject* axis_object = PyTuple_GET_ITEM(%(axis)s, 0);\n axis = (int)PyInt_AS_LONG(axis_object);\n if (axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"MaxAndArgmax: bad axis argument\");\n %(fail)s\n }\n } else {\n PyErr_SetString(PyExc_NotImplementedError,\n \"MaxAndArgmax: NumPy C-API can compute max and argmax only for 1 axis or for all axes.\");\n %(fail)s\n }\n\n Py_CLEAR(%(max)s);\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n\n %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);\n if (%(max)s == NULL) {\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(max)s)) {\n %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(max)s == NULL){\n %(fail)s;\n }\n }\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if (%(argmax)s == NULL) {\n Py_CLEAR(%(max)s);\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(argmax)s)) {\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if (PyArray_TYPE(%(argmax)s) != NPY_INT64) {\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (5,)\n\n def infer_shape(self, node, shapes):\n ishape = shapes[0]\n rval = tuple(\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n )\n return [rval, rval]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None, 
None]\n if len(self.axis) != 1:\n raise ValueError(\"R_op supported for arg_max only for \" \"one axis!\")\n if self.axis[0] > 1:\n raise ValueError(\"R_op supported for arg_max only when \" \" axis is 0 or 1\")\n if inputs[0].ndim != 2:\n raise ValueError(\n \"R_op supported for arg_max only when \" \" input is a matrix\"\n )\n max_vals, max_pos = self.make_node(*inputs).outputs\n if self.axis[0] == 0:\n return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]\n else:\n return [eval_points[0][arange(eval_points[0].shape[0]), max_pos], None]\n\n def grad(self, inp, grads):\n # The strict sense mathematical gradient of the maximum function is\n # not calculated here for it is not defined at every point where some\n # coordinates are identical. However, since the latter set has null\n # Lebesgue measure, the result may be interpreted as weak gradient.\n\n # @note: This function should work correctly for L{vector}s.\n # (x, y), (gz, gw)\n # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy\n # gMax * dMax/dx + gArgMax * dArgMax/dx,\n # gMax * dMax/daxis + gArgMax * dArgMax/daxis\n # g_max has one less dimension than x, so you need to complete\n # g_max to x's shape when axis=0 the broadcasting mechanism\n # does it automatically\n x = inp[0]\n axis = _as_tensor_variable(self.axis)\n g_max, g_max_idx = grads\n\n g_max_disconnected = isinstance(g_max.type, DisconnectedType)\n g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)\n\n # if the op is totally disconnected, so are its inputs\n if g_max_disconnected and g_max_idx_disconnected:\n return [DisconnectedType()(), DisconnectedType()()]\n\n # if the max is disconnected but the argmax is not,\n # the gradient on its inputs is zero\n if g_max_disconnected:\n return [x.zeros_like()]\n if NoneConst.equals(axis):\n axis_ = list(range(x.ndim))\n else:\n axis_ = axis\n xmax = max(x, axis_)\n\n # Raise the g_max and xmax to the same number of dim as the input.\n pattern = []\n out_dim = 0\n if NoneConst.equals(axis):\n # We are taking the max/argmax over all dimensions.\n axis = None\n for i in range(x.ndim):\n if axis is None or i in axis.data:\n pattern.append(\"x\")\n else:\n pattern.append(out_dim)\n out_dim += 1\n g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)\n xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)\n\n # Set the grad to the correct position.\n g_x = eq(xmax_pad, x) * g_max_pad\n return (g_x,)\n\n\nclass Argmax(Op):\n \"\"\"\n Calculate the argmax over a given axis or over all axes.\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 1\n E_axis = \"invalid axis\"\n __props__ = (\"axis\",)\n _f16_ok = True\n\n params_type = ParamsType(c_axis=scal.int64)\n\n def __init__(self, axis):\n if axis is not None:\n axis = tuple(axis)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n if self.axis is not None and len(self.axis) == 1:\n c_axis = np.int64(self.axis[0])\n else:\n # The value here doesn't matter, it won't be used\n c_axis = np.int64(-1)\n return self.params_type.get_params(c_axis=c_axis)\n\n def make_node(self, x, axis=None):\n x = _as_tensor_variable(x)\n if self.axis is None:\n all_axes = list(range(x.ndim))\n else:\n all_axes = self.axis\n inputs = [x]\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the argmax.\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n outputs = [tensor(\"int64\", broadcastable, name=\"argmax\")]\n return Apply(self, inputs, outputs)\n\n def prepare_node(self, node, 
storage_map, compute_map, impl):\n if len(node.inputs) == 2:\n raise ValueError(\n \"You are trying to compile a graph with an old Argmax node. Either reoptimize your graph or rebuild it to get the new node format.\"\n )\n\n def perform(self, node, inp, outs, params):\n (x,) = inp\n axes = self.axis\n (max_idx,) = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n new_shape = kept_shape + (np.prod(reduced_shape),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (argmax,) = out\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n if self.axis is None:\n axis_code = \"axis = NPY_MAXDIMS;\"\n else:\n if len(self.axis) > 1:\n raise NotImplementedError()\n # params is only used here for now\n axis_code = (\n \"\"\"\n axis = %(params)s->c_axis;\n if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){\n PyErr_SetString(PyExc_ValueError,\n \"Argmax, bad axis argument\");\n %(fail)s\n }\n \"\"\"\n % locals()\n )\n ret = \"\"\"\n int axis;\n\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n %(axis_code)s\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n if(!PyArray_CheckExact(%(argmax)s)){\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if(PyArray_TYPE(%(argmax)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n def infer_shape(self, node, shapes):\n (ishape,) = shapes\n if self.axis is None:\n return [()]\n rval = tuple(\n [\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n ]\n )\n return [rval]\n\n def grad(self, inp, grads):\n (x,) = inp\n\n return [x.zeros_like()]\n\n\ndef makeKeepDims(x, y, axis):\n \"\"\"\n Reintroduces in y with length one the axes of x which have been left out\n in a prior reduction of x. 
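# NumPy sketch of what the keepdims machinery described here does: reinsert
# each reduced axis with length one so the result broadcasts against the
# original tensor (the DimShuffle pattern (0, 'x', 1) in Theano terms).
import numpy as np

x = np.ones((2, 3, 4))
y = x.max(axis=1)                 # shape (2, 4)
y_kept = y.reshape(2, 1, 4)       # axis 1 reinserted with length one
assert (x - y_kept).shape == (2, 3, 4)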
With this option, the resulting tensor will\n broadcast correctly against the original tensor x.\n\n \"\"\"\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n\n if axis is None:\n axis = list(range(x.type.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n newaxis = []\n for a in axis:\n if not isinstance(a, int):\n raise ValueError(\"keepdims option can be used only with constant axis\")\n if a < 0:\n a += x.type.ndim\n newaxis.append(a)\n i = 0\n new_dims = []\n for j, _ in enumerate(x.type.broadcastable):\n if j in newaxis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n return DimShuffle(y.type.broadcastable, new_dims)(y)\n\n\n@constructor\ndef max_and_argmax(a, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements and their indices obtained by iterating over\n given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n # Check axis and convert it to a Python list of integers.\n # Axis will be used as an op param of MaxAndArgmax.\n a = as_tensor_variable(a)\n axis = check_and_normalize_axes(a, axis)\n if len(axis) == 0:\n axis = list(range(a.type.ndim))\n out, argout = MaxAndArgmax(axis)(a)\n\n if keepdims:\n out = makeKeepDims(a, out, axis)\n argout = makeKeepDims(a, argout, axis)\n return [out, argout]\n\n\nclass Max(CAReduce):\n nfunc_spec = (\"max\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(scal.maximum, axis)\n\n\nclass Min(CAReduce):\n nfunc_spec = (\"min\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(scal.minimum, axis)\n\n\n@constructor\ndef max(x, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n Notes\n -----\n We return an error as numpy when we reduce a dim with a shape of 0.\n\n \"\"\"\n\n # We have a choice of implementing this call with the\n # CAReduce op or the MaxAndArgmax op.\n\n # MaxAndArgmax supports grad and Rop, so we prefer to use that.\n # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]\n # with CAReduce at compile time, so at this stage the important\n # thing is supporting all user interface features, not speed.\n # Some cases can be implemented only with CAReduce.\n\n # We thus prefer to use MaxAndArgmax, if possible. 
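# A short usage sketch of max_and_argmax with keepdims, assuming a working
# Theano install; the names f and data are illustrative.
import numpy as np
import theano
import theano.tensor as tt

x = tt.dmatrix("x")
m, am = tt.max_and_argmax(x, axis=1, keepdims=True)
f = theano.function([x], [m, am])
data = np.array([[1.0, 3.0], [5.0, 2.0]])
# f(data) -> [array([[3.], [5.]]), array([[1], [0]])]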
It does not\n # support all axis arguments, so we may need to fall back to CAReduce.\n\n try:\n out = max_and_argmax(x, axis)[0]\n except Exception:\n out = Max(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\n@constructor\ndef argmax(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmax is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n argout = max_and_argmax(x, axis)[1]\n\n if keepdims:\n argout = makeKeepDims(x, argout, axis)\n return argout\n\n\n@constructor\ndef min(x, axis=None, keepdims=False):\n \"\"\"\n Returns minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the min is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return -max(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n max_val = np.array(itype.max, dtype=itype.dtype)\n return max_val - max(max_val - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return ~max(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\n@constructor\ndef argmin(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmin is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
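# NumPy check of the unsigned-integer trick used by min above: uints cannot
# be negated safely, so min(x) is computed as max_val - max(max_val - x).
import numpy as np

x = np.array([7, 2, 9], dtype="uint8")
max_val = np.iinfo(x.dtype).max          # 255 for uint8
assert max_val - np.max(max_val - x) == np.min(x) == 2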
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return argmax(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n return argmax(itype.max - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return argmax(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\n@constructor\ndef smallest(*args):\n \"\"\"\n Return the [elementwise] smallest of a variable number of arguments.\n\n Like python's min.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a < b, a, b)\n else:\n return min(stack(args), axis=0)\n\n\n@constructor\ndef largest(*args):\n \"\"\"\n Return the [elementwise] largest of a variable number of arguments.\n\n Like python's max.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a > b, a, b)\n else:\n return max(stack(args), axis=0)\n\n\n##########################\n# Comparison\n##########################\n\n\n@_scal_elemwise\ndef lt(a, b):\n \"\"\"a < b\"\"\"\n\n\n@_scal_elemwise\ndef gt(a, b):\n \"\"\"a > b\"\"\"\n\n\n@_scal_elemwise\ndef le(a, b):\n \"\"\"a <= b\"\"\"\n\n\n@_scal_elemwise\ndef ge(a, b):\n \"\"\"a >= b\"\"\"\n\n\n@_scal_elemwise\ndef eq(a, b):\n \"\"\"a == b\"\"\"\n\n\n@_scal_elemwise\ndef neq(a, b):\n \"\"\"a != b\"\"\"\n\n\n@_scal_elemwise\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisnan_ = isnan\n\n\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isnan_(a)\n\n\n@_scal_elemwise\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisinf_ = isinf\n\n\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isinf_(a)\n\n\ndef allclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implement Numpy's ``allclose`` on tensors.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan: bool\n Whether to consider nan's in the same place to be close.\n\n Returns\n -------\n bool\n A boolean value (of type int8 returned by the tensor elementwise `all`\n function) whether all elements in a and b are in the tolerance range\n defined above.\n\n Notes\n -----\n Not a symmetric equation. See Numpy's documentation.\n\n \"\"\"\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\ndef isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implements Numpy's ``isclose`` on tensors.\n\n The tolerance values are positive, typically very small numbers. 
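# A sketch of the graph smallest(a, b) builds for two arguments: an
# elementwise switch on a < b; assumes a working Theano install.
import theano
import theano.tensor as tt

a = tt.dvector("a")
b = tt.dvector("b")
f = theano.function([a, b], tt.switch(a < b, a, b))
# f([1., 4.], [3., 2.]) -> array([1., 2.])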
The\n    relative difference (`rtol` * abs(`b`)) and the absolute difference\n    `atol` are added together to compare against the absolute difference\n    between `a` and `b`.\n\n    ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n    Parameters\n    ----------\n    a : tensor\n        Input to compare.\n    b : tensor\n        Input to compare.\n    rtol : float\n        The relative tolerance parameter.\n    atol : float\n        The absolute tolerance parameter.\n    equal_nan : bool\n        Whether to consider nan's in the same place to be close.\n\n    Returns\n    -------\n    int8\n        A boolean (int8) array where two arrays are element-wise equal\n        within a tolerance.\n\n    Notes\n    -----\n    Not a symmetric equation. See Numpy's documentation.\n\n    Examples\n    --------\n    >>> import theano\n    >>> import numpy as np\n    >>> a = theano._asarray([1e10, 1e-7], dtype=\"float64\")\n    >>> b = theano._asarray([1.00001e10, 1e-8], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([1, 0], dtype=int8)\n    >>> a = theano._asarray([1e10, 1e-8], dtype=\"float64\")\n    >>> b = theano._asarray([1.00001e10, 1e-9], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([1, 1], dtype=int8)\n    >>> a = theano._asarray([1e10, 1e-8], dtype=\"float64\")\n    >>> b = theano._asarray([1.0001e10, 1e-9], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([0, 1], dtype=int8)\n    >>> a = theano._asarray([1.0, np.nan], dtype=\"float64\")\n    >>> b = theano._asarray([1.0, np.nan], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([1, 0], dtype=int8)\n    >>> a = theano._asarray([1.0, np.nan], dtype=\"float64\")\n    >>> b = theano._asarray([1.0, np.nan], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b, equal_nan=True).eval()\n    array([1, 1], dtype=int8)\n    >>> a = theano._asarray([1.0, np.inf], dtype=\"float64\")\n    >>> b = theano._asarray([1.0, -np.inf], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([1, 0], dtype=int8)\n    >>> a = theano._asarray([1.0, np.inf], dtype=\"float64\")\n    >>> b = theano._asarray([1.0, np.inf], dtype=\"float64\")\n    >>> theano.tensor.isclose(a, b).eval()\n    array([1, 1], dtype=int8)\n\n    \"\"\"\n    # close will be an int8 array of 1 where within tolerance\n    # and 0 where not within tolerance or there was a nan or inf value.\n    diff = abs(a - b)\n    tolerance = atol + rtol * abs(b)\n    close_prelim = le(diff, tolerance)\n\n    a_nan = isnan(a)\n    b_nan = isnan(b)\n    nans = bitwise_or(a_nan, b_nan)\n\n    a_inf = isinf(a)\n    b_inf = isinf(b)\n    infs = bitwise_or(a_inf, b_inf)\n\n    nans_or_infs = bitwise_or(nans, infs)\n\n    # close is now an array of 0's except where elements are not nan or inf\n    # and are within the tolerance.\n    close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))\n\n    # deal with signed inf values. 
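# A NumPy mirror of the masking above: the tolerance test is AND-ed with
# "neither operand is nan or inf"; the same-signed-inf rescue happens next.
import numpy as np

a = np.array([1.0, np.nan, np.inf])
b = np.array([1.0, np.nan, np.inf])
within_tol = np.abs(a - b) <= (1e-8 + 1e-5 * np.abs(b))
bad = np.isnan(a) | np.isnan(b) | np.isinf(a) | np.isinf(b)
close = within_tol & ~bad         # [True, False, False]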
this will make an array inf_eq of 0's\n # except where inf values have the same sign.\n both_infs = bitwise_and(a_inf, b_inf)\n inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))\n inf_eq = bitwise_and(both_infs, inf_signs_eq)\n\n # now create the potential result combining close and inf_eq\n close_with_infs = bitwise_or(close, inf_eq)\n\n # deal with comparing nan's.\n if equal_nan:\n both_nans = bitwise_and(a_nan, b_nan)\n return bitwise_or(close_with_infs, both_nans)\n # otherwise nan's aren't considered close.\n else:\n return close_with_infs\n\n\n##########################\n# Condition\n##########################\n\n\n@_scal_elemwise\ndef switch(cond, ift, iff):\n \"\"\"if cond then ift else iff\"\"\"\n\n\nwhere = switch\n##########################\n# Bit-wise\n##########################\n\n\n@_scal_elemwise\ndef and_(a, b):\n \"\"\"bitwise a & b\"\"\"\n\n\nbitwise_and = and_ # numpy name for it\n\n\n@_scal_elemwise\ndef or_(a, b):\n \"\"\"bitwise a | b\"\"\"\n\n\nbitwise_or = or_ # numpy name for it\n\n\n@_scal_elemwise\ndef xor(a, b):\n \"\"\"bitwise a ^ b\"\"\"\n\n\nbitwise_xor = xor # numpy name for it\n\n\n@_scal_elemwise\ndef invert(a):\n \"\"\"bitwise ~a\"\"\"\n\n\nbitwise_not = invert # numpy alias for it\n\n\n##########################\n# Math\n##########################\n\n\n@_scal_elemwise\ndef abs_(a):\n \"\"\"|`a`|\n\n TensorVariable overloads the `TensorVariable.__abs__` operator so that\n this function is called when you type abs(a).\n\n \"\"\"\n\n\npprint.assign(abs_, printing.PatternPrinter((\"|%(0)s|\", -1000)))\n\n\n@_scal_elemwise\ndef exp(a):\n \"\"\"e^`a`\"\"\"\n\n\n@_scal_elemwise\ndef exp2(a):\n \"\"\"2^`a`\"\"\"\n\n\n@_scal_elemwise\ndef expm1(a):\n \"\"\"e^`a` - 1\"\"\"\n\n\n@_scal_elemwise\ndef neg(a):\n \"\"\"-a\"\"\"\n\n\n# numpy.reciprocal does integer division on integer inputs\n# (which is not very interesting)\n@_scal_elemwise\ndef inv(a):\n \"\"\"1.0/a\"\"\"\n\n\n@_scal_elemwise\ndef log(a):\n \"\"\"base e logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log2(a):\n \"\"\"base 2 logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log10(a):\n \"\"\"base 10 logarithm of a\"\"\"\n\n\n@_scal_elemwise\ndef log1p(a):\n \"\"\"log(1+a)\"\"\"\n\n\n@_scal_elemwise\ndef sgn(a):\n \"\"\"sign of a\"\"\"\n\n\n@_scal_elemwise\ndef ceil(a):\n \"\"\"ceiling of a\"\"\"\n\n\n@_scal_elemwise\ndef floor(a):\n \"\"\"floor of a\"\"\"\n\n\n@_scal_elemwise\ndef trunc(a):\n \"\"\"trunc of a\"\"\"\n\n\n@constructor\ndef iround(a, mode=None):\n \"\"\"cast(round(a,mode),'int64')\"\"\"\n return cast(round(a, mode), \"int64\")\n\n\n@constructor\ndef round(a, mode=None):\n \"\"\"round_mode(a) with mode in [half_away_from_zero, half_to_even].\n Default to half_to_even.\"\"\"\n if mode is None:\n mode = \"half_to_even\"\n if config.warn.round:\n warnings.warn(\n \"theano.tensor.round() changed its default from\"\n \" `half_away_from_zero` to `half_to_even` to have\"\n \" the same default as NumPy. 
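# Usage sketch for the elementwise condition defined above; where is an
# alias of switch. Assumes a working Theano install.
import numpy as np
import theano
import theano.tensor as tt

cond = tt.bvector("cond")
x = tt.dvector("x")
y = tt.dvector("y")
f = theano.function([cond, x, y], tt.where(cond, x, y))
# f(np.array([1, 0], dtype="int8"), [10., 20.], [30., 40.]) -> array([10., 40.])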
Use the Theano flag\"\n \" `warn.round=False` to disable this warning.\"\n )\n if mode == \"half_away_from_zero\":\n return round_half_away_from_zero(a)\n elif mode == \"half_to_even\":\n return round_half_to_even(a)\n else:\n raise Exception(\"round mode %s is not implemented.\" % mode)\n\n\n@_scal_elemwise\ndef round_half_to_even(a):\n \"\"\"round_half_to_even(a)\"\"\"\n\n\n@_scal_elemwise\ndef round_half_away_from_zero(a):\n \"\"\"round_half_away_from_zero(a)\"\"\"\n\n\n@_scal_elemwise\ndef sqr(a):\n \"\"\"square of a\"\"\"\n\n\n# alias to sqr, included to maintain similarity with numpy interface\nsquare = sqr\n\n\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):\n \"\"\"Calculate the covariance matrix.\n Covariance indicates the level to which two variables vary together.\n If we examine N-dimensional samples, :math:`m = [x_1, x_2, ... x_N]^T`,\n then the covariance matrix element :math:`C_{ij}` is the covariance of\n :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance\n of :math:`x_i`. Code and docstring ported from numpy.\n ----------\n m : array_like\n A 2-D array containing multiple variables and observations.\n Each row of `m` represents a variable, and each column is\n observations of all those variables.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same form\n as that of `m`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : bool, optional\n Default normalization (False) is by ``(N - 1)``, where ``N`` is the\n number of observations given (unbiased estimate). If `bias` is True, then\n normalization is by ``N``. 
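# The two rounding modes accepted by round above, assuming a working Theano
# install; half_to_even matches NumPy's banker's rounding.
import theano
import theano.tensor as tt

x = tt.dvector("x")
f = theano.function([x], tt.round(x, mode="half_to_even"))
# f([0.5, 1.5, 2.5]) -> array([0., 2., 2.])
g = theano.function([x], tt.round(x, mode="half_away_from_zero"))
# g([0.5, 1.5, 2.5]) -> array([1., 2., 3.])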
These values can be overridden by using the\n keyword ``ddof``.\n ddof : int, optional\n If not ``None`` the default value implied by `bias` is overridden.\n The default value is ``None``.\n Returns\n -------\n out : The covariance matrix of the variables.\n \"\"\"\n\n if fweights is not None:\n raise NotImplementedError(\"fweights are not implemented\")\n if aweights is not None:\n raise NotImplementedError(\"aweights are not implemented\")\n\n if not rowvar and m.shape[0] != 1:\n m = m.T\n\n if y is not None:\n if not rowvar and y.shape[0] != 1:\n y = y.T\n m = theano.tensor.concatenate((m, y), axis=0)\n\n if ddof is None:\n if not bias:\n ddof = 1\n else:\n ddof = 0\n\n # Determine the normalization\n fact = m.shape[1] - ddof\n\n m -= m.mean(axis=1, keepdims=1)\n c = m.dot(m.T)\n c *= theano.tensor.constant(1) / fact\n return c.squeeze()\n\n\n@_scal_elemwise\ndef sqrt(a):\n \"\"\"square root of a\"\"\"\n\n\n@_scal_elemwise\ndef deg2rad(a):\n \"\"\"convert degree a to radian\"\"\"\n\n\n@_scal_elemwise\ndef rad2deg(a):\n \"\"\"convert radian a to degree\"\"\"\n\n\n@_scal_elemwise\ndef cos(a):\n \"\"\"cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef arccos(a):\n \"\"\"arccosine of a\"\"\"\n\n\n@_scal_elemwise\ndef sin(a):\n \"\"\"sine of a\"\"\"\n\n\n@_scal_elemwise\ndef arcsin(a):\n \"\"\"arcsine of a\"\"\"\n\n\n@_scal_elemwise\ndef tan(a):\n \"\"\"tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctan(a):\n \"\"\"arctangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctan2(a, b):\n \"\"\"arctangent of a / b\"\"\"\n\n\n@_scal_elemwise\ndef cosh(a):\n \"\"\"hyperbolic cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef arccosh(a):\n \"\"\"hyperbolic arc cosine of a\"\"\"\n\n\n@_scal_elemwise\ndef sinh(a):\n \"\"\"hyperbolic sine of a\"\"\"\n\n\n@_scal_elemwise\ndef arcsinh(a):\n \"\"\"hyperbolic arc sine of a\"\"\"\n\n\n@_scal_elemwise\ndef tanh(a):\n \"\"\"hyperbolic tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef arctanh(a):\n \"\"\"hyperbolic arc tangent of a\"\"\"\n\n\n@_scal_elemwise\ndef erf(a):\n \"\"\"error function\"\"\"\n\n\n@_scal_elemwise\ndef erfc(a):\n \"\"\"complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef erfcx(a):\n \"\"\"scaled complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef erfinv(a):\n \"\"\"inverse error function\"\"\"\n\n\n@_scal_elemwise\ndef erfcinv(a):\n \"\"\"inverse complementary error function\"\"\"\n\n\n@_scal_elemwise\ndef gamma(a):\n \"\"\"gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammaln(a):\n \"\"\"log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef psi(a):\n \"\"\"derivative of log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef tri_gamma(a):\n \"\"\"second derivative of the log gamma function\"\"\"\n\n\n@_scal_elemwise\ndef chi2sf(x, k):\n \"\"\"chi squared survival function\"\"\"\n\n\n@_scal_elemwise\ndef gammainc(k, x):\n \"\"\"Regularized lower gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammaincc(k, x):\n \"\"\"Regularized upper gamma function\"\"\"\n\n\n@_scal_elemwise\ndef gammau(k, x):\n \"\"\"Upper incomplete gamma function.\"\"\"\n\n\n@_scal_elemwise\ndef gammal(k, x):\n \"\"\"Lower incomplete gamma function.\"\"\"\n\n\n@_scal_elemwise\ndef j0(x):\n \"\"\"Bessel function of the first kind of order 0.\"\"\"\n\n\n@_scal_elemwise\ndef j1(x):\n \"\"\"Bessel function of the first kind of order 1.\"\"\"\n\n\n@_scal_elemwise\ndef jv(v, x):\n \"\"\"Bessel function of the first kind of order v (real).\"\"\"\n\n\n@_scal_elemwise\ndef i0(x):\n \"\"\"Modified Bessel function of the first kind of order 0.\"\"\"\n\n\n@_scal_elemwise\ndef i1(x):\n 
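# A quick check of cov against NumPy (both default to the unbiased ddof=1
# normalization); assumes a working Theano install, data is illustrative.
import numpy as np
import theano
import theano.tensor as tt

m = tt.dmatrix("m")
f = theano.function([m], tt.cov(m))
data = np.random.randn(3, 100)    # 3 variables, 100 observations
# np.allclose(f(data), np.cov(data)) -> True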
\"\"\"Modified Bessel function of the first kind of order 1.\"\"\"\n\n\n@_scal_elemwise\ndef iv(v, x):\n \"\"\"Modified Bessel function of the first kind of order v (real).\"\"\"\n\n\n@_scal_elemwise\ndef real(z):\n \"\"\"Return real component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.real = property(real)\n\n\n@_scal_elemwise\ndef imag(z):\n \"\"\"Return imaginary component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.imag = property(imag)\n\n\n@_scal_elemwise\ndef angle(z):\n \"\"\"Return polar-coordinate angle of complex-valued tensor `z`\"\"\"\n\n\n@_scal_elemwise # numpy.complex cannot build tensors\ndef complex(real, imag):\n \"\"\"Return complex-valued tensor with `real` and `imag` components\"\"\"\n\n\n@_scal_elemwise\ndef conj(z):\n \"\"\"Return the complex conjugate of `z`.\"\"\"\n\n\n@_scal_elemwise\ndef complex_from_polar(abs, angle):\n \"\"\"Return complex-valued tensor from polar coordinate specification.\"\"\"\n\n\n##########################\n# Misc\n##########################\n\n\n# fill, _fill_inplace = _elemwise(scal.second, 'fill',\n# \"\"\"fill WRITEME (elemwise)\"\"\")\n@_scal_elemwise\ndef second(a, b):\n \"\"\"Create a matrix by filling the shape of a with b\"\"\"\n\n\nfill = second\npprint.assign(fill, printing.FunctionPrinter(\"fill\"))\n\n\n@constructor\ndef ones_like(model, dtype=None, opt=False):\n \"\"\"equivalent of numpy.ones_like\n Parameters\n ----------\n model : tensor\n dtype : data-type, optional\n opt : If True, we will return a constant instead of a graph when possible.\n Useful for Theano optimization, not for user building a graph as this\n have the consequence that model isn't always in the graph.\n\n Returns\n -------\n tensor\n tensor the shape of model containing ones of the type of dtype.\n \"\"\"\n if dtype is None:\n dtype = model.type.dtype\n ret = constant(1.0, dtype=dtype)\n if opt and ret.type == model.type:\n return ret\n return fill(model, ret)\n\n\n@constructor\ndef zeros_like(model, dtype=None, opt=False):\n \"\"\"equivalent of numpy.zeros_like\n Parameters\n ----------\n model : tensor\n dtype : data-type, optional\n opt : If True, we will return a constant instead of a graph when possible.\n Useful for Theano optimization, not for user building a graph as this\n have the consequence that model isn't always in the graph.\n\n Returns\n -------\n tensor\n tensor the shape of model containing zeros of the type of dtype.\n \"\"\"\n\n if dtype is None:\n dtype = model.type.dtype\n ret = constant(0.0, dtype=dtype)\n if opt and ret.type == model.type:\n return ret\n return fill(model, ret)\n\n\ndef zeros(shape, dtype=None):\n \"\"\"\n Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.\n \"\"\"\n if not isinstance(shape, (np.ndarray, Sequence, TensorVariable)):\n shape = [shape]\n if dtype is None:\n dtype = config.floatX\n return alloc(np.array(0, dtype=dtype), *shape)\n\n\ndef ones(shape, dtype=None):\n \"\"\"\n Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.\n \"\"\"\n if not isinstance(shape, (np.ndarray, Sequence, TensorVariable)):\n shape = [shape]\n if dtype is None:\n dtype = config.floatX\n return alloc(np.array(1, dtype=dtype), *shape)\n\n\nclass Nonzero(gof.Op):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Parameters\n ----------\n a: array_like\n Input array.\n\n Returns\n -------\n indices: list\n A list containing the indices of the non-zero elements of `a`.\n\n See Also\n --------\n nonzero_values : Return the 
non-zero elements of the input array\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a):\n a = as_tensor_variable(a)\n if a.ndim == 0:\n raise ValueError(\"Nonzero only supports non-scalar arrays.\")\n output = [\n TensorType(dtype=\"int64\", broadcastable=(False,))() for i in range(a.ndim)\n ]\n return gof.Apply(self, [a], output)\n\n def perform(self, node, inp, out_):\n a = inp[0]\n\n result_tuple = np.nonzero(a)\n for i, res in enumerate(result_tuple):\n out_[i][0] = res.astype(\"int64\")\n\n def grad(self, inp, grads):\n return [grad_undefined(self, 0, inp[0])]\n\n\n_nonzero = Nonzero()\n\n\ndef nonzero(a, return_matrix=False):\n \"\"\"\n Returns one of the following:\n\n If return_matrix is False (default, same as NumPy):\n A tuple of vector arrays such that the ith element of the jth array\n is the index of the ith non-zero element of the input array in the\n jth dimension.\n\n If return_matrix is True (same as Theano Op):\n Returns a matrix of shape (ndim, number of nonzero elements) such\n that element (i,j) is the index in the ith dimension of the jth\n non-zero element.\n\n Parameters\n ----------\n a : array_like\n Input array.\n return_matrix : bool\n If True, returns a symbolic matrix. If False, returns a tuple of\n arrays. Defaults to False.\n\n Returns\n -------\n tuple of vectors or matrix\n\n See Also\n --------\n nonzero_values : Return the non-zero elements of the input array\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n res = _nonzero(a)\n if isinstance(res, list):\n res = tuple(res)\n else:\n res = (res,)\n\n if return_matrix:\n if len(res) > 1:\n return stack(res, 0)\n elif len(res) == 1:\n return shape_padleft(res[0])\n else:\n return res\n\n\ndef flatnonzero(a):\n \"\"\"Return a vector of indices that are non-zero in the flattened version of `a`.\n\n Parameters\n ----------\n a : tensor\n Input tensor\n\n Returns\n -------\n vector\n Output vector, containing the indices of the elements of `a.flatten()`\n that are non-zero.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n nonzero_values : Return the non-zero elements of the input array\n\n \"\"\"\n if a.ndim == 0:\n raise ValueError(\"Nonzero only supports non-scalar arrays.\")\n return nonzero(a.flatten(), return_matrix=False)[0]\n\n\ndef nonzero_values(a):\n \"\"\"Return a vector of non-zero elements contained in the input array.\n\n Parameters\n ----------\n a : tensor\n Input tensor\n\n Returns\n -------\n vector\n Output vector, containing the non-zero elements of a.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n flatnonzero : Return the indices of the non-zero elements of the\n flattened input array.\n\n \"\"\"\n return a.flatten()[flatnonzero(a)]\n\n\nclass Tri(gof.Op):\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype=None):\n if dtype is None:\n dtype = config.floatX\n self.dtype = dtype\n\n def make_node(self, N, M, k):\n N = as_tensor_variable(N)\n M = as_tensor_variable(M)\n k = as_tensor_variable(k)\n return gof.Apply(\n self,\n [N, M, k],\n [TensorType(dtype=self.dtype, broadcastable=(False, False))()],\n )\n\n def perform(self, node, inp, out_):\n N, M, k = inp\n (out,) = out_\n out[0] = np.tri(N, M, k, dtype=self.dtype)\n\n def infer_shape(self, node, in_shapes):\n out_shape = [node.inputs[0], node.inputs[1]]\n return 
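# Usage sketch for the two calling conventions of nonzero above; assumes a
# working Theano install.
import numpy as np
import theano
import theano.tensor as tt

a = tt.dmatrix("a")
rows, cols = tt.nonzero(a)               # tuple form, like NumPy
mat = tt.nonzero(a, return_matrix=True)  # (ndim, n_nonzero) matrix form
f = theano.function([a], [rows, cols, mat])
# f(np.eye(2)) -> [array([0, 1]), array([0, 1]), array([[0, 1], [0, 1]])]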
[out_shape]\n\n def grad(self, inp, grads):\n return [grad_undefined(self, i, inp[i]) for i in range(3)]\n\n\ndef tri(N, M=None, k=0, dtype=None):\n \"\"\"\n An array with ones at and below the given diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the array.\n M : int, optional\n Number of columns in the array.\n By default, `M` is taken equal to `N`.\n k : int, optional\n The sub-diagonal at and below which the array is filled.\n `k` = 0 is the main diagonal, while `k` < 0 is below it,\n and `k` > 0 is above. The default is 0.\n dtype : dtype, optional\n Data type of the returned array. The default is float.\n\n Returns\n -------\n Array of shape (N, M)\n Array with its lower triangle filled with ones and zero elsewhere;\n in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if M is None:\n M = N\n op = Tri(dtype)\n return op(N, M, k)\n\n\ndef tril(m, k=0):\n \"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : array_like, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n array, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : Same thing, only for the upper triangle.\n\n \"\"\"\n return m * tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)\n\n\ndef triu(m, k=0):\n \"\"\"\n Upper triangle of an array.\n\n Return a copy of a matrix with the elements below the `k`-th diagonal\n zeroed.\n\n Please refer to the documentation for `tril` for further details.\n\n See Also\n --------\n tril : Lower triangle of an array.\n\n \"\"\"\n return m * (\n constant(1, dtype=m.dtype) - tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)\n )\n\n\nclass Eye(gof.Op):\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype=None):\n if dtype is None:\n dtype = config.floatX\n self.dtype = dtype\n\n def make_node(self, n, m, k):\n n = as_tensor_variable(n)\n m = as_tensor_variable(m)\n k = as_tensor_variable(k)\n assert n.ndim == 0\n assert m.ndim == 0\n assert k.ndim == 0\n return gof.Apply(\n self,\n [n, m, k],\n [TensorType(dtype=self.dtype, broadcastable=(False, False))()],\n )\n\n def perform(self, node, inp, out_):\n n, m, k = inp\n (out,) = out_\n out[0] = np.eye(n, m, k, dtype=self.dtype)\n\n def infer_shape(self, node, in_shapes):\n out_shape = [node.inputs[0], node.inputs[1]]\n return [out_shape]\n\n def grad(self, inp, grads):\n return [grad_undefined(self, i, inp[i]) for i in range(3)]\n\n\ndef eye(n, m=None, k=0, dtype=None):\n \"\"\"Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n n : int\n Number of rows in the output.\n m : int, optional\n Number of columns in the output. 
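# NumPy check of the mask identities used by tril and triu above.
import numpy as np

m = np.arange(9.0).reshape(3, 3)
assert np.allclose(np.tril(m), m * np.tri(3, 3, k=0))
assert np.allclose(np.triu(m), m * (1 - np.tri(3, 3, k=-1)))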
If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n ndarray of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if m is None:\n m = n\n localop = Eye(dtype)\n return localop(n, m, k)\n\n\ndef identity_like(x):\n return eye(x.shape[0], x.shape[1], k=0, dtype=x.dtype)\n\n\ndef alloc_validate_shape(shape):\n sh = [as_tensor_variable(s) for s in shape]\n bcast = []\n for i, s in enumerate(sh):\n\n def err_str():\n if config.exception_verbosity == \"high\":\n return \"\\n\" + min_informative_str(s)\n else:\n return str(s)\n\n if s.type.dtype not in integer_dtypes:\n s_as_str = err_str()\n raise TypeError(\n \"Shape arguments to Alloc must be integers, \"\n \"but argument %s is not for apply node: %s\" % (i, s_as_str)\n )\n if s.ndim != 0:\n s_as_str = err_str()\n raise TypeError(\n \"Each shape dimension to Alloc must be a scalar, \",\n \"but dimension %s have %d dimensions for apply node: %s\"\n % (i, s.ndim, s_as_str),\n )\n\n # if s is constant 1, then we're broadcastable in that dim\n try:\n const_shp = get_scalar_constant_value(s)\n except NotScalarConstantError:\n const_shp = None\n bcast.append(1 == const_shp)\n return sh, bcast\n\n\nclass Alloc(gof.Op):\n \"\"\"Create a Tensor from an initial value and a desired shape.\n\n alloc(value, shape0, shape1, ..., shapeN)\n\n Returns an N-dimensional tensor initialized by `value` using something\n equivalent to\n\n z = numpy.zeros(shape, value.dtype)\n z += value\n\n The result has N dimensions, has the dtype of `value` and is obtained by\n broadcasting value over the output ndarray.\n\n This Op is used to replace fill() during optimizations because after shapes\n are lifted, the first argument to fill can often be pruned from the graph.\n\n \"\"\"\n\n _f16_ok = True\n __props__ = ()\n\n def validate_shape(self, shape):\n return alloc_validate_shape(shape)\n\n def make_node(self, value, *shape):\n v = as_tensor_variable(value)\n sh, bcast = alloc_validate_shape(shape)\n if v.ndim > len(sh):\n raise TypeError(\n \"The Alloc value to use has more dimensions\"\n \" than the specified dimensions\",\n v.ndim,\n len(sh),\n )\n otype = TensorType(dtype=v.dtype, broadcastable=bcast)\n return gof.Apply(self, [v] + sh, [otype()])\n\n def perform(self, node, inputs, out_):\n (out,) = out_\n v = inputs[0]\n sh = tuple([int(i) for i in inputs[1:]])\n if out[0] is None or out[0].shape != sh:\n if v.size == 1 and v.item() == 0:\n out[0] = np.zeros(sh, dtype=v.dtype)\n else:\n out[0] = np.empty(sh, dtype=v.dtype)\n out[0][...] = v # broadcast v to fill us up\n else:\n # reuse the allocated memory.\n out[0][...] 
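# A short alloc usage sketch: broadcast a scalar into a freshly allocated
# shape; assumes a working Theano install.
import theano
import theano.tensor as tt

v = tt.dscalar("v")
f = theano.function([v], tt.alloc(v, 3, 4))
# f(2.5).shape == (3, 4), every element equal to 2.5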
= v # broadcast v to fill us up\n\n def c_code(self, node, name, inp, out, sub):\n vv = inp[0]\n ndim = len(inp[1:])\n (zz,) = out\n fail = sub[\"fail\"]\n\n code = \"\"\"\n npy_intp shape[%(ndim)s];\n \"\"\" % dict(\n ndim=ndim\n )\n\n # Initialize shape\n for i, shp_i in enumerate(inp[1:]):\n code += \"\"\"\n shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];\n \"\"\" % dict(\n i=i, shp_i=shp_i\n )\n\n code += \"\"\"\n int need_new_out = (NULL == %(zz)s);\n for (int i = 0; i < %(ndim)s; i++)\n need_new_out = (need_new_out\n || (PyArray_DIMS(%(zz)s)[i] != shape[i]));\n\n if (need_new_out)\n {\n Py_XDECREF(%(zz)s);\n %(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,\n shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));\n if (!%(zz)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"alloc failed\");\n %(fail)s\n }\n }\n\n // This function takes care of broadcasting\n if (PyArray_CopyInto(%(zz)s, %(vv)s) == -1)\n %(fail)s\n \"\"\" % dict(\n vv=vv, ndim=ndim, zz=zz, fail=fail\n )\n\n return code\n\n def c_code_cache_version(self):\n return (2,)\n\n def infer_shape(self, node, input_shapes):\n return [node.inputs[1:]]\n\n def connection_pattern(self, node):\n\n rval = [[True]]\n\n for ipt in node.inputs[1:]:\n rval.append([False])\n\n return rval\n\n def grad(self, inputs, grads):\n x = inputs[0]\n gz = grads[0]\n n_axes_to_sum = gz.ndim - x.ndim\n # The number of dimensions added\n axis = list(range(n_axes_to_sum))\n # The broadcasted dimensions\n axis_broadcasted = []\n axis_kept = []\n for i, (ib, gb) in enumerate(\n zip(\n inputs[0].broadcastable,\n # We need the dimensions corresponding to x\n grads[0].broadcastable[-inputs[0].ndim :],\n )\n ):\n if ib and not gb:\n axis_broadcasted.append(i + n_axes_to_sum)\n else:\n axis_kept.append(i)\n gx = gz.sum(axis=axis + axis_broadcasted)\n if axis_broadcasted:\n new_order = [\"x\"] * x.ndim\n for idx, axis in enumerate(axis_kept):\n new_order[axis] = idx\n gx = gx.dimshuffle(new_order)\n # Dimshuffle to add back the broadcasted dims\n # The *elements* of the output are not connected to\n # the inputs that specify the shape. If you grow the\n # shape by epsilon, the existing elements do not\n # change.\n return [gx] + [DisconnectedType()() for i in inputs[1:]]\n\n def __call__(self, val, *shapes, **kwargs):\n \"\"\"\n If the alloc would be useless, this function returns val.\n\n If this function is called outside of a graph optimization context\n (for instance, it is manually called by a user building a graph),\n then we always return an Alloc node, to allow for DebugMode to check\n for size mismatches.\n\n If you always want an Alloc node, call make_node.\n\n \"\"\"\n ret = super().__call__(val, *shapes, **kwargs)\n try:\n # It makes optimization difficult when useless allocs are thrown\n # into the graph at every stage of optimization. This little logic\n # tries to help at least in some cases.\n if hasattr(val, \"fgraph\") and (val.type == ret.type):\n return val\n except AttributeError:\n pass\n return ret\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self(eval_points[0], *inputs[1:], **dict(return_list=True))\n\n def do_constant_folding(self, node):\n if not getattr(node.outputs[0], \"clients\", []):\n # If there are no clients then there is no point doing constant\n # folding.\n return False\n for client in node.outputs[0].clients:\n if client[0] == \"output\":\n # If the output is a constant, it will have to be deepcopied\n # each time the function is called. 
So we do not fold.\n return False\n elif (\n # The following ops work inplace of their input id 0.\n client[1] == 0\n and isinstance(\n client[0].op,\n (\n # Ops that will work inplace on the Alloc. So if they\n # get constant_folded, they would copy the\n # constant and this is less efficients.\n # Not doing the constant folding could also lower\n # the peak memory usage, as we the \"constant\" won't\n # always exists.\n theano.tensor.subtensor.IncSubtensor,\n theano.tensor.subtensor.AdvancedIncSubtensor1,\n theano.tensor.subtensor.AdvancedIncSubtensor,\n theano.tensor.blas.Gemv,\n theano.tensor.blas_c.CGemv,\n theano.tensor.blas.Ger,\n theano.tensor.blas_c.CGer,\n theano.tensor.blas_scipy.ScipyGer,\n ),\n )\n ):\n return False\n # If the clients is a transfer to the GPU, we don't want to\n # fold. We let the Alloc being moved to the GPU, then we\n # let the GPU algo decide if it need to fold it or not.\n elif client[0].op.__class__.__name__.lower().startswith(\"gpu\"):\n return False\n return True\n\n\nalloc = Alloc()\npprint.assign(alloc, printing.FunctionPrinter(\"alloc\"))\n\n\ndef transfer(var, target):\n \"\"\"\n Return a version of `var` transferred to `target`.\n\n `cpu` mean a TensorType (on the CPU). Other types may define\n additional targets.\n\n Parameters\n ----------\n var : variable\n A theano variable\n target : str\n The target of the transfer\n \"\"\"\n if target == \"cpu\":\n return as_tensor_variable(var)\n else:\n for trans in transfer._others:\n res = trans(var, target)\n if res is not None:\n return res\n raise ValueError(\"Can't transfer to target {}\".format(target))\n\n\ntransfer._others = []\n\n\ndef register_transfer(fn):\n \"\"\"\n Register a transfer function for alternative targets.\n\n Parameters\n ----------\n fn : callable\n \"\"\"\n transfer._others.append(fn)\n\n\n\"\"\"Create a duplicate of `a` (with duplicated storage)\"\"\"\ntensor_copy = elemwise.Elemwise(scal.identity)\npprint.assign(tensor_copy, printing.IgnorePrinter())\n\n\n@constructor\ndef sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the sum along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the sum is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Sum``.\n In particular please pay attention to the important warning when using\n a custom acc_dtype.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\npprint.assign(Sum(), printing.FunctionPrinter(\"sum\"))\n\n\n@constructor\ndef prod(\n input,\n axis=None,\n dtype=None,\n keepdims=False,\n acc_dtype=None,\n no_zeros_in_input=False,\n):\n \"\"\"\n Computes the product along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the product is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Prod``.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
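# Usage sketch for sum with keepdims and an explicit accumulator dtype;
# assumes a working Theano install.
import theano
import theano.tensor as tt

x = tt.fmatrix("x")
s = tt.sum(x, axis=0, keepdims=True)   # shape (1, ncols), broadcasts against x
t = tt.sum(x, acc_dtype="float64")     # accumulate in float64, result float32
f = theano.function([x], [s, t])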
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = elemwise.Prod(\n axis, dtype=dtype, acc_dtype=acc_dtype, no_zeros_in_input=no_zeros_in_input\n )(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\nclass Mean(elemwise.CAReduce):\n def __init__(self, axis=None):\n super().__init__(scal.add, axis)\n assert self.axis is None or len(self.axis) == 1\n\n def __str__(self):\n if self.axis is not None:\n return \"Mean{%s}\" % (\", \".join(str(x) for x in self.axis))\n else:\n return \"Mean\"\n\n def _output_dtype(self, idtype):\n # we want to protect against overflow\n return \"float64\"\n\n def perform(self, node, inp, out):\n (input,) = inp\n (output,) = out\n if self.axis is None:\n axis = None\n else:\n axis = self.axis[0]\n # numpy.asarray is needed as otherwise we can end up with a\n # numpy scalar.\n output[0] = np.asarray(np.mean(input, dtype=\"float64\", axis=axis))\n\n def c_code(self, node, name, inames, onames, sub):\n if self.axis is not None:\n return super(Op, self).c_code(node, name, inames, onames, sub)\n ret = super().c_code(node, name, inames, onames, sub)\n # TODO: c_code perform support only axis is None\n return (\n ret\n + \"\"\"\n *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);\n \"\"\"\n % (onames[0], inames[0])\n )\n\n\n# TODO: implement the grad. When done and tested, you can make this the default\n# version.\n# def grad(self, (x,), (gout,)):\n# import pdb;pdb.set_trace()\n# return grad(mean(x, self.axis, op=False),[x])\n\n\n@constructor\ndef mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the mean value along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis : None or int or (list of int) (see `Sum`)\n Compute the mean along this axis of the tensor.\n None means all axes (like numpy).\n dtype: None or string\n Dtype to cast the result of the inner summation into.\n For instance, by default, a sum of a float32 tensor will be\n done in float64 (acc_dtype would be float64 by default),\n but that result will be casted back in float32.\n keepdims: bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n acc_dtype: None or string\n Dtype to use for the inner summation. This will not\n necessarily be the dtype of the output (in particular\n if it is a discrete (int/uint) dtype, the output will\n be in a float type). If None, then we use the same rules as `sum()`.\n\n Notes\n -----\n For gpu, if you specify dtype=float32, everything will be done on the gpu.\n\n \"\"\"\n input = as_tensor_variable(input)\n if op:\n if dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"the dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n if acc_dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the acc_dtype argument, \"\n \"and will always use float64. 
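# Usage sketch for mean's dtype handling described above, assuming a working
# Theano install: a float32 input accumulates in float64 by default and the
# result is cast back unless dtype says otherwise.
import theano
import theano.tensor as tt

x = tt.fmatrix("x")
m = tt.mean(x, axis=0)                     # float32 result, float64 accumulator
m64 = tt.mean(x, axis=0, dtype="float64")  # keep the result in float64
f = theano.function([x], [m, m64])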
If you want to specify \"\n \"acc_dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n out = Mean(axis)(input)\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n if dtype is not None:\n # The summation will be done with the specified dtype.\n # sum() will complain if it is not suitable.\n sum_dtype = dtype\n else:\n sum_dtype = None\n # float16 overflows on the cast way too often\n if input.dtype == \"float16\":\n sum_dtype = \"float32\"\n\n s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims, acc_dtype=acc_dtype)\n shp = shape(input)\n\n # Cast shp into a float type\n # TODO Once we have a consistent casting policy, we could simply\n # use true_div.\n if s.dtype in (\"float16\", \"float32\", \"complex64\"):\n shp = cast(shp, \"float32\")\n else:\n shp = cast(shp, \"float64\")\n\n if axis is None:\n axis = list(range(input.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # This sequential division will possibly be optimized by Theano:\n for i in axis:\n s = true_div(s, shp[i])\n\n # This can happen when axis is an empty list/tuple\n if s.dtype != shp.dtype and s.dtype in discrete_dtypes:\n s = cast(s, shp.dtype)\n\n if dtype == \"float16\" or (dtype is None and input.dtype == \"float16\"):\n s = cast(s, \"float16\")\n s.name = \"mean\"\n return s\n\n\n@constructor\ndef var(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the variance along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n Default uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Also supports 'corrected_two_pass' algorithm (using the 'corrected' flag)\n which is numerically more stable. 
There exist other implementations that\n offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n input_ndim = input.type.ndim\n if axis is None:\n axis = list(range(input_ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # compute the axis-wise mean\n mean_input = mean(input, axis, keepdims=True)\n\n # center the input\n centered_input = input - mean_input\n\n # return the mean sqr\n two = constant(2, dtype=centered_input.dtype)\n if ddof == 0:\n v = mean((centered_input ** two), axis, keepdims=keepdims)\n else:\n shp = shape(input) - ddof\n v = sum((centered_input ** two), axis=axis, keepdims=keepdims)\n for i in axis:\n v = true_div(v, shp[i])\n\n # use 'corrected_two_pass' algorithm\n if corrected:\n if ddof == 0:\n error = mean(centered_input, axis, keepdims=keepdims) ** 2\n else:\n shp = shape(input) - ddof\n shp_inp = shape(input)\n error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2\n for i in axis:\n error = true_div(error, shp[i] * shp_inp[i])\n v = v - error\n\n v.name = \"var\"\n return v\n\n\n@constructor\ndef std(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the standard deviation along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n It calls 'var()' and 'var()' uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Function 'var()' also supports 'corrected_two_pass' algorithm (using the\n 'corrected' flag) which is numerically more stable. 
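# NumPy sketch of the corrected_two_pass variance computed above for ddof=0:
# the squared mean of the centered data cancels accumulated rounding error.
import numpy as np

x = np.random.rand(1000)
c = x - x.mean()
v = (c ** 2).mean() - c.mean() ** 2
# v is close to np.var(x); the correction term is zero in exact arithmetic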
There exist other\n implementations that offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n ret = sqrt(\n var(input=input, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected)\n )\n ret.name = \"std\"\n return ret\n\n\nclass Default(gof.Op):\n \"\"\"\n Takes an input x and a default value.\n\n If the input is not None, a reference to it is returned.\n If the input is None, a copy of the default value is returned instead.\n The input and the default must have exactly the same type.\n\n \"\"\"\n\n view_map = {0: [0]}\n __props__ = ()\n\n def make_node(self, x, default):\n x, default = as_tensor_variable(x), as_tensor_variable(default)\n if x.type != default.type:\n raise TypeError(\"Both default() arguments must have same type\", x, default)\n return gof.Apply(self, [x, default], [default.type()])\n\n def perform(self, node, inp, out_):\n x, default = inp\n (out,) = out_\n if x is None:\n # why copy? Theano can't yet understand out[0] being a view of\n # either x or y, so we can be a view of x, but only a copy of y.\n out[0] = default.copy()\n else:\n out[0] = x\n\n\ndefault = Default()\nsetdefault = default # legacy\n\n\n##########################\n# Arithmetics\n##########################\n@_scal_elemwise\ndef maximum(x, y):\n \"\"\"elemwise maximum. See max for the maximum in one tensor\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef minimum(x, y):\n \"\"\"elemwise minimum. See min for the minimum in one tensor\"\"\"\n # see decorator for function body\n\n\ndef div_proxy(x, y):\n \"\"\"Proxy for either true_div or int_div, depending on types of x, y.\"\"\"\n f = scal.int_or_true_div(\n as_tensor_variable(x).dtype in discrete_dtypes,\n as_tensor_variable(y).dtype in discrete_dtypes,\n )\n if f is scal.int_div:\n return int_div(x, y)\n else:\n return true_div(x, y)\n\n\ndef divmod(x, y):\n \"\"\"elementvise divmod, using floor_div and mod_check\"\"\"\n return floor_div(x, y), mod_check(x, y)\n\n\n@_scal_elemwise\ndef add(a, *other_terms):\n \"\"\"elementwise addition\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef sub(a, b):\n \"\"\"elementwise subtraction\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef mul(a, *other_terms):\n \"\"\"elementwise multiplication\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef true_div(a, b):\n \"\"\"elementwise [true] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef int_div(a, b):\n \"\"\"elementwise [floor] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n# floor_div and int_div are the same thing\nfloor_div = int_div\n\n\ndef ceil_intdiv(a, b):\n \"\"\"\n Safely compute ceil(float_division(a, b)).\n\n Works for all dtypes, but mostly useful when a and b are int.\n\n \"\"\"\n # If a and b are int with not many significant bits, we could\n # cast them to float to avoid doing the modulo. We do not know if this\n # is faster or not. But this is not safe for int64 as the cast will\n # lose precision.\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))\n\n # We cast for the case when a and b are uint*. 
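# NumPy check of the integer ceil-division identity implemented by
# ceil_intdiv just below: ceil(a / b) == a // b + (a % b != 0).
import numpy as np

a = np.int64(7)
b = np.int64(2)
assert a // b + (a % b != 0) == 4     # == ceil(7 / 2)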
Otherwise neq will\n # force their upcast to int.\n div = int_div(a, b)\n ret = cast(neq(a % b, 0), div.dtype) + div\n assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])\n return ret\n\n\ndef mod_check(x, y):\n \"\"\"Make sure we do not try to use complex numbers.\"\"\"\n if (\n as_tensor_variable(x).dtype in complex_dtypes\n or as_tensor_variable(y).dtype in complex_dtypes\n ):\n # Currently forbidden.\n raise scal.Mod.complex_error\n else:\n return mod(x, y)\n\n\n@_scal_elemwise\ndef mod(a, b):\n \"\"\"elementwise modulo\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef pow(a, b):\n \"\"\"elementwise power\"\"\"\n # see decorator for function body\n\n\n@_scal_elemwise\ndef clip(x, min, max):\n \"\"\"\n Clip x to be between min and max.\n\n Notes\n -----\n When `x` is equal to the boundaries, the output is considered\n to be `x`, so at these points, the gradient of the cost wrt the output\n will be propagated to `x`, not to `min` nor `max`. In other words,\n on these points, the gradient wrt `x` will be equal to the gradient wrt\n the output, and the gradient wrt `min` and `max` will be zero.\n\n \"\"\"\n # see decorator for function body\n # for grep: clamp, bound\n\n\npprint.assign(add, printing.OperatorPrinter(\"+\", -2, \"either\"))\npprint.assign(mul, printing.OperatorPrinter(\"*\", -1, \"either\"))\npprint.assign(sub, printing.OperatorPrinter(\"-\", -2, \"left\"))\npprint.assign(neg, printing.OperatorPrinter(\"-\", 0, \"either\"))\npprint.assign(true_div, printing.OperatorPrinter(\"/\", -1, \"left\"))\npprint.assign(int_div, printing.OperatorPrinter(\"//\", -1, \"left\"))\npprint.assign(pow, printing.OperatorPrinter(\"**\", 1, \"right\"))\n\n\n##########################\n# View Operations\n##########################\n\n\ndef extract_constant(x, elemwise=True, only_process_constants=False):\n \"\"\"\n This function is basically a call to tensor.get_scalar_constant_value.\n\n The main difference is the behaviour in case of failure. While\n get_scalar_constant_value raises an TypeError, this function returns x,\n as a tensor if possible. If x is a ScalarVariable from a\n scalar_from_tensor, we remove the conversion. If x is just a\n ScalarVariable, we convert it to a tensor with tensor_from_scalar.\n\n \"\"\"\n try:\n x = get_scalar_constant_value(x, elemwise, only_process_constants)\n except NotScalarConstantError:\n pass\n if isinstance(x, scal.ScalarVariable) or isinstance(\n x, scal.sharedvar.ScalarSharedVariable\n ):\n if x.owner and isinstance(x.owner.op, ScalarFromTensor):\n x = x.owner.inputs[0]\n else:\n x = tensor_from_scalar(x)\n return x\n\n\ndef transpose(x, axes=None):\n \"\"\"\n Reorder the dimensions of x. (Default: reverse them)\n\n This is a macro around dimshuffle that matches the numpy.transpose function.\n\n \"\"\"\n if axes is None:\n axes = list(range((x.ndim - 1), -1, -1))\n ret = DimShuffle(x.broadcastable, axes)(x)\n if x.name and axes == list(range((x.ndim - 1), -1, -1)):\n ret.name = x.name + \".T\"\n return ret\n\n\ndef batched_dot(a, b):\n \"\"\"\n Compute the batched dot product of two variables:\n\n batched_dot(a, b)[i] = dot(a[i], b[i])\n\n Note that this batched_dot function does one of three things, in the\n following sequence:\n\n 1. If either a or b is a vector, it returns the batched elementwise\n product without calling the Theano BatchedDot op.\n\n 2. If both a and b have either 2 or 3 dimensions, it calls Theano's\n BatchedDot op on a and b.\n\n 3. 
If either a or b has more than 3 dimensions, it calls Theano's\n batched_tensordot function with appropriate axes. The\n batched_tensordot function expresses high-dimensional batched\n dot products in terms of batched matrix-matrix dot products, so\n it may be possible to further optimize it for performance.\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0:\n raise TypeError(\"a must have at least one (batch) axis\")\n elif b.ndim == 0:\n raise TypeError(\"b must have at least one (batch) axis\")\n elif a.ndim == 1:\n return a.dimshuffle(*([0] + [\"x\"] * (b.ndim - 1))) * b\n elif b.ndim == 1:\n return a * b.dimshuffle(*([0] + [\"x\"] * (a.ndim - 1)))\n elif a.ndim > 3 or b.ndim > 3:\n return batched_tensordot(a, b, [[a.ndim - 1], [np.maximum(1, b.ndim - 2)]])\n else:\n # avoid circular import\n return theano.tensor.blas.BatchedDot()(a, b)\n\n\ndef batched_tensordot(x, y, axes=2):\n \"\"\"\n Compute a batched tensordot product.\n\n A hybrid of batched_dot and tensordot, this function computes the\n tensordot product between the two tensors, by iterating over the\n first dimension to perform a sequence of tensordots.\n\n Parameters\n ----------\n x : tensor\n A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)\n y : tensor\n A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes to sum\n over in each tensor.\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor (excluding the first\n (batch) dimension):\n axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 4]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 5th axis of b.\n\n Like tensordot, this function uses a series of dimshuffles and\n reshapes to reduce the tensor dot product to a matrix or vector\n dot product. 
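A minimal usage sketch (the\n shapes are illustrative only): for x of shape (batch, n, k) and y of\n shape (batch, k, m),\n\n >>> out = batched_tensordot(x, y, axes=1)\n\n sums over the last axis of x and the first non-batch axis of y, so\n that out[i] == dot(x[i], y[i]) and out has shape (batch, n, m).\n\n 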
Finally, it calls batched_dot to compute the result.\n \"\"\"\n return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)\n\n\ndef split(x, splits_size, n_splits, axis=0):\n the_split = Split(n_splits)\n return the_split(x, axis, splits_size)\n\n\nclass Split(Op):\n \"\"\"Partition a `TensorVariable` along some axis.\n\n Examples\n --------\n >>> x = vector()\n >>> splits = lvector()\n You have to declare right away how many split_points there will be.\n >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)\n >>> f = function([x, splits], [ra, rb, rc])\n >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1])\n a == [0,1,2]\n b == [3, 4]\n c == [5]\n\n \"\"\"\n\n len_splits = None\n \"\"\"A Split instance will have this many outputs, and require that\n the splits argument to `perform` have exactly this many elements.\n \"\"\"\n __props__ = (\"len_splits\",)\n\n def __init__(self, len_splits):\n self.len_splits = int(len_splits)\n\n def __str__(self):\n return self.__class__.__name__ + \"{%s}\" % self.len_splits\n\n def make_node(self, x, axis, splits):\n \"\"\"WRITEME\"\"\"\n x = as_tensor_variable(x)\n axis = as_tensor_variable(axis)\n splits = as_tensor_variable(splits)\n\n if splits.type not in int_vector_types:\n raise TypeError(\"splits must have type tensor.lvector\", splits.type)\n if axis.type not in int_types:\n raise TypeError(\"axis must have type lscalar\", axis.type)\n\n # # The following lines are necessary if we allow splits of zero\n # if isinstance(axis, gof.Constant):\n # x = unbroadcast(x, int(axis.data))\n # else:\n # x = unbroadcast(x, *range(x.type.ndim))\n\n inputs = [x, axis, splits]\n outputs = [x.type() for i in range(self.len_splits)]\n\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inputs, outputs):\n \"\"\"WRITEME\"\"\"\n x, axis, splits = inputs\n\n try:\n len_along_axis = x.shape[axis]\n except Exception:\n raise ValueError(\n \"Split.perform() with axis=(%s) is invalid\"\n \" for x.shape==(%s)\" % (axis, x.shape)\n )\n if len(splits) != self.len_splits:\n raise ValueError(\n \"In Split.perform(), len(splits) != len_splits.\",\n (len(splits), self.len_splits),\n )\n\n if np.sum(splits) != len_along_axis:\n raise ValueError(\n \"The splits sum to {}, expected {}\".format(\n np.sum(splits), len_along_axis\n )\n )\n if python_any([nb < 0 for nb in splits]):\n raise ValueError(\n \"Split: you tried to make an ndarray with a \"\n \"negative number of elements.\"\n )\n\n # Checking is done, let's roll the splitting algorithm!\n # Basically we step along the given axis of x, extracting\n # subtensors of size splits[i] as we go along.\n\n general_key = [slice(None, None, None) for s in x.shape]\n lower_idx = 0\n for i in range(self.len_splits):\n upper_idx = lower_idx + splits[i]\n general_key[axis] = slice(lower_idx, upper_idx, None)\n outputs[i][0] = x.__getitem__(tuple(general_key)).copy()\n lower_idx = upper_idx\n\n def infer_shape(self, node, in_shapes):\n axis = node.inputs[1]\n splits = node.inputs[2]\n shp_x, shp_axis, shp_splits = in_shapes\n out_shapes = []\n for i in range(self.len_splits):\n temp = as_tensor_variable(shp_x)\n temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])\n temp = [temp[i] for i in range(len(shp_x))]\n out_shapes.append(temp)\n return out_shapes\n\n def grad(self, inputs, g_outputs):\n \"\"\"Join the gradients along the axis that was used to split x.\"\"\"\n x, axis, n = inputs\n outputs = self(*inputs, **dict(return_list=True))\n # If all the output gradients are disconnected, then so are the 
inputs\n if python_all([isinstance(g.type, DisconnectedType) for g in g_outputs]):\n return [\n DisconnectedType()(),\n grad_undefined(self, 1, axis),\n grad_undefined(self, 2, n),\n ]\n # Else, we have to make them zeros before joining them\n new_g_outputs = []\n for o, g in zip(outputs, g_outputs):\n if isinstance(g.type, DisconnectedType):\n new_g_outputs.append(o.zeros_like())\n else:\n new_g_outputs.append(g)\n\n return [\n join(axis, *new_g_outputs),\n grad_undefined(self, 1, axis),\n grad_undefined(self, 2, n),\n ]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None for i in self.len_splits]\n return self.make_node(eval_points[0], *inputs[1:]).outputs\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_support_code(self):\n return \"\"\"\n /* Return 1 if output has the correct shape. */\n int split_output_shape_is_correct (\n PyArrayObject* output, PyArrayObject* array_to_split, int axis_to_split, npy_intp split_size\n ) {\n return\n PyArray_NDIM(output) == PyArray_NDIM(array_to_split)\n && memcmp(\n PyArray_DIMS(output),\n PyArray_DIMS(array_to_split),\n axis_to_split * sizeof(npy_intp)\n ) == 0\n && memcmp(\n PyArray_DIMS(output) + axis_to_split + 1,\n PyArray_DIMS(array_to_split) + axis_to_split + 1,\n (PyArray_NDIM(array_to_split) - axis_to_split - 1) * sizeof(npy_intp)\n ) == 0\n && split_size == PyArray_DIM(output, axis_to_split);\n }\n \"\"\"\n\n def c_code(self, node, name, inputs, outputs, sub):\n if self.len_splits == 0:\n # There are no outputs, then nothing to do.\n return \"\"\n\n # outputs_pointers lists the addresses of the pointers to the outputs.\n outputs_pointers = \"&\" + (\", &\".join(outputs))\n x, axis, splits = inputs\n fail = sub[\"fail\"]\n x_typenum = np.dtype(node.inputs[0].dtype).num\n x_itemsize = np.dtype(node.inputs[0].dtype).itemsize\n axis_dtype = node.inputs[1].type.dtype_specs()[1]\n splits_dtype = node.inputs[2].type.dtype_specs()[1]\n expected_splits_count = self.len_splits\n\n return (\n \"\"\"\n int ndim = PyArray_NDIM(%(x)s);\n int axis = (int)(*(%(axis_dtype)s*)PyArray_GETPTR1(%(axis)s, 0));\n int splits_count = PyArray_DIM(%(splits)s, 0);\n npy_intp len_along_axis, sum_of_splits = 0, current_split_length = 0, current_split_start = 0;\n npy_intp* split_dims = NULL;\n PyObject* split_view = NULL;\n npy_intp data_offset;\n int i;\n PyArrayObject** outputs[] = {%(outputs_pointers)s};\n\n /* Check inputs. */\n\n if (splits_count != %(expected_splits_count)s) {\n PyErr_Format(PyExc_ValueError,\n \"Split: splits count (%%d) != expected count (%%d).\", splits_count, %(expected_splits_count)s);\n %(fail)s\n }\n\n if (axis < 0) {\n axis += ndim;\n }\n if (axis < 0 || axis >= ndim) {\n PyErr_Format(PyExc_IndexError, \"Split: invalid axis %%d for a %%d-D array.\", axis, ndim);\n %(fail)s\n }\n len_along_axis = PyArray_DIM(%(x)s, axis);\n\n for (i = 0; i < splits_count; ++i) {\n current_split_length = (npy_intp)(*(%(splits_dtype)s*)PyArray_GETPTR1(%(splits)s, i));\n if (current_split_length < 0) {\n PyErr_Format(PyExc_ValueError,\n \"Split: you try to take a negative number (%%ld) of elements.\", current_split_length);\n %(fail)s\n }\n sum_of_splits += current_split_length;\n }\n if (sum_of_splits != len_along_axis) {\n PyErr_Format(PyExc_ValueError, \"Split: the splits sums to %%ld, expected %%ld.\", sum_of_splits, len_along_axis);\n %(fail)s\n }\n\n /* Check outputs. 
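The loop below (re)allocates any\n output that is missing or whose shape does not match its split\n length. 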
*/\n\n split_dims = (npy_intp*) malloc(ndim * sizeof(npy_intp));\n if (split_dims == NULL) {\n PyErr_NoMemory();\n %(fail)s\n }\n\n memcpy(split_dims, PyArray_DIMS(%(x)s), ndim * sizeof(npy_intp));\n\n for (i = 0; i < splits_count; ++i) {\n PyArrayObject** output = outputs[i];\n current_split_length = (npy_intp) (* (%(splits_dtype)s*) PyArray_GETPTR1(%(splits)s, i));\n if (*output == NULL || !split_output_shape_is_correct(*output, %(x)s, axis, current_split_length)) {\n Py_XDECREF(*output);\n split_dims[axis] = current_split_length;\n *output = (PyArrayObject*)PyArray_EMPTY(ndim, split_dims, %(x_typenum)s, PyArray_IS_F_CONTIGUOUS(%(x)s));\n if (outputs == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to allocate an output.\");\n free(split_dims);\n %(fail)s\n }\n }\n }\n\n /* Compute split. */\n\n for (i = 0; i < splits_count; ++i) {\n current_split_length = (npy_intp) (* (%(splits_dtype)s*) PyArray_GETPTR1(%(splits)s, i));\n data_offset = PyArray_STRIDE(%(x)s, axis) * current_split_start;\n split_dims[axis] = current_split_length;\n split_view = PyArray_New(&PyArray_Type,\n ndim, split_dims,\n %(x_typenum)s,\n PyArray_STRIDES(%(x)s),\n PyArray_BYTES(%(x)s) + data_offset,\n %(x_itemsize)s,\n PyArray_FLAGS(%(x)s),\n NULL);\n if (split_view == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to create a view for a split.\");\n free(split_dims);\n %(fail)s\n }\n if (PyArray_CopyInto(*outputs[i], (PyArrayObject*)split_view) != 0) {\n PyErr_SetString(PyExc_RuntimeError, \"Split: unable to copy a split view into the output.\");\n Py_XDECREF(split_view);\n free(split_dims);\n %(fail)s\n }\n Py_XDECREF(split_view);\n current_split_start += current_split_length;\n }\n\n free(split_dims);\n \"\"\"\n % locals()\n )\n\n\ndef addbroadcast(x, *axes):\n \"\"\"\n Make the input broadcastable in the specified axes.\n\n For example, addbroadcast(x, 0) will make the first dimension of\n x broadcastable. When performing the function, if the length of\n x along that dimension is not 1, a ValueError will be raised.\n\n We apply the opt here not to pollute the graph especially during\n the gpu optimization\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n axis : an int or an iterable object such as list or tuple of int values\n The dimension along which the tensor x should be broadcastable.\n If the length of x along these dimensions is not 1, a ValueError will\n be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is broadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(axis, True) for axis in axes])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\ndef unbroadcast(x, *axes):\n \"\"\"\n Make the input impossible to broadcast in the specified axes.\n\n For example, addbroadcast(x, 0) will make the first dimension\n of x broadcastable. 
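Likewise, unbroadcast(x, 0) marks the first\n dimension of x as not broadcastable. A doctest-style sketch (names\n are illustrative; row() builds a matrix whose first dimension is\n broadcastable):\n\n >>> r = row()\n >>> r.broadcastable\n (True, False)\n >>> unbroadcast(r, 0).broadcastable\n (False, False)\n\n 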
When performing the function, if the length\n of x along that dimension is not 1, a ValueError will be raised.\n\n We apply the opt here not to pollute the graph especially during\n the gpu optimization\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n axis : an int or an iterable object such as list or tuple of int values\n The dimension along which the tensor x should be unbroadcastable.\n If the length of x along these dimensions is not 1, a ValueError will\n be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is unbroadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(axis, False) for axis in axes])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\ndef patternbroadcast(x, broadcastable):\n \"\"\"\n Make the input adopt a specific broadcasting pattern.\n\n Broadcastable must be iterable. For example,\n patternbroadcast(x, (True, False)) will make the first\n dimension of x broadcastable and the second dimension\n not broadcastable, so x will now be a row.\n\n We apply the opt here not to pollute the graph especially during the gpu\n optimization.\n\n Parameters\n ----------\n x : tensor_like\n Input theano tensor.\n broadcastable : an iterable object such as list or tuple of bool values\n A set of boolean values indicating whether a dimension should be\n broadcastable or not. If the length of x along these dimensions is\n not 1, a ValueError will be raised.\n\n Returns\n -------\n tensor\n A theano tensor, which is unbroadcastable along the specified dimensions.\n\n \"\"\"\n rval = Rebroadcast(*[(i, broadcastable[i]) for i in range(len(broadcastable))])(x)\n return theano.tensor.opt.apply_rebroadcast_opt(rval)\n\n\nclass Join(Op):\n \"\"\"\n Concatenate multiple `TensorVariable`s along some axis.\n\n The axis must be given as first argument. All tensors must have the same\n shape along all dimensions other than this axis.\n Of course, TensorVariable instances do not have a shape, so this error\n cannot be caught until runtime. See `perform()`.\n\n See Also\n --------\n stack : For joins involving scalar values\n\n Examples\n --------\n >>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()\n >>> u = tensor.vector()\n\n >>> r = join(0, x, y, z)\n >>> c = join(1, x, y, z)\n >>> join(2, x, y, z) # WRONG: the axis has to be an index into the shape\n >>> join(0, x, u) # WRONG: joined tensors must have the same rank\n\n \"\"\"\n\n check_input = False\n __props__ = (\"view\",)\n\n def __init__(self, view=-1):\n self.view = view\n if view != -1:\n # since the first input is always the axis, the tensors\n # start from index 1.\n self.view_map = {0: [1 + view]}\n\n def __str__(self):\n if self.view == -1:\n return self.__class__.__name__\n else:\n return \"{}{{{}}}\".format(\n self.__class__.__name__,\n \", \".join(\n \"{}={!r}\".format(p, getattr(self, p)) for p in self.__props__\n ),\n )\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, \"view\"):\n self.view = -1\n\n def make_node(self, *axis_and_tensors):\n \"\"\"\n Parameters\n ----------\n axis: an Int or integer-valued Variable\n tensors\n A variable number (but not zero) of tensors to\n concatenate along the specified axis. 
These tensors must have\n the same shape along all dimensions other than this axis.\n\n Returns\n -------\n A symbolic Variable\n It has the same ndim as the input tensors, and the most inclusive\n dtype.\n\n \"\"\"\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n if not tensors:\n raise ValueError(\"Cannot join an empty list of tensors\")\n as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]\n\n dtypes = [x.type.dtype for x in as_tensor_variable_args]\n out_dtype = scal.upcast(*dtypes)\n\n def output_maker(bcastable):\n return tensor(dtype=out_dtype, broadcastable=bcastable)\n\n return self._make_node_internal(\n axis, tensors, as_tensor_variable_args, output_maker\n )\n\n def _make_node_internal(self, axis, tensors, as_tensor_variable_args, output_maker):\n if not python_all(targs.type.ndim for targs in as_tensor_variable_args):\n raise TypeError(\n \"Join cannot handle arguments of dimension 0.\"\n \" For joining scalar values, see @stack\"\n )\n # Handle single-tensor joins immediately.\n if len(as_tensor_variable_args) == 1:\n bcastable = list(as_tensor_variable_args[0].type.broadcastable)\n else:\n # When the axis is fixed, a dimension should be\n # broadcastable if at least one of the inputs is\n # broadcastable on that dimension (see justification below),\n # except for the axis dimension.\n # Initialize bcastable all false, and then fill in some trues with\n # the loops.\n bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)\n ndim = len(bcastable)\n # Axis can also be a constant\n if not isinstance(axis, int):\n try:\n # Note : `get_scalar_constant_value` returns a ndarray not\n # an int\n axis = int(get_scalar_constant_value(axis))\n\n except NotScalarConstantError:\n pass\n if isinstance(axis, int):\n # Basically, broadcastable -> length 1, but the\n # converse does not hold. So we permit e.g. T/F/T\n # joins, and if they fail at runtime they fail, but if\n # they don't then it means that the argument where\n # that broadcastable flag was False had length 1 along\n # this dimension, and therefore this dimension should\n # be broadcastable for the output.\n\n if axis < -ndim:\n raise IndexError(\n \"Join axis %d out of bounds [0, %d)\" % (axis, ndim)\n )\n if axis < 0:\n axis += ndim\n\n for x in as_tensor_variable_args:\n for current_axis, bflag in enumerate(x.type.broadcastable):\n # Constant negative axis can no longer be negative at\n # this point. 
It safe to compare this way.\n if current_axis == axis:\n continue\n if bflag:\n bcastable[current_axis] = True\n try:\n bcastable[axis] = False\n except IndexError:\n raise ValueError(\n 'Join argument \"axis\" is out of range'\n \" (given input dimensions)\"\n )\n else:\n # When the axis may vary, no dimension can be guaranteed to be\n # broadcastable.\n bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)\n\n if not python_all(\n [x.ndim == len(bcastable) for x in as_tensor_variable_args[1:]]\n ):\n raise TypeError(\n \"Join() can only join tensors with the same \" \"number of dimensions.\"\n )\n\n inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)\n if inputs[0].type not in int_types:\n raise TypeError(\n \"Axis could not be cast to an integer type\",\n axis,\n inputs[0].type,\n int_types,\n )\n\n outputs = [output_maker(bcastable)]\n\n node = Apply(self, inputs, outputs)\n return node\n\n def perform(self, node, axis_and_tensors, out_):\n (out,) = out_\n view = self.view\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n # we check these tensors for being empty.\n if (view != -1) and np.all(\n [\n tensor.shape[axis] == 0\n for tensor in tensors[0:view] + tensors[view + 1 :]\n ]\n ):\n out[0] = tensors[view]\n\n else:\n ndim = tensors[0].ndim\n if axis < -ndim:\n raise IndexError(\"Join axis %d out of bounds [0, %d)\" % (axis, ndim))\n\n out[0] = theano._asarray(\n np.concatenate(tensors, axis=axis), dtype=node.outputs[0].type.dtype\n )\n\n def c_code_cache_version(self):\n return (5,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n axis, tensors = inputs[0], inputs[1:]\n view = self.view\n non_empty_tensor = tensors[view]\n input_1 = tensors[0]\n l = len(tensors)\n (out,) = outputs\n fail = sub[\"fail\"]\n adtype = node.inputs[0].type.dtype_specs()[1]\n copy_to_list = []\n\n for i, inp in enumerate(tensors):\n copy_to_list.append(\n \"\"\"Py_INCREF(%s);\n PyList_SetItem(list, %s, (PyObject*)%s);\"\"\"\n % (inp, i, inp)\n )\n\n copy_inputs_to_list = \"\\n\".join(copy_to_list)\n n = len(tensors)\n\n code = (\n \"\"\"\n int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];\n PyObject* list = PyList_New(%(l)s);\n %(copy_inputs_to_list)s\n int tensors_lens_sum;\n if(%(view)s != -1) {\n tensors_lens_sum = 0;\n\n for(int i=0; i < %(n)s; i++){\n tensors_lens_sum += PyArray_DIM((PyArrayObject *)(PyList_GetItem(list, i)), axis);\n }\n tensors_lens_sum -= PyArray_DIM(%(non_empty_tensor)s, axis);\n }\n if(%(view)s != -1 && tensors_lens_sum == 0) {\n Py_XDECREF(%(out)s);\n Py_INCREF(%(non_empty_tensor)s);\n %(out)s = %(non_empty_tensor)s;\n }else{\n //PyObject* PyArray_Concatenate(PyObject* obj, int axis)\n int ndim = PyArray_NDIM(%(input_1)s);\n if( axis < -ndim ){\n PyErr_Format(PyExc_IndexError,\n \"Join axis %%d out of bounds [0, %%d)\", axis, ndim);\n %(fail)s\n }\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);\n Py_DECREF(list);\n if(!%(out)s){\n %(fail)s\n }\n }\n \"\"\"\n % locals()\n )\n return code\n\n def R_op(self, inputs, eval_points):\n if None in eval_points[1:]:\n return [None]\n return self.make_node(inputs[0], *eval_points[1:]).outputs\n\n def grad(self, axis_and_tensors, grads):\n \"\"\"The gradient wrt a join op is a `Split`, used to partition\n the gradient along the `axis` which was used for joining.\n \"\"\"\n (gz,) = grads\n axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]\n\n rval = [grad_undefined(self, 0, axis)]\n\n dtypes = [as_tensor_variable(x).type.dtype for x in 
tensors]\n out_dtype = scal.upcast(*dtypes)\n\n if \"float\" in out_dtype or \"complex\" in out_dtype:\n # assume that this is differentiable\n split = Split(len(tensors))\n split_gz = split(gz, axis, stack([shape(x)[axis] for x in tensors]))\n # If there is only one split, it might not be in a list.\n if not isinstance(split_gz, list):\n split_gz = [split_gz]\n # Split.make_node isn't always able to infer the right\n # broadcast. As the grad need to keep the information,\n # read it if needed.\n split_gz = [\n patternbroadcast(g, t.broadcastable) for t, g in zip(tensors, split_gz)\n ]\n rval = rval + split_gz\n else:\n # the output has integer type, so the gradient through it\n # is 0\n rval = rval + [tensor.zeros_like(dtype=config.floatX) for tensor in tensors]\n\n return rval\n\n def infer_shape(self, node, ishapes):\n # ishapes[0] contains the size of the axis on which we join\n # Join op should get at least one input to join\n assert len(ishapes) > 1\n n_dim = len(ishapes[1])\n for shp in ishapes[1:]:\n assert shp is not None\n assert len(shp) == n_dim\n\n # The joining dimension could be negative, but we need it to be\n # in [0, n_dim) in the loop below.\n # An axis < -n_dim or >= ndim would be invalid, but this is\n # not checked here. An Assert op would be a way of addressing that,\n # but it may disrupt optimizations.\n join_dim = switch(ge(node.inputs[0], 0), node.inputs[0], node.inputs[0] + n_dim)\n out_shapes = []\n for dim in range(n_dim):\n # we have to deal with 2 possible cases in here :\n # a) we are dealing with the dimension for which we join\n # (called t_side from true side of the if, where the if\n # compares current dimension with the joining dimension)\n # b) a non joining dimension ( in which maybe a symbolic\n # assertion can be used to make sure all tensors have\n # the same number of elements on this non-joined dimension\n # this is f_side\n # initialize\n t_side = ishapes[1][dim]\n f_side = ishapes[1][dim]\n # loop over tensors and sum for the joining dimension\n for shp in ishapes[2:]:\n t_side = t_side + shp[dim]\n # return the dimensions found\n out_shapes.append(switch(eq(dim, join_dim), t_side, f_side))\n\n return [tuple(out_shapes)]\n\n\njoin_ = Join()\npprint.assign(Join, printing.FunctionPrinter(\"join\"))\n\n\ndef join(axis, *tensors_list):\n \"\"\"\n Convenience function to concatenate `TensorType`s along the given axis.\n\n This function will not add the op in the graph when it is not useful.\n For example, in the case that the list of tensors to be concatenated\n is one, it will just return the tensor.\n\n Parameters\n ----------\n tensors : list of tensors (or list-like)\n A list of tensors to be concatenated along the given axis.\n The shapes of the tensors to be concatenated must be all\n identical, except in the dimension (`axis`) on which they are to\n be joined.\n axis : int (symbolic or literal)\n On which dimension should the tensors be joined? The `axis`\n must be a valid index into the shape of the tensors to be\n concatenated.\n The `axis` parameter may either be an integer or an object that\n can be converted to a scalar using `as_scalar`(`axis`). 
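A minimal sketch (variable\n names are illustrative only):\n\n >>> a = matrix()\n >>> b = matrix()\n >>> c = join(0, a, b) # the rows of b appended below the rows of a\n\n The axis may thus be given either as a literal integer or as a\n symbolic scalar. 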
In the\n former case, the axis is fixed at construction, while in the\n latter it may vary over time depending on the value of the\n `axis` variable.\n \"\"\"\n if len(tensors_list) == 1:\n return tensors_list[0]\n else:\n return join_(axis, *tensors_list)\n\n\ndef roll(x, shift, axis=None):\n \"\"\"\n Convenience function to roll TensorTypes along the given axis.\n\n Syntax copies numpy.roll function.\n\n Parameters\n ----------\n x : tensor_like\n Input tensor.\n shift : int (symbolic or literal)\n The number of places by which elements are shifted.\n axis : int (symbolic or literal), optional\n The axis along which elements are shifted. By default, the array\n is flattened before shifting, after which the original\n shape is restored.\n\n Returns\n -------\n tensor\n Output tensor, with the same shape as ``x``.\n\n \"\"\"\n if axis is None:\n if x.ndim > 1:\n y = x.flatten()\n return roll(y, shift, axis=0).reshape(x.shape)\n else:\n axis = 0\n\n if axis < 0:\n axis += x.ndim\n\n # Shift may be larger than the size of the axis. If so, since the\n # roll operation is cyclic, we can take the shift modulo the size\n # of the axis\n shift = shift % x.shape[axis]\n\n # A slice of all elements in a dimension ':'\n allslice = slice(None)\n # List of slices describing the front half [:, :, shift:, :]\n front_slice = slice(-shift, None)\n front_list = [allslice] * axis + [front_slice] + [allslice] * (x.ndim - axis - 1)\n # List of slices describing the back half [:, :, :shift, :]\n end_slice = slice(0, -shift)\n end_list = [allslice] * axis + [end_slice] + [allslice] * (x.ndim - axis - 1)\n return join(axis, x.__getitem__(tuple(front_list)), x.__getitem__(tuple(end_list)))\n\n\n@constructor\ndef shape_padleft(t, n_ones=1):\n \"\"\"Reshape `t` by left-padding the shape with `n_ones` 1s.\n\n See Also\n --------\n shape_padaxis\n shape_padright\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n pattern = [\"x\"] * n_ones + [i for i in range(_t.type.ndim)]\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef shape_padright(t, n_ones=1):\n \"\"\"Reshape `t` by right-padding the shape with `n_ones` 1s.\n\n See Also\n --------\n shape_padaxis\n shape_padleft\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n pattern = [i for i in range(_t.type.ndim)] + [\"x\"] * n_ones\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef shape_padaxis(t, axis):\n \"\"\"Reshape `t` by inserting 1 at the dimension `axis`.\n\n Examples\n --------\n >>> tensor = theano.tensor.tensor3()\n >>> theano.tensor.shape_padaxis(tensor, axis=0)\n DimShuffle{x,0,1,2}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=1)\n DimShuffle{0,x,1,2}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=3)\n DimShuffle{0,1,2,x}.0\n >>> theano.tensor.shape_padaxis(tensor, axis=-1)\n DimShuffle{0,1,2,x}.0\n\n See Also\n --------\n shape_padleft\n shape_padright\n Dimshuffle\n\n \"\"\"\n _t = as_tensor_variable(t)\n\n ndim = _t.ndim + 1\n if not -ndim <= axis < ndim:\n msg = \"axis {0} is out of bounds [-{1}, {1})\".format(axis, ndim)\n raise IndexError(msg)\n if axis < 0:\n axis += ndim\n\n pattern = [i for i in range(_t.type.ndim)]\n pattern.insert(axis, \"x\")\n return DimShuffle(_t.broadcastable, pattern)(_t)\n\n\n@constructor\ndef stack(*tensors, **kwargs):\n \"\"\"Stack tensors in sequence on given axis (default is 0).\n\n Take a sequence of tensors and stack them on given axis to make a single\n tensor. 
The size in dimension `axis` of the result will be equal to the number\n of tensors passed.\n\n Note: The interface stack(*tensors) is deprecated, you should use\n stack(tensors, axis=0) instead.\n\n Parameters\n ----------\n tensors : list or tuple of tensors\n A list of tensors to be stacked.\n axis : int\n The index of the new axis. Default value is 0.\n\n Examples\n --------\n >>> a = theano.tensor.scalar()\n >>> b = theano.tensor.scalar()\n >>> c = theano.tensor.scalar()\n >>> x = theano.tensor.stack([a, b, c])\n >>> x.ndim # x is a vector of length 3.\n 1\n >>> a = theano.tensor.tensor4()\n >>> b = theano.tensor.tensor4()\n >>> c = theano.tensor.tensor4()\n >>> x = theano.tensor.stack([a, b, c])\n >>> x.ndim # x is a 5d tensor.\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis 0\n (3, 2, 2, 2, 2)\n >>> x = theano.tensor.stack([a, b, c], axis=3)\n >>> x.ndim\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis 3\n (2, 2, 2, 3, 2)\n >>> x = theano.tensor.stack([a, b, c], axis=-2)\n >>> x.ndim\n 5\n >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))\n >>> rval.shape # 3 tensors are stacked on axis -2\n (2, 2, 2, 3, 2)\n \"\"\"\n # ---> Remove this when moving to the new interface:\n if not tensors and not kwargs:\n raise Exception(\n \"theano.tensor.stack(tensors, axis) must have at least\" \" one parameter\"\n )\n\n if not kwargs and not isinstance(tensors[0], (list, tuple)):\n warnings.warn(\n \"stack(*tensors) interface is deprecated, use\"\n \" stack(tensors, axis=0) instead.\",\n DeprecationWarning,\n stacklevel=3,\n )\n axis = 0\n elif \"tensors\" in kwargs:\n tensors = kwargs[\"tensors\"]\n if \"axis\" in kwargs:\n axis = kwargs[\"axis\"]\n else:\n axis = 0\n else:\n if len(tensors) == 2:\n axis = tensors[1]\n elif \"axis\" in kwargs:\n axis = kwargs[\"axis\"]\n else:\n axis = 0\n tensors = tensors[0]\n # <--- Until here.\n\n if len(tensors) == 0:\n raise Exception(\n \"tensors is empty. 
You should at least provide one\"\n \" tensor to theano.tensor.stack(tensors, axis).\"\n )\n\n # If all tensors are scalars of the same type, call make_vector.\n # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts\n\n # This should be an optimization!\n # Doing it here make the graph less canonicalized\n # (more type need to be understood by all optimization)\n # And DebugMode can't detect error in this code as it is not in an\n # optimization.\n # See ticket #660\n if np.all(\n [ # in case there is direct int in tensors.\n isinstance(t, (np.number, float, int, python_complex))\n or (\n isinstance(t, Variable)\n and isinstance(t.type, TensorType)\n and t.ndim == 0\n )\n for t in tensors\n ]\n ):\n # in case there is direct int\n tensors = list(map(as_tensor_variable, tensors))\n dtype = scal.upcast(*[i.dtype for i in tensors])\n return theano.tensor.opt.MakeVector(dtype)(*tensors)\n return join(axis, *[shape_padaxis(t, axis) for t in tensors])\n\n\n@constructor\ndef concatenate(tensor_list, axis=0):\n \"\"\"Alias for `join`(axis, *tensor_list).\n\n This function is similar to `join`, but uses the signature of\n numpy's concatenate function.\n\n Raises\n ------\n TypeError\n The tensor_list must be a tuple or list.\n\n \"\"\"\n # Check someone did not make the common mistake to do something like:\n # c = concatenate(x, y)\n # instead of\n # c = concatenate((x, y))\n if not isinstance(tensor_list, (tuple, list)):\n raise TypeError(\n \"The 'tensors' argument must be either a tuple \"\n \"or a list, make sure you did not forget () or [] around \"\n \"arguments of concatenate.\",\n tensor_list,\n )\n return join(axis, *tensor_list)\n\n\ndef get_vector_length(v):\n \"\"\"Return the run-time length of a symbolic vector.\n\n Parameters\n ----------\n v\n A rank-1 TensorType variable.\n\n Raises\n ------\n TypeError\n `v` hasn't the proper type.\n ValueError\n No special case applies, the length is not known.\n In general this is not possible, but for a number of special cases\n the length can be determined at compile / graph-construction time.\n This function implements these special cases.\n\n \"\"\"\n v = as_tensor_variable(v)\n if v.ndim != 1:\n raise TypeError(\"argument must be symbolic vector, got '%s'\" % v)\n if v.type.broadcastable[0]:\n return 1\n if isinstance(v, theano.tensor.sharedvar.TensorSharedVariable) and v.type.ndim == 1:\n return len(v.get_value())\n if isinstance(v, gof.Constant) and v.type.ndim == 1:\n return len(v.data)\n if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):\n return len(v.owner.inputs)\n if v.owner and isinstance(v.owner.op, Shape):\n return v.owner.inputs[0].type.ndim\n # If we take a slice, we know how many elements it will result in\n if (\n v.owner\n and isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)\n and isinstance(v.owner.op.idx_list[0], slice)\n and v.owner.inputs[0].owner\n and isinstance(v.owner.inputs[0].owner.op, theano.compile.ops.Shape)\n ):\n start = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].start\n )\n stop = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].stop\n )\n step = extract_constant(\n theano.tensor.subtensor.get_idx_list(v.owner.inputs, v.owner.op.idx_list)[\n 0\n ].step\n )\n\n ndim = v.owner.inputs[0].owner.inputs[0].ndim\n types = (numbers.Integral, np.integer)\n if start is None:\n start = 0\n elif isinstance(start, types) and start < 0:\n start += ndim\n if start < 
0:\n start = 0\n if stop is None:\n stop = ndim\n elif isinstance(stop, types):\n if stop > ndim:\n stop = ndim\n elif stop < 0:\n stop += ndim\n if step is None:\n step = 1\n\n if (\n isinstance(stop, types)\n and isinstance(start, types)\n and isinstance(step, types)\n and start >= 0\n and stop >= 0\n and step > 0\n and stop >= start\n ):\n return (stop - start - 1) // step + 1\n if isinstance(v, Variable):\n msg = theano.printing.debugprint(v, file=\"str\")\n else:\n msg = str(v)\n raise ValueError(\"length not known: %s\" % msg)\n\n\n@constructor\ndef horizontal_stack(*args):\n \"\"\"\n Horizontally stack two L{TensorType}s.\n\n Stack two L{TensorType}s along the second axis (column wise). These\n L{TensorType}s must have the same shape along all dimensions but the\n second.\n\n \"\"\"\n # Note: 'horizontal_stack' and 'vertical_stack' do not behave exactly like\n # Numpy's hstack and vstack functions. This is intended, because Numpy's\n # functions have potentially confusing/incoherent behavior (try them on 1D\n # arrays). If this is fixed in a future version of Numpy, it may be worth\n # trying to get closer to Numpy's way of doing things. In the meantime,\n # better keep different names to emphasize the implementation divergences.\n assert len(args) >= 2\n for arg in args:\n assert arg.type.ndim == 2\n return concatenate(args, axis=1)\n\n\n@constructor\ndef vertical_stack(*args):\n assert len(args) >= 2\n for arg in args:\n assert arg.type.ndim == 2\n return concatenate(args, axis=0)\n\n\nclass Reshape(Op):\n \"\"\"Perform a reshape operation of the input x to the new shape shp.\n The number of dimensions to which to reshape to (ndim) must be\n known at graph build time.\n \"\"\"\n\n view_map = {0: [0]} # output 0 is potentially aliased to inputs [0]\n _f16_ok = True\n\n check_input = False\n __props__ = (\"ndim\",)\n params_type = ParamsType(ndim=int32)\n # name does not participate because it doesn't affect computations\n\n def __init__(self, ndim, name=None):\n self.ndim = int(ndim)\n if ndim < 0:\n raise ValueError(\"The output dimensions after reshape must be 0 or greater\")\n assert name is None, \"name attribute for Reshape has been deprecated\"\n\n def __str__(self):\n return \"{}{{{}}}\".format(self.__class__.__name__, self.ndim)\n\n def make_node(self, x, shp):\n x = as_tensor_variable(x)\n shp_orig = shp\n shp = as_tensor_variable(shp, ndim=1)\n if not (\n shp.dtype in int_dtypes\n or (isinstance(shp, TensorConstant) and shp.data.size == 0)\n ):\n # It raises an error if shp is not of integer type,\n # except when shp is constant and empty\n # (in this case, shp.dtype does not matter anymore).\n raise TypeError(\"Shape must be integers\", shp, shp.dtype)\n assert shp.ndim == 1\n if isinstance(shp, TensorConstant):\n bcast = [s == 1 for s in shp.data]\n return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])\n else:\n bcasts = [False] * self.ndim\n shp_list = shp_orig\n if hasattr(shp_orig, \"ndim\") and shp_orig.ndim == 0:\n shp_list = [shp_orig]\n for index in range(self.ndim):\n y = shp_list[index]\n y = as_tensor_variable(y)\n # Try to see if we can infer that y has a constant value of 1.\n # If so, that dimension should be broadcastable.\n try:\n bcasts[index] = (\n hasattr(y, \"get_scalar_constant_value\")\n and y.get_scalar_constant_value() == 1\n )\n except NotScalarConstantError:\n pass\n return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])\n\n def perform(self, node, inp, out_, params):\n x, shp = inp\n (out,) = out_\n if len(shp) != 
self.ndim:\n raise ValueError(\n \"shape argument to Reshape.perform has incorrect\"\n \" length %i\"\n \", should be %i\" % (len(shp), self.ndim),\n shp,\n )\n try:\n out[0] = np.reshape(x, shp)\n except Exception:\n raise ValueError(\n \"Cannot reshape input of shape {} to shape {}\".format(x.shape, shp)\n )\n\n def connection_pattern(self, node):\n return [[True], [False]]\n\n def grad(self, inp, grads):\n x, shp = inp\n (g_out,) = grads\n return [reshape(g_out, shape(x), ndim=x.ndim), DisconnectedType()()]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self(eval_points[0], *inputs[1:], **dict(return_list=True))\n\n def infer_shape(self, node, ishapes):\n # inputs[1] can contain at most one value of '-1', meaning the actual\n # shape of the output will be automatically computed by reshape, so\n # that the total number of elements stays the same.\n # TODO: Maybe put that formula here?\n # It's not trivial, because we would have to check if the product of\n # all the non-minus-one shapes is a divisor of the product of the\n # original shapes.\n\n # The following expression leads to cycles in feature_shape,\n # because it tries to replace the Shape_i node by the switch\n # statement, which depends on Shape_i.\n # return [tuple([switch(eq(node.inputs[1][i], -1),\n # theano.tensor.opt.Shape_i(i)(node.outputs[0]),\n # node.inputs[1][i])\n # for i in range(self.ndim)]\n # )]\n\n # Here, we only simplify if the shape (node.inputs[1]) is a constant,\n # ideally it would suffice to check that it is always non-negative.\n\n # If current variable is a scalar and its dimensionality should\n # change to self.ndim, then use size 1 for all new dimensions.\n if len(ishapes[0]) == 0:\n return [(1,) * self.ndim]\n\n requ = node.inputs[1]\n input_size = mul(*ishapes[0])\n if isinstance(requ, theano.tensor.TensorConstant):\n requ = list(requ.data)\n requ_part = [ele for ele in requ if ele != -1]\n crit = len(requ) - len(requ_part)\n if crit == 1 and len(requ_part) > 0:\n # If there are both 0 and -1 in requ_size, it is impossible\n # to determine a right output, but we can at least prevent\n # a division by 0. We do not want to keep a negative\n # size here as it could lead to further weird errors\n # after other optimizations.\n requ_size = mul(*requ_part)\n missing = input_size // (1 if requ_size == 0 else requ_size)\n for i, ele in enumerate(requ):\n if ele == -1:\n requ[i] = missing\n elif crit == 1: # we reshape to -1\n requ = [input_size] if ishapes[0] else [1]\n elif crit > 1:\n raise ValueError(\n \"shape argument to Reshape.perform\"\n \" must have at most one entry equal to -1\"\n )\n return [requ]\n else:\n requ = [requ[i] for i in range(self.ndim)]\n # since new_dims can have negative value (-1), the\n # multiplication of all values should be negated\n # to give a positive value.\n # To avoid optimization complexity, we avoid checking\n # for the case when there are two or more '-1' values.\n if self.ndim:\n requ_size = -mul(*requ)\n # If there are both 0 and -1 in requ_size, it is impossible\n # to determine a right output, but we can at least prevent\n # a division by 0. 
We do not want to keep a negative\n # size here as it could lead to further weird errors\n # after other optimizations.\n rest_size = input_size // maximum(requ_size, 1)\n return [\n tuple(\n [\n switch(eq(requ[i], -1), rest_size, requ[i])\n for i in range(self.ndim)\n ]\n )\n ]\n\n def c_code_cache_version(self):\n return (8,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n if isinstance(node.inputs[0], TensorVariable):\n x, shp = inputs\n (z,) = outputs\n sdtype = node.inputs[1].type.dtype_specs()[1]\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n return (\n \"\"\"\n assert (PyArray_NDIM(%(shp)s) == 1);\n npy_intp new_dims[%(params)s->ndim];\n PyArray_Dims newshape;\n newshape.ptr = new_dims;\n newshape.len = %(params)s->ndim;\n for (int ii = 0; ii < %(params)s->ndim; ++ii)\n {\n // -- We do not want an explicit cast here. the shp can be any\n // -- int* dtype. The compiler will explicitly upcast it, but\n // -- will err if this will downcast. This could happen if the\n // -- user pass an int64 dtype, but npy_intp endup being int32.\n new_dims[ii] = ((%(sdtype)s*)(\n PyArray_BYTES(%(shp)s) +\n ii * PyArray_STRIDES(%(shp)s)[0]))[0];\n }\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape, NPY_CORDER);\n if (!%(z)s)\n {\n //The error message should have been set by PyArray_Newshape\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n else:\n return Op.c_code(self, node, name, inputs, outputs, sub)\n\n\ndef reshape(x, newshape, ndim=None):\n if ndim is None:\n newshape = as_tensor_variable(newshape)\n if newshape.ndim != 1:\n raise TypeError(\n \"New shape in reshape must be a vector or a list/tuple of\"\n \" scalar. Got %s after conversion to a vector.\" % newshape\n )\n try:\n ndim = get_vector_length(newshape)\n except ValueError:\n raise ValueError(\n \"The length of the provided shape (%s) cannot \"\n \"be automatically determined, so Theano is not able \"\n \"to know what the number of dimensions of the reshaped \"\n \"variable will be. You can provide the 'ndim' keyword \"\n \"argument to 'reshape' to avoid this problem.\" % newshape\n )\n op = Reshape(ndim)\n rval = op(x, newshape)\n return rval\n\n\nclass Flatten(Op):\n \"\"\"\n Flatten a tensor.\n\n Flattens a tensor to `outdim` dimensions by preserving the leading\n outdim - 1 shape components.\n\n .. note:: The interface Flatten(Op) is deprecated, you should use flatten.\n \"\"\"\n\n view_map = {0: [0]}\n\n check_input = False\n __props__ = (\"outdim\",)\n\n def __init__(self, outdim=1):\n warnings.warn(\n \"Flatten class is deprecated, \" \"please use flatten method instead.\",\n DeprecationWarning,\n stacklevel=4,\n )\n self.outdim = int(outdim)\n\n def __str__(self):\n return \"{}{{{}}}\".format(self.__class__.__name__, self.outdim)\n\n def make_node(self, x):\n t_x = as_tensor_variable(x)\n if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):\n raise ValueError(\n \"invalid output ndimensions (%i) for tensor of \"\n \"rank %i\" % (self.outdim, t_x.ndim)\n )\n\n # Infer the broadcastable pattern of the output. 
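For instance (a\n # hand-worked example, not from the original comments): flattening x\n # with broadcastable pattern (False, True, True) to outdim == 2 keeps\n # (False,) and collapses the last two flags into\n # all((True, True)) == True, giving (False, True).\n # 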
For every dimension\n # unaffected by the flatten, the broadcast flag should be unchanged.\n # For the dimension resulting from the collapse of other dimensions,\n # it should be broadcastable iff all the collapsed dimensions were\n # broadcastable.\n bcast_kept_dims = x.broadcastable[: self.outdim - 1]\n bcast_new_dim = python_all(x.broadcastable[self.outdim - 1 :])\n broadcastable = bcast_kept_dims + (bcast_new_dim,)\n\n return gof.Apply(self, [t_x], [tensor(x.type.dtype, broadcastable)])\n\n def perform(self, node, inp, out_):\n (x,) = inp\n (out,) = out_\n outdim = self.outdim\n if outdim == 1:\n try:\n out[0] = x.reshape(x.size)\n except AttributeError:\n out[0] = x.reshape((np.prod(x.shape),))\n elif outdim == len(x.shape):\n out[0] = x\n else:\n newshape = x.shape[: outdim - 1] + (np.prod(x.shape[outdim - 1 :]),)\n out[0] = x.reshape(newshape)\n\n def infer_shape(self, node, in_shapes):\n (in_shp,) = in_shapes\n part1 = in_shp[: self.outdim - 1]\n part2 = in_shp[self.outdim - 1 :]\n\n if len(part2) > 1:\n part2 = (prod(part2, dtype=\"int64\"),)\n elif len(part2) == 1:\n # We do not want to force an upcast of part2 if its length is 1\n pass\n else:\n if len(in_shp) == 0 and self.outdim == 1:\n part2 = (1,)\n else:\n raise ValueError(\n \"invalid output ndimensions (%i) for tensor \"\n \"of rank %i\" % (self.outdim, len(in_shp))\n )\n\n out_shape = part1 + part2\n return [out_shape]\n\n def grad(self, inp, grads):\n (x,) = inp\n (g_out,) = grads\n return [reshape(g_out, shape(x), x.ndim)]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points:\n return [None]\n return self.make_node(*eval_points).outputs\n\n def c_code_cache_version(self):\n return (1, 1)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (out,) = outputs\n outdim = self.outdim\n fail = sub[\"fail\"]\n return (\n \"\"\"\n if (%(outdim)s == PyArray_NDIM(%(x)s))\n {\n Py_XDECREF(%(out)s);\n Py_XINCREF(%(x)s);\n %(out)s = %(x)s;\n }\n else\n {\n Py_XDECREF(%(out)s);\n\n if (%(outdim)s == 1)\n {\n npy_intp size = PyArray_SIZE(%(x)s);\n PyArray_Dims newshape;\n newshape.ptr = &size;\n newshape.len = 1;\n %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,\n &newshape,\n NPY_CORDER);\n }\n else\n {\n npy_intp *oldshape = PyArray_DIMS(%(x)s);\n npy_intp newshape_dims[%(outdim)s];\n\n int i;\n for (i = 0; i < %(outdim)s - 1; ++i)\n newshape_dims[i] = oldshape[i];\n\n newshape_dims[i] = 1;\n\n for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)\n newshape_dims[i] *= oldshape[j];\n\n PyArray_Dims newshape;\n newshape.ptr = newshape_dims;\n newshape.len = %(outdim)s;\n %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,\n &newshape,\n NPY_CORDER);\n }\n }\n if (!%(out)s)\n {\n //The error message should have been set by\n // PyArray_Newshape\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n\n\ndef is_flat(var, ndim=None, outdim=None):\n \"\"\"\n Verifies the dimensionality of the var is equal to\n outdim. 
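A doctest-style sketch (illustrative only):\n\n >>> is_flat(vector())\n True\n >>> is_flat(matrix())\n False\n >>> is_flat(matrix(), ndim=2)\n True\n\n 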
This method is usually called after flatten method on a\n variable, where the first outdim-1 dimension size(s) of the variable\n are kept intact, and the last dimension size of the variable is made\n equal to the multiplication of its remaining dimension size(s), such that\n the variable would end up with as many dimensions as outdim.\n\n Parameters\n ----------\n var : theano.tensor.var.TensorVariable\n the theano var on which the dimensionality is checked.\n\n outdim : int\n the expected dimensionality of var.\n\n Returns\n -------\n bool\n the comparison result of var's dim\n and the expected outdim.\n \"\"\"\n if outdim is None and ndim is None:\n ndim = 1\n elif outdim is not None and ndim is not None:\n raise ValueError(\"You should only specify ndim\")\n elif outdim is not None:\n warnings.warn(\"flatten outdim parameter is deprecated, use ndim instead.\")\n ndim = outdim\n return var.ndim == ndim\n\n\ndef flatten(x, ndim=None, outdim=None):\n \"\"\"\n Reshapes the variable x by keeping\n the first outdim-1 dimension size(s) of x the same,\n and making the last dimension size of x equal to\n the multiplication of its remaining dimension size(s).\n\n Parameters\n ----------\n x : theano.tensor.var.TensorVariable\n the variable that should be reshaped.\n\n ndim : int\n the number of dimensions of the returned variable\n Default 1.\n outdim : int\n DEPRECATED synonym for ndim\n Returns\n -------\n theano.tensor.var.TensorVariable\n the flattened variable with dimensionality ndim\n \"\"\"\n if outdim is None and ndim is None:\n ndim = 1\n elif outdim is not None and ndim is not None:\n raise ValueError(\"You should only specify ndim\")\n elif outdim is not None:\n warnings.warn(\"flatten outdim parameter is deprecated, use ndim instead.\")\n\n ndim = outdim\n # Any input variable can be flattened to have ndim of 1,\n # even if it's a scalar. Otherwise, ndim must be positive\n # and smaller than x.ndim.\n if ndim < 1 or (ndim > 1 and ndim > x.ndim):\n raise ValueError(\"ndim %s out of bound [1, %d)\" % (ndim, x.ndim + 1))\n\n if ndim > 1:\n dims = tuple(x.shape[: ndim - 1]) + (-1,)\n else:\n dims = (-1,)\n x_reshaped = x.reshape(dims)\n bcast_kept_dims = x.broadcastable[: ndim - 1]\n bcast_new_dim = python_all(x.broadcastable[ndim - 1 :])\n broadcastable = bcast_kept_dims + (bcast_new_dim,)\n x_reshaped = theano.tensor.addbroadcast(\n x_reshaped, *filter(lambda i: broadcastable[i], range(ndim))\n )\n return x_reshaped\n\n\n# class TileGrad(Op):\n# \"\"\"\n# Calculates the gradient of the Tile Op.\n# \"\"\"\n# # this is so weird, I can't think of how to make this a general thing.\n# def make_node(self, x, reps, g_out):\n# return gof.Apply(self, [x, reps, g_out], [x.type()])\n#\n# def perform(self, node, inp, out):\n# x, reps, g_out = inp\n# gx, = out\n# xsh = x.shape\n# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:\n# gx[0] = numpy.sum(g_out, axis=0)\n# else:\n# raise NotImplementedError('x.shape, reps combination not '\n# 'supported', (x.shape, reps))\n#\n# tilegrad = TileGrad()\n\n\nclass Tile(Op):\n \"\"\"\n Construct an array by repeating the input x according to reps pattern.\n\n .. note:: Deprecated\n Use tile() instead.\n\n Tiles its input according to reps. 
The length of reps is the number of\n dimension of x and contains the number of times to tile x in each\n dimension.\n\n See Also\n --------\n numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html\n\n \"\"\"\n\n __props__ = (\"ndim\",)\n\n def __init__(self, ndim):\n self.ndim = ndim\n\n def __str__(self):\n return self.__class__.__name__ + \"{ndim=%d}\" % self.ndim\n\n def make_node(self, x, reps):\n warnings.warn(\n (\"Tile op is deprecated, use tile function instead.\"), stacklevel=3\n )\n x = as_tensor_variable(x)\n reps = as_tensor_variable(reps)\n return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] * self.ndim)])\n\n def perform(self, node, inp, out_):\n x, reps = inp\n (out,) = out_\n res = np.tile(x, reps)\n if res.ndim != self.ndim:\n raise ValueError(\"Tile.perform produced incorrect number of dimensions\")\n\n if (np.asarray(reps) == 1).all():\n # In that case, some NumPy version return a view! As this\n # op isn't declared as inplace, we need to check that and\n # copy the data.\n if np.may_share_memory(res, x):\n res = res.copy()\n out[0] = res\n\n def infer_shape(self, node, in_shapes):\n # Note: in contrast with numpy, it is assumed that x.shape and reps\n # have equal length; see also tile function below\n\n # Note: if reps were to be allowed not to be a constant and x.shape\n # and reps to be unequal, the following block of code could be used:\n # prepend 1 to x.shape if needed\n # if self.ndim > x.ndim:\n # shp = concatenate(ones(self.ndim - x.ndim), shp)\n # prepend 1 to reps if needed\n # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)\n\n x, reps = node.inputs\n shp = in_shapes[0]\n tiled_shp = shp * reps\n out_shape = []\n for i in range(self.ndim):\n out_shape.append(tiled_shp[i])\n return [out_shape]\n\n def grad(self, inp, grads):\n x, reps = inp\n (g_out,) = grads\n # return [tilegrad(x, reps, g_out), None]\n raise NotImplementedError()\n\n\ndef tile(x, reps, ndim=None):\n \"\"\"\n Tile input array `x` according to `reps`.\n\n See the docstring of `numpy.tile` for details.\n\n 'reps' can be constant integer (e.g. 3), constant vector(e.g. [2 3]),\n symbolic scalar (e.g. tensor.iscalar()), symbolic vector (e.g. tensor.ivector())\n or a list of symbolic scalar (e.g. [tensor.iscalar(), tensor.iscalar()]).\n\n ndim is the number of the dimensions of the output, if it is provided, ndim\n should be equal or larger than x.ndim and len(reps), otherwise, we will use\n max(x.ndim, len(reps)) as ndim. 
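For example (a sketch; the shape\n comment is what numpy.tile semantics imply):\n\n >>> x = matrix()\n >>> y = tile(x, (2, 3)) # y.shape == (2 * x.shape[0], 3 * x.shape[1])\n\n 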
If reps is symbolic vector, the ndim has to\n be provided.\n\n \"\"\"\n\n if ndim is not None and ndim < x.ndim:\n raise ValueError(\"ndim should be equal or larger than x.ndim\")\n\n # if reps is tensor.scalar, integer or tensor.vector, we convert it to a list.\n if not isinstance(reps, (list, tuple)):\n reps_astensor = as_tensor_variable(reps)\n ndim_check = reps_astensor.ndim\n if reps_astensor.dtype not in theano.tensor.discrete_dtypes:\n raise ValueError(\"elements of reps must be integer dtype\")\n\n # tensor.scalar/integer case\n if ndim_check == 0:\n reps = [reps]\n\n # tensor.vector case\n elif ndim_check == 1:\n if ndim is None:\n raise ValueError(\n \"if reps is tensor.vector, you should specify \" \"the ndim\"\n )\n else:\n offset = ndim - reps.shape[0]\n\n # assert that reps.shape[0] does not exceed ndim\n offset = theano.tensor.opt.assert_(offset, ge(offset, 0))\n\n # if reps.ndim is less than x.ndim, we pad the reps with\n # \"1\" so that reps will have the same ndim as x.\n reps_ = [switch(i < offset, 1, reps[i - offset]) for i in range(ndim)]\n reps = reps_\n\n # other raise error\n else:\n raise ValueError(\"the dimension of reps should not exceed 1\")\n else:\n if ndim is not None and len(reps) > ndim:\n raise ValueError(\"len(reps) should be equal or less than ndim\")\n if not np.all(\n [\n isinstance(r, int)\n or (\n isinstance(r, TensorVariable)\n and r.dtype in theano.tensor.discrete_dtypes\n )\n for r in reps\n ]\n ):\n raise ValueError(\"elements of reps must be scalars of integer dtype\")\n\n # if reps.ndim is less than x.ndim, we pad the reps with\n # \"1\" so that reps will have the same ndim as x.\n reps = list(reps)\n if ndim is None:\n ndim = builtins.max(len(reps), x.ndim)\n if len(reps) < ndim:\n reps = [1] * (ndim - len(reps)) + reps\n\n shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in range(x.ndim)]\n alloc_shape = reps + shape\n y = alloc(x, *alloc_shape)\n shuffle_ind = np.arange(ndim * 2).reshape(2, ndim)\n shuffle_ind = shuffle_ind.transpose().flatten()\n y = y.dimshuffle(*shuffle_ind)\n new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]\n y = y.reshape(new_shapes)\n\n return y\n\n\nclass ARange(Op):\n \"\"\"Create an array containing evenly spaced values within a given interval.\n\n Parameters and behaviour are the same as numpy.arange().\n\n \"\"\"\n\n __props__ = (\"dtype\",)\n\n def __init__(self, dtype):\n self.dtype = dtype\n\n def make_node(self, start, stop, step):\n start, stop, step = map(as_tensor_variable, (start, stop, step))\n assert start.ndim == 0\n assert stop.ndim == 0\n assert step.ndim == 0\n\n inputs = [start, stop, step]\n outputs = [tensor(self.dtype, (False,))]\n\n return Apply(self, inputs, outputs)\n\n @theano.configparser.change_flags(warn_float64=\"ignore\")\n def infer_shape(self, node, i_shapes):\n # Note start, stop and step can be float numbers.\n start, stop, step = node.inputs\n\n def is_constant_value(var, value):\n try:\n v = get_scalar_constant_value(var)\n return np.all(v == value)\n except NotScalarConstantError:\n pass\n return False\n\n def upcast(var):\n if (\n var.dtype in integer_dtypes\n and\n # We do not want to cast uint64 to int64 as this can\n # loose information. If we upcast uint64 with int64,\n # this give float64. 
This is safer then checking for\n # uint64 in case we support [u]int128 or other in the\n # future.\n scal.upcast(var.dtype, \"int64\") == \"int64\"\n ):\n return cast(var, \"int64\")\n return var\n\n if is_constant_value(step, 1):\n if is_constant_value(start, 0):\n return [(cast(stop, \"int64\"),)]\n else:\n stop = upcast(stop)\n start = upcast(start)\n return [(maximum(cast(stop - start, \"int64\"), 0),)]\n else:\n stop = upcast(stop)\n start = upcast(start)\n return [\n (\n maximum(\n cast(ceil(cast((stop - start), \"float64\") / step), \"int64\"), 0\n ),\n )\n ]\n\n def perform(self, node, inp, out_):\n start, stop, step = inp\n (out,) = out_\n start = start.item()\n stop = stop.item()\n step = step.item()\n out[0] = np.arange(start, stop, step, dtype=self.dtype)\n\n def connection_pattern(self, node):\n\n return [[True], [False], [True]]\n\n def L_op(self, inputs, outputs, grads):\n start, stop, step = inputs\n (gz,) = grads\n # `start` and `step` affect the output values\n # but the outputs are integers so there's\n # no gradient through them.\n # When they are not integers, the gradients are\n # as expressed below.\n # `stop` does not affect the output values,\n # just the output shape, so it is disconnected.\n\n if self.dtype in discrete_dtypes:\n return [\n start.zeros_like(dtype=config.floatX),\n DisconnectedType()(),\n step.zeros_like(dtype=config.floatX),\n ]\n else:\n num_steps_taken = outputs[0].shape[0]\n return [\n gz.sum(),\n DisconnectedType()(),\n (gz * arange(num_steps_taken, dtype=self.dtype)).sum(),\n ]\n\n def R_op(self, inputs, eval_points):\n return [None]\n\n\n_arange = {}\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n # If only one argument is provided, it is in fact the \"stop\" argument,\n # and start is 0.\n if stop is None:\n start, stop = 0, start\n\n start, stop, step = map(as_tensor_variable, (start, stop, step))\n # If dtype is not provided, infer it from the other arguments\n if dtype is None:\n dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)\n # don't try to be stingy and byte-optimize, this leads to\n # overflow problems.\n if dtype in int_dtypes:\n dtype = \"int64\"\n if dtype in uint_dtypes:\n dtype = \"uint64\"\n if config.cast_policy in (\"numpy\", \"numpy+floatX\"):\n # We enforce numpy semantics, except in the special case where\n # `config.cast_policy` is 'numpy+floatX' and we want to use float32\n # rather than float64.\n # As an example, if `start`, `stop` and `step` are all int32,\n # `numpy.arange` returns an int64 array (on 64-bit platforms),\n # while the upcast above returns int32.\n numpy_dtype = np.arange(\n start=np.array(0, dtype=start.dtype),\n stop=np.array(1, dtype=stop.dtype),\n step=np.array(1, dtype=step.dtype),\n ).dtype\n if numpy_dtype != dtype:\n if (\n config.cast_policy == \"numpy+floatX\"\n and config.floatX == \"float32\"\n and numpy_dtype == \"float64\"\n and\n # No explicit float64 in the three arguments?\n python_all(\n dt != \"float64\" for dt in [s.dtype for s in (start, stop, step)]\n )\n ):\n # We use float32 instead.\n assert dtype != \"float64\"\n dtype = \"float32\"\n else:\n # We use the same dtype as numpy instead of the result of\n # the upcast.\n dtype = str(numpy_dtype)\n\n if dtype not in _arange:\n _arange[dtype] = ARange(dtype)\n return _arange[dtype](start, stop, step)\n\n\nclass _nd_grid:\n \"\"\"Create a dense n-dimensional 'meshgrid' with equally spaced points.\n\n Used to create the instance ``mgrid`` and ``ogrid`` which act similarly\n to their numpy 
equivalents.\n\n Parameters\n ----------\n sparse : boolean, optional, default=True\n Specifying False leads to the equivalent of numpy's mgrid functionality.\n Specifying True leads to the equivalent of ogrid.\n\n Examples\n --------\n >>> a = T.mgrid[0:5, 0:3]\n >>> a[0].eval()\n array([[0, 0, 0],\n [1, 1, 1],\n [2, 2, 2],\n [3, 3, 3],\n [4, 4, 4]], dtype=int8)\n >>> a[1].eval()\n array([[0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2]], dtype=int8)\n >>> b = T.ogrid[0:5, 0:3]\n >>> b[0].eval()\n array([[0],\n [1],\n [2],\n [3],\n [4]], dtype=int8)\n >>> b[1].eval()\n array([[0, 1, 2, 3]], dtype=int8)\n\n \"\"\"\n\n def __init__(self, sparse=False):\n self.sparse = sparse\n\n def __getitem__(self, *args):\n\n if isinstance(args[0], slice):\n sl = args[0]\n return arange(sl.start or 0, sl.stop, sl.step or 1)\n\n ndim = len(args[0])\n for sl in args[0]:\n if isinstance(sl.step, python_complex):\n raise NotImplementedError(\n \"Not implemented for slices \" \"whose step is complex\"\n )\n ranges = [arange(sl.start or 0, sl.stop, sl.step or 1) for sl in args[0]]\n shapes = [\n tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))\n for j, r in enumerate(ranges)\n ]\n ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]\n if self.sparse:\n grids = ranges\n else:\n grids = []\n ones = [ones_like(r) for r in ranges]\n for i in range(ndim):\n grid = 1\n for j in range(ndim):\n if j == i:\n grid = grid * ranges[j]\n else:\n grid = grid * ones[j]\n grids.append(grid)\n return grids\n\n\nmgrid = _nd_grid()\nogrid = _nd_grid(sparse=True)\n\n\nclass PermuteRowElements(Op):\n \"\"\"Permute the elements of each row (inner-most dim) of a tensor.\n\n A permutation will be applied to every row (vector) of the input tensor x.\n Depending on the dimensionality of x and the permutation tensor y,\n different cases are possible.\n If y.ndim = 1, y is a single permutation, that will be applied to every\n vector of x. For instance, if x is a matrix, the same permutation will be\n applied to each row of x.\n If x.ndim = y.ndim, each row of x corresponds to a row of y, containing\n a permutation that will be applied to that row. For instance, if x and y\n are two matrices, a different permutation will be applied to each row of x.\n If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)\n of x will be reordered according to the corresponding row of y. (This is\n a generalization of the first case).\n If x.ndim = 1, every permutation in y will be applied to x, and the output\n will contain all the results.\n If x.ndim < y.ndim, x will be broadcasted to fit y, and different\n permutations contained in y will be applied to each vector in x. 
(This is\n a generalization of the previous case).\n\n If the \"inverse\" argument is True, the Op will perform the inverse\n permutation instead.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x, y, inverse):\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n if inverse: # as_tensor_variable does not accept booleans\n inverse = as_tensor_variable(1)\n else:\n inverse = as_tensor_variable(0)\n\n # y should contain integers\n assert y.type.dtype in integer_dtypes\n # Inverse should be an integer scalar\n assert inverse.type.ndim == 0 and inverse.type.dtype in integer_dtypes\n\n # Match shapes of x and y\n x_dim = x.type.ndim\n y_dim = y.type.ndim\n\n if x_dim > y_dim:\n y = shape_padleft(y, n_ones=(x_dim - y_dim))\n elif x_dim < y_dim:\n x = shape_padleft(x, n_ones=(y_dim - x_dim))\n\n # Compute the broadcastable pattern of the output\n out_broadcastable = [\n xb and yb for xb, yb in zip(x.type.broadcastable, y.type.broadcastable)\n ]\n out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)\n\n inputlist = [x, y, inverse]\n outputlist = [out_type]\n return Apply(self, inputlist, outputlist)\n\n def _rec_perform(self, node, x, y, inverse, out, curdim):\n \"\"\"Perform the permutation by doing a recursion over the input\n dimensions.\n\n For every dimension, starting with the leftmost, the right set of\n indices is determined (depending if broadcasting or not), then\n the function is recursively called on the appropriate subtensors.\n\n The terminal case is reached when the current tensors are vector,\n then the permutation contained in y is applied to x.\n\n Parameters\n ----------\n x : tensor\n The input tensor, on which the permutation is applied.\n y : tensor\n Tensor containing the permutations to apply.\n out : tensor\n Tensor storing the output result.\n curdim : int\n Counter of the current depth of recursion.\n inverse\n Wether to apply permutations or their inverse.\n\n \"\"\"\n if len(x.shape) == 1:\n # Numpy advanced indexing works in this case\n if inverse:\n out[y] = x[:]\n else:\n out[:] = x[y]\n else:\n xs0 = x.shape[0]\n ys0 = y.shape[0]\n if xs0 == ys0:\n for i in range(xs0):\n self._rec_perform(node, x[i], y[i], inverse, out[i], curdim + 1)\n elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:\n # Broadcast y\n for i in range(xs0):\n self._rec_perform(node, x[i], y[0], inverse, out[i], curdim + 1)\n elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:\n # Broadcast x\n for i in range(ys0):\n self._rec_perform(node, x[0], y[i], inverse, out[i], curdim + 1)\n else:\n raise ValueError(\"Dimension mismatch: {}, {}\".format(xs0, ys0))\n\n def perform(self, node, inp, out):\n x, y, inverse = inp\n (outs,) = out\n x_s = x.shape\n y_s = y.shape\n assert len(x_s) == len(y_s)\n\n # Make sure the output is big enough\n out_s = []\n for xdim, ydim in zip(x_s, y_s):\n if xdim == ydim:\n outdim = xdim\n elif xdim == 1:\n outdim = ydim\n elif ydim == 1:\n outdim = xdim\n else:\n raise ValueError(\"Dimension mismatch: {}, {}\".format(xdim, ydim))\n out_s.append(outdim)\n\n if outs[0] is None or outs[0].shape != out_s:\n outs[0] = np.empty(out_s, dtype=x.dtype)\n\n self._rec_perform(node, x, y, inverse, outs[0], curdim=0)\n\n def infer_shape(self, node, in_shapes):\n shp_x = in_shapes[0]\n shp_y = in_shapes[1]\n assert len(shp_x) == len(shp_y)\n out_shape = []\n for i in range(len(shp_x)):\n out_shape.append(maximum(shp_x[i], shp_y[i]))\n return [out_shape]\n\n def grad(self, inp, grads):\n x, y, inverse = inp\n (gz,) = grads\n # First, compute 
the gradient wrt the broadcasted x.\n # If 'inverse' is False (0), apply the inverse of y on gz.\n # Else, apply y on gz.\n gx = permute_row_elements(gz, y, eq(inverse, 0))\n\n # If x has been broadcasted along some axes, we need to sum\n # the gradient over these axes, but keep the dimension (as\n # broadcastable)\n broadcasted_dims = [\n dim\n for dim in range(gz.type.ndim)\n if x.type.broadcastable[dim] and not gz.type.broadcastable[dim]\n ]\n gx = Sum(axis=broadcasted_dims)(gx)\n\n # Sum(...) removed the dimensions in broadcasted_dims,\n # so we need to put them back.\n newdims = []\n i = 0\n for dim in range(gz.type.ndim):\n if dim in broadcasted_dims:\n newdims.append(\"x\")\n else:\n newdims.append(i)\n i += 1\n\n gx = DimShuffle(gx.type.broadcastable, newdims)(gx)\n assert gx.type.broadcastable == x.type.broadcastable\n\n # if x is an integer type, then so is the output.\n # this means f(x+eps) = f(x) so the gradient with respect\n # to x is zero\n if x.type.dtype in discrete_dtypes:\n gx = x.zeros_like()\n\n # The elements of y and of inverse both affect the output,\n # so they are connected to the output,\n # and the transformation isn't defined if their values\n # are non-integer, so the gradient with respect to them is\n # undefined\n\n return [gx, grad_undefined(self, 1, y), grad_undefined(self, 1, inverse)]\n\n\n_permute_row_elements = PermuteRowElements()\n\n\ndef permute_row_elements(x, y, inverse=0):\n return _permute_row_elements(x, y, inverse)\n\n\ndef inverse_permutation(perm):\n \"\"\"Computes the inverse of permutations.\n\n Each row of input should contain a permutation of the first integers.\n\n \"\"\"\n return permute_row_elements(\n arange(perm.shape[-1], dtype=perm.dtype), perm, inverse=True\n )\n\n\n#########################\n# Linalg : Dot\n#########################\n#\n# For BLAS-related ops see blas.py\n#\n# TODO: Dotinv should go here, Eigs, Svd, etc.\n\n\nclass Dot(Op):\n \"\"\"\n Computes the dot product of two variables. For two matrices, this is\n equivalent to matrix multiplication. For two vectors, this is the inner\n product.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n\n __props__ = ()\n\n # the rationale for Dot22 is related to getting GEMM Ops into the\n # graph. See Dot22 in tensor.blas for details.\n\n def make_node(self, *inputs):\n inputs = list(map(as_tensor_variable, inputs))\n\n if len(inputs) != 2:\n raise TypeError(\n \"theano.tensor.Dot: 2 arguments required, %d given \" % len(inputs)\n )\n if inputs[0].ndim not in (1, 2):\n raise TypeError(\n \"theano.tensor.Dot: input 0 (0-indexed) must have ndim of \"\n \"1 or 2, %d given. Consider calling theano.tensor.dot \"\n \"instead.\" % inputs[0].ndim\n )\n if inputs[1].ndim not in (1, 2):\n raise TypeError(\n \"theano.tensor.Dot: input 1 (0-indexed) must have ndim of \"\n \"1 or 2, %d given. 
Consider calling theano.tensor.dot \"\n \"instead.\" % inputs[1].ndim\n )\n\n i_broadcastables = [input.type.broadcastable for input in inputs]\n bx, by = i_broadcastables\n if len(by) == 2: # y is a matrix\n bz = bx[:-1] + by[-1:]\n elif len(by) == 1: # y is vector\n bz = bx[:-1]\n\n i_dtypes = [input.type.dtype for input in inputs]\n outputs = [tensor(scal.upcast(*i_dtypes), bz)]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, out):\n x, y = inp\n (z,) = out\n\n # the asarray is here because dot between two vectors\n # gives a numpy float object but we need to return a 0d\n # ndarray\n z[0] = np.asarray(np.dot(x, y))\n\n def grad(self, inp, grads):\n\n x, y = inp\n (gz,) = grads\n xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim\n\n # grad is scalar, so x is vector and y is vector\n if gdim == 0:\n xgrad = gz * y\n ygrad = gz * x\n\n # x is vector, y is matrix, grad is vector\n elif xdim == 1 and ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = outer(x.T, gz)\n\n # x is matrix, y is vector, grad is vector\n elif xdim == 2 and ydim == 1:\n xgrad = outer(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # x is matrix, y is matrix, grad is matrix\n elif xdim == ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # If x or y contain broadcastable dimensions but only one of\n # them know that a matching dimensions is broadcastable, the\n # above code don't always return the right broadcast pattern.\n # This cause problem down the road. See gh-1461.\n if xgrad.broadcastable != x.broadcastable:\n xgrad = patternbroadcast(xgrad, x.broadcastable)\n if ygrad.broadcastable != y.broadcastable:\n ygrad = patternbroadcast(ygrad, y.broadcastable)\n\n rval = xgrad, ygrad\n\n for elem in rval:\n assert elem.dtype.find(\"float\") != -1\n\n return rval\n\n def R_op(self, inputs, eval_points):\n # R_op for a \\dot b evaluted at c for a and d for b is\n # simply c \\dot b + a \\dot d\n\n assert len(inputs) == 2\n assert len(eval_points) == 2\n if eval_points[0] is None and eval_points[1] is None:\n return [None]\n\n if eval_points[0]:\n t1 = self(eval_points[0], inputs[1])\n if eval_points[1]:\n t2 = self(inputs[0], eval_points[1])\n\n if eval_points[0] and eval_points[1]:\n return [t1 + t2]\n elif eval_points[0]:\n return [t1]\n else:\n return [t2]\n\n def infer_shape(self, node, shapes):\n xshp, yshp = shapes\n x, y = node.inputs\n\n # vector / vector\n if x.ndim == 1 and y.ndim == 1:\n return [()]\n # matrix / vector\n if x.ndim == 2 and y.ndim == 1:\n return [xshp[:-1]]\n # vector / matrix\n if x.ndim == 1 and y.ndim == 2:\n return [yshp[-1:]]\n # matrix / matrix\n if x.ndim == 2 and y.ndim == 2:\n return [xshp[:-1] + yshp[-1:]]\n raise NotImplementedError()\n\n def __str__(self):\n return \"dot\"\n\n\n_dot = Dot()\npprint.assign(\n _dot, printing.OperatorPrinter(printing.special[\"middle_dot\"], -1, \"left\")\n)\n\n\ndef dot(l, r):\n \"\"\"Return a symbolic dot product.\n\n This is designed to work with both sparse and dense tensors types.\n \"\"\"\n try:\n res = l.__dot__(r)\n if res is NotImplemented:\n raise NotImplementedError\n except (NotImplementedError, AttributeError, TypeError):\n res = r.__rdot__(l)\n if res is NotImplemented:\n raise NotImplementedError()\n\n return res\n\n\ndef dense_dot(a, b):\n \"\"\"\n Computes the dot product of two variables.\n\n For two matrices, this is equivalent to matrix multiplication.\n For two vectors, this is the inner product.\n When one variable is a scalar, this is like elementwise multiplication.\n For N dimensions, this is a sum 
product over the last axis\n of the first array and the second-to-last axis of the second array:\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n Note that this dot function does one of three things, in the following\n sequence:\n\n 1. If either a or b is scalar, it returns the elementwise product\n without calling the Theano Dot op.\n\n 2. If either a or b has more than 2 dimensions, it calls Theano's\n tensordot function with appropriate axes. The tensordot function\n expresses high-dimensional dot products in terms of 2D matrix\n multiplications, so it may be possible to further optimize for\n performance.\n\n 3. If both a and b have either 1 or 2 dimensions, it calls Theano's\n Dot op on a and b.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0 or b.ndim == 0:\n return a * b\n elif a.ndim > 2 or b.ndim > 2:\n return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])\n else:\n return _dot(a, b)\n\n\n#########################\n# Linalg : TensorDot\n#########################\n\n\ndef _tensordot_as_dot(a, b, axes, dot, batched):\n \"\"\"\n Reduces a tensor dot product to a matrix or vector dot product. Based\n on code from Tijmen Tieleman's gnumpy\n (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Please see the documentation of tensordot for the meaning of the a, b\n and axes arguments.\n\n :param dot: a function that accepts two symbolic variables and computes\n the appropriate dot product (e.g. dot, batched_dot)\n :type dot: function\n\n :param batched: whether to treat the first axis of a and b as a batch\n axis.
If so, this axis will be preserved in the output,\n allowing this function to be used also for batched\n tensor dot products.\n :type batched: boolean\n\n :returns: a tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less the first dimension and any dimensions that were summed\n over).\n :rtype: symbolic tensor\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if not np.isscalar(axes) and len(axes) != 2:\n raise ValueError(\n \"Axes should be an integer or a \"\n \"list/tuple of len 2 (%s was provided)\" % str(axes)\n )\n\n # if 'axes' is a number of axes to multiply and sum over (trailing axes\n # of a, leading axes of b), we can just reshape and use dot.\n elif np.isscalar(axes):\n axes = int(axes)\n\n for operand_name, operand in ((\"a\", a), (\"b\", b)):\n if axes > operand.ndim:\n raise ValueError(\n \"axes can not be larger than the dimension of %s \"\n \"(%s.ndim=%i, axes=%i)\"\n % (operand_name, operand_name, operand.ndim, axes)\n )\n if batched and axes == operand.ndim:\n raise ValueError(\n \"axes to sum over must not include the batch axis \"\n \"of %s (%s.ndim=%i, axes=%i)\"\n % (operand_name, operand_name, operand.ndim, axes)\n )\n\n batch_axes = 1 if batched else 0\n a_outaxes = slice(0, a.ndim - axes)\n b_outaxes = slice(batch_axes + axes, b.ndim)\n outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])\n outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]\n outndim = len(outbcast)\n\n a_shape = [1] * 2\n b_shape = [1] * 2\n\n # compute total size of summed axes\n for i in range(0, axes):\n a_shape[1] *= a.shape[-(i + 1)]\n b_shape[0] *= b.shape[batch_axes + i]\n # compute total size of other axes\n for i in range(0, a.ndim - axes - batch_axes):\n a_shape[0] *= a.shape[batch_axes + i]\n for i in range(0, b.ndim - axes - batch_axes):\n b_shape[1] *= b.shape[-(i + 1)]\n\n if batched:\n a_shape.insert(0, a.shape[0])\n b_shape.insert(0, b.shape[0])\n\n a_reshaped = a.reshape(a_shape)\n b_reshaped = b.reshape(b_shape)\n\n out_reshaped = dot(a_reshaped, b_reshaped)\n out = out_reshaped.reshape(outshape, outndim)\n # Make sure the broadcastable pattern of the result is correct,\n # since some shape information can be lost in the reshapes.\n return patternbroadcast(out, outbcast)\n\n # if 'axes' is a list, transpose a and b such that the summed axes of a\n # are last and the summed axes of b are first.\n else:\n axes = [_pack(axes_) for axes_ in axes]\n\n if len(axes[0]) != len(axes[1]):\n raise ValueError(\"Axes elements must have the same length.\")\n\n for i, (operand_name, operand) in enumerate(((\"a\", a), (\"b\", b))):\n if len(axes[i]) > operand.ndim:\n raise ValueError(\n \"axes[%i] should be array_like with length less than \"\n \"the dimensions of %s (%s.ndim=%i, len(axes[0])=%i).\"\n % (i, operand_name, operand_name, operand.ndim, len(axes[i]))\n )\n if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:\n raise ValueError(\n \"axes[%i] contains dimensions greater than or equal \"\n \"to %s.ndim (%s.ndim=%i, max(axes[0])=%i).\"\n % (\n i,\n operand_name,\n operand_name,\n operand.ndim,\n np.max(np.array(axes[i])),\n )\n )\n if batched and 0 in axes[i]:\n raise ValueError(\n \"axes to sum over must not contain the batch axis \"\n \"(axes[%i]=%s)\" % (i, axes[i])\n )\n\n batch_axes = [0] if batched else []\n other_axes = [\n [x for x in range(operand.ndim) if x not in axes[i] and x not in batch_axes]\n for i, operand in enumerate((a, b))\n ]\n\n a_shuffled = 
a.dimshuffle(batch_axes + other_axes[0] + axes[0])\n b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])\n\n # now that a and b are in the right order, recur with integer axes\n return _tensordot_as_dot(\n a_shuffled, b_shuffled, len(axes[0]), dot=dot, batched=batched\n )\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"\n Compute a generalized dot product over provided axes.\n\n Given two tensors a and b, tensordot computes a generalized dot product over\n the provided axes. Theano's implementation reduces all expressions to\n matrix or vector dot products and is based on code from Tijmen Tieleman's\n gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Parameters\n ----------\n a: symbolic tensor\n The first tensor variable.\n b: symbolic tensor\n The second tensor variable\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes\n to sum over in each tensor.\n\n Note that the default value of 2 is not guaranteed to work\n for all values of a and b, and an error will be raised if\n that is the case. The reason for keeping the default is to\n maintain the same signature as numpy's tensordot function\n (and np.tensordot raises analogous errors for non-compatible\n inputs).\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor:\n axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 0]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 1st axis of b.\n\n Returns\n -------\n symbolic tensor\n A tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less any dimensions that were summed over).\n\n Examples\n --------\n It may be helpful to consider an example to see what tensordot does.\n Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)\n and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --\n note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes\n are compatible. The resulting tensor will have shape (2, 5, 6) -- the\n dimensions that are not being summed:\n\n >>> a = np.random.random((2,3,4))\n >>> b = np.random.random((5,6,4,3))\n\n #tensordot\n >>> c = np.tensordot(a, b, [[1,2],[3,2]])\n\n #loop replicating tensordot\n >>> a0, a1, a2 = a.shape\n >>> b0, b1, _, _ = b.shape\n >>> cloop = np.zeros((a0,b0,b1))\n\n #loop over non-summed indices -- these exist\n #in the tensor product.\n >>> for i in range(a0):\n ... for j in range(b0):\n ... for k in range(b1):\n ... #loop over summed indices -- these don't exist\n ... #in the tensor product.\n ... for l in range(a1):\n ... for m in range(a2):\n ... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]\n\n >>> np.allclose(c, cloop)\n true\n\n This specific implementation avoids a loop by transposing a and b such that\n the summed axes of a are last and the summed axes of b are first. The\n resulting arrays are reshaped to 2 dimensions (or left as vectors, if\n appropriate) and a matrix or vector dot product is taken. The result is\n reshaped back to the required output dimensions.\n\n In an extreme case, no axes may be specified. 
The resulting tensor\n will have shape equal to the concatenation of the shapes of a and b:\n\n >>> c = np.tensordot(a, b, 0)\n >>> print(a.shape)\n (2,3,4)\n >>> print(b.shape)\n (5,6,4,3)\n >>> print(c.shape)\n (2,3,4,5,6,4,3)\n\n See the documentation of numpy.tensordot for more examples.\n\n \"\"\"\n return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)\n\n\ndef outer(x, y):\n \"\"\"Return vector-vector outer product.\n\n If an input isn't a vector, we flatten it first.\n\n \"\"\"\n if x.ndim != 1:\n x = x.flatten()\n if y.ndim != 1:\n y = y.flatten()\n return dot(x.dimshuffle(0, \"x\"), y.dimshuffle(\"x\", 0))\n\n\ndef any(x, axis=None, keepdims=False):\n out = elemwise.Any(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef all(x, axis=None, keepdims=False):\n out = elemwise.All(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\n# Some NumPy version like 1.9.2 return a view for numpy.diagonal\nx = np.zeros((4, 4))\nnumpy_diagonal_return_view = np.may_share_memory(np.diagonal(x), x)\ndel x\n\n\nclass ExtractDiag(Op):\n \"\"\"\n Return specified diagonals.\n\n If x is 2-D, returns the diagonal of x with the given offset,\n i.e., the collection of elements of the form x[i, i+offset].\n If x has more than two dimensions, then the axes specified by\n axis1 and axis2 are used to determine the 2-D sub-array whose\n diagonal is returned. The shape of the resulting array can be\n determined by removing axis1 and axis2 and appending an index\n to the right equal to the size of the resulting diagonals.\n\n Parameters\n ----------\n x: A tensor variable with x.ndim >= 2.\n\n offset: Offset of the diagonal from the main diagonal.\n Can be positive or negative.\n Defaults to main diagonal (0).\n\n axis1: Axis to be used as the first axis of the 2-D\n sub-arrays from which the diagonals should be taken.\n Defaults to first axis (0).\n\n axis2: Axis to be used as the second axis of the 2-D\n sub-arrays from which the diagonals should be taken.\n Defaults to second axis (1).\n\n\n\n Returns\n -------\n array_of_diagonals:\n If x is 2-D, a 1-D array of the same type as a\n containing the diagonal is returned.\n If the dimension of x is greater than two, then an\n array of diagonals is returned, \"packed\" from left-most\n dimension to right-most (e.g., if x is 3-D, then the\n diagonals are \"packed\" along rows).\n\n\n\n Raises\n ------\n ValueError\n If the dimension of x is less than 2.\n\n\n See Also\n --------\n numpy.diagonal:\n https://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.diagonal.html\n \"\"\"\n\n __props__ = (\"offset\", \"axis1\", \"axis2\", \"view\")\n\n def __init__(self, offset=0, axis1=0, axis2=1, view=False):\n self.view = view\n if self.view and not numpy_diagonal_return_view:\n warnings.warn(\n \"View will forced to False. ExtractDiag property view is \"\n \"set to True but numpy version %s and prior versions of \"\n \"numpy.diagonal() do not return a view. 
Update \"\n \"numpy to use ExtractDiag(view=True)\" % np.version.version\n )\n self.view = False\n if self.view:\n self.view_map = {0: [0]}\n self.offset = offset\n self.axis1 = axis1\n self.axis2 = axis2\n\n def make_node(self, x):\n x = as_tensor_variable(x)\n\n if x.ndim < 2:\n raise ValueError(\n \"ExtractDiag needs an input with 2 or more \" \"dimensions\", x\n )\n return Apply(\n self,\n [x],\n [x.type.__class__(dtype=x.dtype, broadcastable=[False] * (x.ndim - 1))()],\n )\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (z,) = outputs\n z[0] = x.diagonal(self.offset, self.axis1, self.axis2)\n if not self.view:\n z[0] = z[0].copy()\n\n def grad(self, inputs, gout):\n (x,) = inputs\n (gz,) = gout\n\n if x.ndim == 2:\n x = theano.tensor.zeros_like(x)\n xdiag = theano.tensor.AllocDiag(offset=self.offset)(gz)\n return [\n theano.tensor.set_subtensor(\n x[: xdiag.shape[0], : xdiag.shape[1]], xdiag\n )\n ]\n else:\n warnings.warn(\n \"gradient of theano.tensor.basic.ExtractDiag only\" \"works for matrices.\"\n )\n return [grad_not_implemented(self, 0, x)]\n\n def infer_shape(self, node, shapes):\n (in_shape,) = shapes\n dim1 = in_shape[self.axis1]\n dim2 = in_shape[self.axis2]\n out_shape = [\n d for i, d in enumerate(in_shape) if i not in (self.axis1, self.axis2)\n ]\n # The following logic is inspired by C code of PyArray_Diagonal().\n offset = self.offset\n if offset > 0:\n diag_size = clip(dim2 - offset, 0, dim1)\n elif offset < 0:\n diag_size = clip(dim1 + offset, 0, dim2)\n else:\n diag_size = minimum(dim1, dim2)\n out_shape.append(diag_size)\n return [tuple(out_shape)]\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n if self.view and not numpy_diagonal_return_view:\n warnings.warn(\n \"View will forced to False. ExtractDiag property view is \"\n \"set to True but numpy version %s and prior versions of \"\n \"numpy.diagonal() do not return a view. Update \"\n \"numpy to use ExtractDiag(view=True)\" % np.version.version\n )\n self.view = False\n\n if self.view:\n self.view_map = {0: [0]}\n\n if \"offset\" not in state:\n self.offset = 0\n if \"axis1\" not in state:\n self.axis1 = 0\n if \"axis2\" not in state:\n self.axis2 = 1\n\n\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n \"\"\"\n A helper function for `theano.tensor.ExtractDiag`. It accepts tensor with\n `ndim >= 2` as input. The name `diagonal` is just meant to keep it\n consistent with numpy.\n\n Parameters\n ----------\n a : symbolic tensor\n offset : int\n offset\n axis1 : int\n axis2 : int\n\n Returns\n -------\n tensor : symbolic tensor\n\n \"\"\"\n return ExtractDiag(offset, axis1, axis2)(a)\n\n\nclass AllocDiag(Op):\n \"\"\"\n An op that copies a vector to the diagonal of an empty matrix. It does the\n inverse of ExtractDiag.\n\n Usage: T.AllocDiag()(x)\n\n `x` should be a tensor vector. The parenthesis in the front should indicate\n which main diagonal the vector value goes into. 
By default it is set to\n `0`, which corresponds to setting the values of x to the main diagonal in\n the returned matrix.\n\n Parameters\n ----------\n axis1: Axis to be used as the first axis of the 2-D\n sub-arrays to which the diagonals will be allocated.\n Defaults to first axis (0).\n\n axis2: Axis to be used as the second axis of the 2-D\n sub-arrays to which the diagonals will be allocated.\n Defaults to second axis (1).\n\n offset: Offset of the diagonal from the main diagonal defined by `axis1`\n and `axis2`.\n Can be positive or negative.\n Defaults to main diagonal (0).\n\n x: symbolic vector\n A tensor vector consists of diagonal values.\n\n Returns\n -------\n tensor : symbolic tenstor\n A tensor with passed tensor values at their corresponding diagonals.\n\n \"\"\"\n\n __props__ = (\"offset\", \"axis1\", \"axis2\")\n\n def __init__(self, offset=0, axis1=0, axis2=1):\n self.offset = offset\n self.axis1 = axis1\n self.axis2 = axis2\n\n def make_node(self, diag):\n diag = as_tensor_variable(diag)\n if diag.type.ndim < 1:\n raise ValueError(\n \"AllocDiag needs an input with 1 or more \" \"dimensions\", diag.type\n )\n return Apply(\n self,\n [diag],\n [\n diag.type.__class__(\n dtype=diag.dtype, broadcastable=[False] * (diag.ndim + 1)\n )()\n ],\n )\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (z,) = outputs\n\n axis1 = np.minimum(self.axis1, self.axis2)\n axis2 = np.maximum(self.axis1, self.axis2)\n offset = self.offset\n\n # Create array with one extra dimension for resulting matrix\n result_shape = x.shape[:-1] + (x.shape[-1] + abs(offset),) * 2\n result = np.zeros(result_shape, dtype=x.dtype)\n\n # Create slice for diagonal in final 2 axes\n idxs = np.arange(x.shape[-1])\n diagonal_slice = (len(result_shape) - 2) * [slice(None)] + [\n idxs + np.maximum(0, -offset),\n idxs + np.maximum(0, offset),\n ]\n\n # Fill in final 2 axes with x\n result[tuple(diagonal_slice)] = x\n\n if len(x.shape) > 1:\n # Re-order axes so they correspond to diagonals at axis1, axis2\n axes = list(range(len(x.shape[:-1])))\n last_idx = axes[-1]\n axes = axes[:axis1] + [last_idx + 1] + axes[axis1:]\n axes = axes[:axis2] + [last_idx + 2] + axes[axis2:]\n result = result.transpose(axes)\n\n z[0] = result\n\n def grad(self, inputs, gout):\n (gz,) = gout\n return [diagonal(gz, offset=self.offset, axis1=self.axis1, axis2=self.axis2)]\n\n def infer_shape(self, nodes, shapes):\n (x_shape,) = shapes\n axis1 = np.minimum(self.axis1, self.axis2)\n axis2 = np.maximum(self.axis1, self.axis2)\n\n result_shape = list(x_shape[:-1])\n diag_shape = x_shape[-1] + abs(self.offset)\n result_shape = result_shape[:axis1] + [diag_shape] + result_shape[axis1:]\n result_shape = result_shape[:axis2] + [diag_shape] + result_shape[axis2:]\n return [tuple(result_shape)]\n\n def __setstate__(self, state):\n if \"view_map\" in state:\n del state[\"view_map\"]\n\n self.__dict__.update(state)\n\n if \"offset\" not in state:\n self.offset = 0\n if \"axis1\" not in state:\n self.axis1 = 0\n if \"axis2\" not in state:\n self.axis2 = 1\n\n\ndef diag(v, k=0):\n \"\"\"\n A helper function for two ops: `theano.tensor.ExtractDiag` and\n `theano.tensor.AllocDiag`. The name `diag` is meant to keep it consistent\n with numpy. 
It both accepts tensor vector and tensor matrix.\n While the passed tensor variable `v` has `v.ndim>=2`, it builds a\n `ExtractDiag` instance, and returns a vector with its entries equal to\n `v`'s main diagonal; otherwise if `v.ndim` is `1`, it builds an `AllocDiag`\n instance, and returns a matrix with `v` at its k-th diaogonal.\n\n Parameters\n ----------\n v : symbolic tensor\n k : int\n offset\n\n Returns\n -------\n tensor : symbolic tensor\n\n \"\"\"\n\n if v.ndim == 1:\n return AllocDiag(k)(v)\n elif v.ndim >= 2:\n return diagonal(v, offset=k)\n else:\n raise ValueError(\"Input must has v.ndim >= 1.\")\n\n\ndef stacklists(arg):\n \"\"\"\n Recursively stack lists of tensors to maintain similar structure.\n\n This function can create a tensor from a shaped list of scalars:\n\n Examples\n --------\n >>> from theano.tensor import stacklists, scalars, matrices\n >>> from theano import function\n >>> a, b, c, d = scalars('abcd')\n >>> X = stacklists([[a, b], [c, d]])\n >>> f = function([a, b, c, d], X)\n >>> f(1, 2, 3, 4)\n array([[ 1., 2.],\n [ 3., 4.]], dtype=float32)\n\n We can also stack arbitrarily shaped tensors. Here we stack matrices into\n a 2 by 2 grid:\n\n >>> from numpy import ones\n >>> a, b, c, d = matrices('abcd')\n >>> X = stacklists([[a, b], [c, d]])\n >>> f = function([a, b, c, d], X)\n >>> x = ones((4, 4), 'float32')\n >>> f(x, x, x, x).shape\n (2, 2, 4, 4)\n\n \"\"\"\n if isinstance(arg, (tuple, list)):\n return stack(list(map(stacklists, arg)))\n else:\n return arg\n\n\ndef ptp(a, axis=None):\n \"\"\"\n Range of values (maximum - minimum) along an axis.\n\n The name of the function comes from the acronym for peak to peak.\n\n Parameters\n ----------\n a\n Input tensor.\n axis\n Axis along which to find the peaks. By default, flatten the array.\n\n Returns\n -------\n array\n A new array holding the result.\n\n \"\"\"\n\n a = as_tensor_variable(a)\n\n out = max(a, axis) - min(a, axis)\n\n return out\n\n\ndef power(x, y):\n return x ** y\n\n\ndef swapaxes(y, axis1, axis2):\n \"swap axes of inputted tensor\"\n y = as_tensor_variable(y)\n ndim = y.ndim\n li = list(range(0, ndim))\n li[axis1], li[axis2] = li[axis2], li[axis1]\n return y.dimshuffle(li)\n\n\ndef choose(a, choices, out=None, mode=\"raise\"):\n \"\"\"\n Construct an array from an index array and a set of arrays to choose from.\n\n First of all, if confused or uncertain, definitely look at the Examples -\n in its full generality, this function is less simple than it might seem\n from the following code description (below ndi = numpy.lib.index_tricks):\n\n np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).\n\n But this omits some subtleties. 
Here is a fully general summary:\n\n Given an ``index`` array (a) of integers and a sequence of n arrays\n (choices), a and each choice array are first broadcast, as necessary,\n to arrays of a common shape; calling these Ba and\n Bchoices[i], i = 0,...,n-1 we have that, necessarily,\n Ba.shape == Bchoices[i].shape for each i.\n Then, a new array with shape Ba.shape is created as follows:\n\n - if mode=raise (the default), then, first of all, each element of a\n (and thus Ba) must be in the range [0, n-1]; now, suppose that\n i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -\n then the value at the same position in the new array is the value in\n Bchoices[i] at that same position;\n\n - if mode=wrap, values in a (and thus Ba) may be any (signed) integer;\n modular arithmetic is used to map integers outside the range [0, n-1]\n back into that range; and then the new array is constructed as above;\n\n - if mode=clip, values in a (and thus Ba) may be any (signed) integer;\n negative integers are mapped to 0; values greater than n-1 are mapped\n to n-1; and then the new array is constructed as above.\n\n Parameters\n ----------\n a : int array\n This array must contain integers in [0, n-1], where n is the number of\n choices, unless mode=wrap or mode=clip, in which cases any integers\n are permissible.\n choices : sequence of arrays\n Choice arrays. a and all of the choices must be broadcastable to\n the same shape. If choices is itself an array (not recommended),\n then its outermost dimension (i.e., the one corresponding to\n choices.shape[0]) is taken as defining the ``sequence``.\n out : array, optional\n If provided, the result will be inserted into this array.\n It should be of the appropriate shape and dtype.\n mode : {``raise`` (default), ``wrap``, ``clip``}, optional\n Specifies how indices outside [0, n-1] will be treated:\n ``raise`` : an exception is raised\n ``wrap`` : value becomes value mod n\n ``clip`` : values < 0 are mapped to 0, values > n-1 are mapped to n-1\n\n Returns\n -------\n merged_array - array\n The merged result.\n\n Raises\n ------\n ValueError - shape mismatch\n If a and each choice array are not all broadcastable to the same shape.\n\n \"\"\"\n # This is done to keep the same function signature then NumPy.\n assert out is None\n return Choose(mode)(a, choices)\n\n\nclass Choose(Op):\n __props__ = (\"mode\",)\n\n def __init__(self, mode):\n assert mode in (\"raise\", \"wrap\", \"clip\")\n self.mode = mode\n\n def infer_shape(self, node, shapes):\n\n if isinstance(node.inputs[1], TensorVariable):\n # We have padded node.inputs[0] to the right number of\n # dimensions for the output\n l = []\n for sh1, sh2, b1 in zip(\n shapes[0], shapes[1][1:], node.inputs[0].broadcastable\n ):\n if b1:\n l.append(sh2)\n else:\n l.append(sh1)\n return [tuple(l)]\n else:\n import theano.typed_list\n\n assert isinstance(node.inputs[1], theano.typed_list.TypedListVariable)\n raise ShapeError(\"Case not implemented\")\n shape = shapes[0]\n for i in range(len(shapes[0]) - 1):\n shape[i] = shapes[1][i]\n return [(shape)]\n\n def make_node(self, a, choices):\n # Import here as it isn't imported by default and we can't\n # import at the top as it would cause circular import.\n import theano.typed_list\n\n a = as_tensor_variable(a)\n if a.dtype not in theano.tensor.discrete_dtypes:\n raise TypeError(\n \"choose first argument must have an [u]int* dtype. 
Got %s.\" % a.dtype\n )\n\n if isinstance(choices, (tuple, list, theano.typed_list.TypedListVariable)):\n choice = theano.typed_list.make_list(choices)\n choice_ndim = choice.ttype.ndim\n choice_bcast = choice.ttype.broadcastable\n else:\n choice = as_tensor_variable(choices)\n choice_ndim = choice.ndim - 1\n choice_bcast = choice.broadcastable[1:]\n out_ndim = np.max([a.ndim, choice_ndim])\n\n # Make explicit all added broadcastable dimensions.\n a = shape_padleft(a, out_ndim - a.ndim)\n if len(choice_bcast) != out_ndim:\n if isinstance(choice.type, TensorType):\n choice = choice.dimshuffle(\n 0,\n *((\"x\",) * (out_ndim - choice_ndim) + tuple(range(1, choice.ndim))),\n )\n choice_ndim = choice.ndim - 1\n choice_bcast = choice.broadcastable[1:]\n else:\n raise NotImplementedError(\n \"We currently didn't implemented that case. \"\n \"To make it work, explicitly add dimensions \"\n \"of size one for dimensions that will be broadcasted\"\n )\n\n bcast = [False] * out_ndim\n for idx, (b1, b2) in enumerate(\n zip(a.broadcastable, (True,) * (out_ndim - choice_ndim) + choice_bcast)\n ):\n if b1 and b2:\n bcast[idx] = True\n o = TensorType(choice.dtype, bcast)\n return Apply(self, [a, choice], [o()])\n\n def perform(self, node, inputs, outputs):\n (z,) = outputs\n a = inputs[0]\n choice = inputs[1]\n # TODO reuse out?\n z[0] = np.choose(a, choice, mode=self.mode)\n\n\nclass AllocEmpty(gof.Op):\n \"\"\"Implement Alloc on the cpu, but without initializing memory.\"\"\"\n\n __props__ = (\"dtype\",)\n params_type = ParamsType(typecode=int32)\n\n # specify the type of the data\n def __init__(self, dtype):\n assert isinstance(dtype, str), dtype\n self.dtype = dtype.lower()\n\n @property\n def typecode(self):\n return np.dtype(self.dtype).num\n\n def make_node(self, *shape):\n shape, bcast = alloc_validate_shape(shape)\n otype = TensorType(dtype=self.dtype, broadcastable=bcast)\n output = otype()\n\n output.tag.values_eq_approx = values_eq_approx_always_true\n # The outut can contain nan/inf. output.type is a new\n # instance, so we can do this only for that variable.\n output.type.filter_checks_isfinite = False\n\n # We can't reuse filter_checks_isfinite as by default it is\n # False and it is set to true only in DebugMode.\n # We can't set it in the type as other make_node can reuse the type.\n # We can't set it in the variable as it isn't copied when we copy\n # the variale. 
So we set it in the tag.\n output.tag.nan_guard_mode_check = False\n return Apply(self, shape, [output])\n\n def debug_perform(self, node, inputs, out_, params):\n self.perform(node, inputs, out_, params)\n out_[0][0].fill(-123456789)\n\n def perform(self, node, inputs, out_, params):\n (out,) = out_\n sh = tuple([int(i) for i in inputs])\n if out[0] is None or out[0].shape != sh:\n out[0] = np.empty(sh, dtype=self.dtype)\n\n def c_code(self, node, name, inputs, out_, sub):\n (out,) = out_\n fail = sub[\"fail\"]\n shps = inputs\n nd = len(shps)\n params = sub[\"params\"]\n str = \"npy_intp dims[%(nd)s];\\n\" % locals()\n for idx, sh in enumerate(shps):\n str += (\n \"dims[%(idx)s] =\"\n \"((npy_intp)((dtype_%(sh)s*)\"\n \" PyArray_DATA(%(sh)s))[0]);\\n\" % locals()\n )\n\n # Validate that the output storage exists\n str += \"if(%(out)s==NULL\\n\" % locals()\n for idx, sh in enumerate(shps):\n str += \"||PyArray_DIMS(%(out)s)[%(idx)s]!=dims[%(idx)s]\" % locals()\n\n str += (\n \"\"\"){\n /* Reference received to invalid output variable.\n Decrease received reference's ref count and allocate new\n output variable */\n Py_XDECREF(%(out)s);\n %(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,\n dims,\n %(params)s->typecode,\n 0);\n if (!%(out)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"alloc failed\");\n %(fail)s;\n }\n }\n \"\"\"\n % locals()\n )\n return str\n\n def infer_shape(self, node, input_shapes):\n return [node.inputs]\n\n def c_code_cache_version(self):\n return (4,)\n\n def do_constant_folding(self, node):\n return False\n\n def connection_pattern(self, node):\n return [[False] for i in node.inputs]\n\n def grad(self, inputs, grads):\n return [DisconnectedType()() for i in inputs]\n\n def R_op(self, inputs, eval_points):\n return [zeros(inputs, self.dtype)]\n"
] |
[
[
"numpy.dot",
"numpy.minimum",
"numpy.asarray",
"numpy.dtype",
"numpy.concatenate",
"numpy.max",
"numpy.all",
"numpy.mean",
"numpy.tri",
"numpy.iinfo",
"numpy.allclose",
"numpy.may_share_memory",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.choose",
"numpy.argmax",
"numpy.zeros",
"numpy.nonzero",
"numpy.int64",
"numpy.array",
"numpy.diagonal",
"numpy.sum",
"numpy.maximum",
"numpy.tile",
"numpy.complex",
"numpy.isscalar",
"numpy.prod",
"numpy.empty"
]
] |
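The `_tensordot_as_dot` helper archived in the row above reduces a tensordot to a single 2-D dot by flattening the contracted axes of both operands. Below is a minimal NumPy sketch of that integer-`axes` path, checked against `np.tensordot`; the function name `tensordot_as_matmul` is ours, for illustration only, and is not part of the archived code.

import numpy as np

def tensordot_as_matmul(a, b, axes=2):
    # Contract the trailing `axes` dims of `a` against the leading
    # `axes` dims of `b` by reshaping both operands to 2-D, as the
    # integer-axes branch of _tensordot_as_dot does symbolically.
    a_keep = a.shape[:a.ndim - axes]
    b_keep = b.shape[axes:]
    summed = int(np.prod(a.shape[a.ndim - axes:], dtype=np.int64))
    a2 = a.reshape(int(np.prod(a_keep, dtype=np.int64)), summed)
    b2 = b.reshape(summed, int(np.prod(b_keep, dtype=np.int64)))
    return (a2 @ b2).reshape(a_keep + b_keep)

a = np.random.random((2, 3, 4))
b = np.random.random((3, 4, 5))
assert np.allclose(tensordot_as_matmul(a, b, 2), np.tensordot(a, b, 2))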
jasonyuan97/angle-first-gait-recognition
|
[
"d99c8717cac488b858b4a2b8e26a66d6b12c2c1f"
] |
[
"inference.py"
] |
[
"__author__ = 'jason'\n\nimport tensorflow as tf \nimport sys\nfrom tool import NUM_PARTS\n\nINPUT_CHANNEL = 1\nCONV1_DEEP = 16\nCONV1_SIZE = 7\nCONV2_DEEP = 64\nCONV2_SIZE = 7\nCONV3_DEEP = 256\nCONV3_SIZE = 7\nDROP_RATE = 0.5\nWEIGHT_DECAY=0.0005\n\ndef weightVariable(shape, weight_decay):\n\tinit = tf.Variable(tf.random_normal(shape, stddev=0.01))\n\ttf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(weight_decay)(init))\n\treturn init\n\ndef biasVariable(shape):\n\tinit = tf.random_normal(shape)\n\treturn tf.Variable(init)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\n\ndef maxPool(x):\n\treturn tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1],padding='SAME')\n\ndef normalize(x):\n\treturn tf.nn.local_response_normalization(x, 5/2, 2, 1e-4, 0.75)\n\ndef dropout(x, keep):\n\treturn tf.nn.dropout(x, keep)\n\ndef full_inference(net, probe_gei, gallery_gei):\n\tif net==\"MT\":\n\t\treturn MT_inference(probe_gei, gallery_gei)\n\telse:\n\t\tprint(\"Error: Wrong net type\")\n\t\tsys.exit(1)\n\ndef partial_inference(net, parted_probe_gei, parted_gallery_gei):\n\tif net==\"TMT\":\n\t\treturn TMT_inference(parted_probe_gei, parted_gallery_gei)\t\n\telif net==\"BMT\":\n\t\treturn BMT_inference(parted_probe_gei, parted_gallery_gei)\n\telse:\n\t\tprint(\"Error: Wrong net type\")\n\t\tsys.exit(1)\n\ndef part_MT_inference(part_gei0, part_gei1):\n\tW11 = weightVariable([CONV1_SIZE,CONV1_SIZE,INPUT_CHANNEL,CONV1_DEEP],WEIGHT_DECAY)\n\tb11 = biasVariable([CONV1_DEEP])\n\tconv11 = normalize(tf.nn.relu(conv2d(part_gei0,W11)+b11))\n\tpool11 = maxPool(conv11)\n\t\n\tW12 = weightVariable([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],WEIGHT_DECAY)\n\tb12 = biasVariable([CONV2_DEEP])\n\tconv12 = normalize(tf.nn.relu(conv2d(pool11,W12)+b12))\n\tpool12 = maxPool(conv12)\n\t\n\tW21 = weightVariable([CONV1_SIZE,CONV1_SIZE,INPUT_CHANNEL,CONV1_DEEP],WEIGHT_DECAY)\n\tb21 = biasVariable([CONV1_DEEP])\n\tconv21 = normalize(tf.nn.relu(conv2d(part_gei1,W21)+b21))\n\tpool21 = maxPool(conv21)\n\t\n\tW22 = weightVariable([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],WEIGHT_DECAY)\n\tb22 = biasVariable([CONV2_DEEP])\n\tconv22 = normalize(tf.nn.relu(conv2d(pool21,W22)+b22))\n\tpool22 = maxPool(conv22)\n\n\tW3 = weightVariable([CONV3_SIZE,CONV3_SIZE,CONV2_DEEP,CONV3_DEEP],WEIGHT_DECAY)\n\tb3 = biasVariable([CONV3_DEEP])\n\tconv3 = tf.nn.relu(conv2d(pool12,W3)+conv2d(pool22,W3)+b3)\n\tfeature_map = dropout(conv3, DROP_RATE)\n\t#print feature_map.shape\n\treturn feature_map\n\ndef classify(feature_map):\n\t#print(\"feature_map.shape:\", feature_map.shape)\n\ttmp = int(feature_map.shape[1]*feature_map.shape[2]*feature_map.shape[3])\t\n\tWf = weightVariable([tmp,2],WEIGHT_DECAY)\n\tbf = biasVariable([2])\n\tflatted_map = tf.reshape(feature_map, [-1, tmp])\n\tout = tf.add(tf.matmul(flatted_map, Wf), bf)\n\treturn out\n\t\ndef concat_maps(maps):\n\t#height=sum([int(maps[i].shape[1]) for i in [0,1,2,4,5]])\n\t#width=maps[0].shape[2]\n\t#print \"height, width:\", height, width\n\ttmp_map1=tf.concat([maps[0], maps[1]], 1)\n\ttmp_map2=tf.concat([maps[2], maps[3]], 2)\n\ttmp_map3=tf.concat([maps[4], maps[5]], 1)\n\tfeature_map=tf.concat([tmp_map1, tmp_map2, tmp_map3], 1)\n\t#print \"feature_map.shape:\", feature_map.shape\n\treturn feature_map\n\ndef MT_inference(gei0, gei1):\n\treturn part_MT_inference(gei0, gei1)\n\ndef TMT_inference(gei0, gei1):\n\tmaps=[]\n\tfor i in range(len(gei0)):\n\t\tmaps.append(part_MT_inference(gei0[i], gei1[i]))\t\n\tfeature_map = 
concat_maps(maps)\n\treturn feature_map\n\ndef BMT_inference(gei0, gei1):\n\tW11=[]\n\tb11=[]\n\tconv11=[]\n\tpool11=[]\n\tW21=[]\n\tb21=[]\n\tconv21=[]\n\tpool21=[]\n\t\n\tfor i in range(len(gei0)):\n\t\tW11.append(weightVariable([CONV1_SIZE,CONV1_SIZE,INPUT_CHANNEL,CONV1_DEEP],WEIGHT_DECAY))\n\t\tb11.append(biasVariable([CONV1_DEEP]))\n\t\tconv11.append(normalize(tf.nn.relu(conv2d(gei0[i],W11[i])+b11[i])))\n\t\tpool11.append(maxPool(conv11[i]))\n\tfor i in range(len(gei1)):\n\t\tW21.append(weightVariable([CONV1_SIZE,CONV1_SIZE,INPUT_CHANNEL,CONV1_DEEP],WEIGHT_DECAY))\n\t\tb21.append(biasVariable([CONV1_DEEP]))\n\t\tconv21.append(normalize(tf.nn.relu(conv2d(gei1[i],W21[i])+b21[i])))\n\t\tpool21.append(maxPool(conv21[i]))\n\tconcated_pool11=concat_maps(pool11)\t\n\tconcated_pool21=concat_maps(pool21)\t\n\n\tW12 = weightVariable([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],WEIGHT_DECAY)\n\tb12 = biasVariable([CONV2_DEEP])\n\tconv12 = normalize(tf.nn.relu(conv2d(concated_pool11,W12)+b12))\n\tpool12 = maxPool(conv12)\n\n\tW22 = weightVariable([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],WEIGHT_DECAY)\n\tb22 = biasVariable([CONV2_DEEP])\n\tconv22 = normalize(tf.nn.relu(conv2d(concated_pool21,W22)+b22))\n\tpool22 = maxPool(conv22)\n\n\tW3 = weightVariable([CONV3_SIZE,CONV3_SIZE,CONV2_DEEP,CONV3_DEEP],WEIGHT_DECAY)\n\tb3 = biasVariable([CONV3_DEEP])\n\tconv3 = tf.nn.relu(conv2d(pool12,W3)+conv2d(pool22,W3)+b3)\n\tfeature_map = dropout(conv3, DROP_RATE)\n\n\treturn feature_map\n\t\t\ndef LB_inference(gei0, gei1):\n\tW1 = weightVariable([CONV1_SIZE,CONV1_SIZE,INPUT_CHANNEL,CONV1_DEEP],WEIGHT_DECAY)\n\tb1 = biasVariable([CONV1_DEEP])\n\tconv1 = normalize(tf.nn.relu(conv2d(gei0,W1)+conv2d(gei1,W1)+b1))\n\tpool1 = maxPool(conv1)\n\n\tW2 = weightVariable([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],WEIGHT_DECAY)\n\tb2 = biasVariable([CONV2_DEEP])\n\tconv2 = normalize(tf.nn.relu(conv2d(pool1,W2)+b2))\n\tpool2 = maxPool(conv2)\n\t\n\tW3 = weightVariable([CONV3_SIZE,CONV3_SIZE,CONV2_DEEP,CONV3_DEEP],WEIGHT_DECAY)\n\tb3 = biasVariable([CONV3_DEEP])\n\tconv3 = tf.nn.relu(conv2d(pool2,W3)+b3)\n\tdrop3 = dropout(conv3, DROP_RATE)\n\t\n\tWf = weightVariable([11*21*CONV3_DEEP,2],WEIGHT_DECAY)\n\tbf = biasVariable([2])\n\tdrop3_flat = tf.reshape(drop3, [-1, 11*21*CONV3_DEEP])\n\tout = tf.add(tf.matmul(drop3_flat, Wf), bf)\n\n\treturn out\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.random_normal",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.nn.dropout",
"tensorflow.nn.local_response_normalization",
"tensorflow.nn.conv2d"
]
] |
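The `part_MT_inference` network above applies one weight tensor `W3` to both streams (`conv2d(pool12, W3) + conv2d(pool22, W3)`), i.e. mid-level weight sharing between probe and gallery GEIs. A hedged tf.keras sketch of that sharing pattern follows; the input shape and filter counts here are illustrative assumptions, not values taken from the repo.

import tensorflow as tf

# Assumed GEI input shape for illustration; the real shape depends on tool.NUM_PARTS.
gei_shape = (128, 88, 1)

# One Conv2D layer object holds one weight tensor; calling it on both
# inputs reuses those weights, like reusing W3 for both streams above.
shared_conv = tf.keras.layers.Conv2D(256, 7, padding="same", activation="relu")

probe = tf.keras.Input(shape=gei_shape)
gallery = tf.keras.Input(shape=gei_shape)
fused = shared_conv(probe) + shared_conv(gallery)  # mid-level fusion
model = tf.keras.Model(inputs=[probe, gallery], outputs=fused)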
Quiet-Clicking-Sounds/StringCalculator
|
[
"cd06759d214aa00b69182f42bf66e0393d127b29"
] |
[
"interface/visualization.py"
] |
[
"from __future__ import annotations\n\nimport tkinter as tk\nfrom tkinter import ttk\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\nfrom interface.instrument_class import Instrument\nfrom interface.visualization_plotting import plot_type_dict, plot_func_type\n\n\nclass PlotFrame(ttk.Frame):\n def __init__(self, parent, instrument: Instrument):\n super(PlotFrame, self).__init__(parent)\n self.instrument = instrument\n # header fixed to the top of the screen\n self.header = PlotHeader(self)\n self.header.pack(fill='x', expand=False, side=\"top\")\n\n # plot body for all plots to be placed - also the creator of said plots\n self.plot_body = PlotBody(self, self.instrument)\n self.plot_body.pack(fill='both', expand=True, side=\"bottom\")\n\n def create_plot(self, name: str):\n self.plot_body.new_plot(name)\n\n\nclass PlotHeader(ttk.Frame):\n def __init__(self, parent: PlotFrame):\n super(PlotHeader, self).__init__(parent)\n self.parent = parent\n max_cols = 6\n\n # add plot buttons\n for n, key in enumerate(plot_type_dict.keys()):\n button = ttk.Button(self, text=key,\n command=lambda key=key: self.parent.create_plot(key))\n button.grid(row=n // max_cols, column=n % max_cols)\n\n\nclass PlotBody(ttk.Frame):\n plot: tk.Canvas\n\n def __init__(self, parent, instrument: Instrument):\n super(PlotBody, self).__init__(parent)\n self.instrument = instrument\n self.plots: dict[str: tk.Canvas] = dict()\n\n def new_plot(self, name='Tension'):\n try:\n self.plot.destroy()\n except AttributeError:\n pass\n\n fig: plot_func_type = plot_type_dict[name](self.instrument, (1920, 800))\n canvas = FigureCanvasTkAgg(fig, self)\n widget = canvas.get_tk_widget()\n widget.pack(fill='x', expand=True, side=\"top\")\n self.plot = widget\n"
] |
[
[
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
] |
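`PlotBody.new_plot` above swaps a matplotlib figure into a Tk container via `FigureCanvasTkAgg`. Below is a self-contained sketch of that embedding pattern, independent of the repo's `Instrument` and `plot_type_dict`.

import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()
fig = Figure(figsize=(4, 3))
fig.add_subplot(111).plot([0, 1, 2, 3], [0, 1, 4, 9])

# FigureCanvasTkAgg renders the Figure onto a Tk drawing surface;
# get_tk_widget() returns a tk.Canvas that packs like any other widget.
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(fill="both", expand=True)
root.mainloop()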
wikicoo/spot-trend-grid
|
[
"b631145db73e18cc35077c56c63a3bacc400d674"
] |
[
"run.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom app.BinanceAPI import BinanceAPI\nfrom app.authorization import api_key,api_secret\nfrom data.runBetData import RunBetData\nfrom app.dingding import Message\nfrom data.calcIndex import CalcIndex\nimport time\n\nimport mplfinance as mpf\nimport matplotlib as mpl# 用于设置曲线参数\nfrom cycler import cycler# 用于定制线条颜色\nimport pandas as pd# 导入DataFrame数据\nimport matplotlib.pyplot as plt\n\nbinan = BinanceAPI(api_key,api_secret)\nrunbet = RunBetData()\nmsg = Message()\n\nindex = CalcIndex()\n\nclass Run_Main():\n\n def __init__(self):\n self.coinType = runbet.get_cointype() # 交易币种\n pass\n\n\n def loop_run(self):\n while True:\n cur_market_price = binan.get_ticker_price(runbet.get_cointype()) # 当前交易对市价\n grid_buy_price = runbet.get_buy_price() # 当前网格买入价格\n grid_sell_price = runbet.get_sell_price() # 当前网格卖出价格\n quantity = runbet.get_quantity() # 买入量\n step = runbet.get_step() # 当前步数\n right_size = len(str(cur_market_price).split(\".\")[1])\n\n if grid_buy_price >= cur_market_price and index.calcAngle(self.coinType,\"5m\",False,right_size): # 是否满足买入价\n res = msg.buy_market_msg(self.coinType, quantity)\n if res['orderId']: # 挂单成功\n runbet.modify_price(cur_market_price, step+1) #修改data.json中价格、当前步数\n time.sleep(60*2) # 挂单后,停止运行1分钟\n else:\n break\n\n elif grid_sell_price < cur_market_price and index.calcAngle(self.coinType,\"5m\",True,right_size): # 是否满足卖出价\n if step==0: # setp=0 防止踏空,跟随价格上涨\n runbet.modify_price(grid_sell_price,step)\n else:\n res = msg.sell_market_msg(self.coinType, runbet.get_quantity(False))\n if res['orderId']:\n # runbet.set_ratio(runbet.get_cointype()) 启动动态改变比率\n runbet.modify_price(cur_market_price, step - 1)\n time.sleep(60*2) # 挂单后,停止运行1分钟\n else:\n break\n else:\n print(\"当前市价:{market_price}。未能满足交易,继续运行\".format(market_price = cur_market_price))\n\n def macdCalc(self):\n kline = binan.get_klines(\"DOGEUSDT\", \"1m\", 500)\n columns = ['开盘时间', 'Open', 'High', 'Low', 'Close', 'volume', '收盘时间',\n '成交额', '成交笔数', '主动买入成交量', '主动买入成交额', '请忽略该参数']\n # df = pd.read_json('data/kline.json')\n df = pd.read_json(kline)\n df.columns = columns\n df['开盘时间'] = pd.to_datetime(df['开盘时间'], unit='ms')\n df = df.set_index('开盘时间').iloc[-10:, :5]\n\n # mpf.plot(df, type='candle',mav=(5,10,20))\n\n # MACD默认参数为12、26、9,计算过程分为三步,\n # 第一步计算EMA:\n # 12日EMA\n # EMA(12) = 2 / (12 + 1) * 今日收盘价(12) + 11 / (12 + 1) * 昨日EMA(12)\n df['ema12'] = 0\n df['ema12'] = (2 / (12 + 1) * df['Close']) + (11 / (12 + 1) * df['ema12'].shift(1))\n a = 2 / (12 + 1) * df['Close']\n b = 11 / (12 + 1) * df['ema12'].shift(1)\n df['ema123'] = a + b\n # 26日EMA\n # EMA(26) = 2 / (26 + 1) * 今日收盘价(26) + 25 / (26 + 1) * 昨日EMA(26)\n # 第二步计算DIFF:\n # DIFF = EMA(12) - EMA(26)\n # 第三步计算DEA:\n # DEA = 2 / (9 + 1) * 今日DIFF + 8 / (9 + 1) * 昨日DEA\n # 第四步计算MACD柱线:\n # MACD柱线 = 2 * (DIFF - DEA)\n print(df)\n\n# if __name__ == \"__main__\":\n# instance = Run_Main()\n# try:\n# instance.loop_run()\n# except Exception as e:\n# error_info = \"报警:币种{coin},服务停止.错误原因{info}\".format(coin=instance.coinType,info=str(e))\n# msg.dingding_warn(error_info)\n\n# 调试看报错运行下面,正式运行用上面\nif __name__ == \"__main__\":\n instance = Run_Main()\n instance.macdCalc()\n # instance.loop_run()\n"
] |
[
[
"pandas.to_datetime",
"pandas.read_json"
]
] |
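`macdCalc` above starts a hand-rolled EMA with `df['ema12'].shift(1)` over a zero-initialized column, which only yields the first step of the recursion rather than a running EMA. Below is a sketch of the full 12/26/9 MACD using pandas' built-in exponentially weighted mean; the function name `macd` is ours, for illustration.

import pandas as pd

def macd(close: pd.Series, fast=12, slow=26, signal=9) -> pd.DataFrame:
    # ewm(span=n, adjust=False) applies the recursion the comments describe:
    # ema_t = 2/(n+1) * close_t + (n-1)/(n+1) * ema_{t-1}
    ema_fast = close.ewm(span=fast, adjust=False).mean()
    ema_slow = close.ewm(span=slow, adjust=False).mean()
    diff = ema_fast - ema_slow                        # DIFF = EMA(12) - EMA(26)
    dea = diff.ewm(span=signal, adjust=False).mean()  # DEA: 9-period EMA of DIFF
    return pd.DataFrame({"diff": diff, "dea": dea, "macd": 2 * (diff - dea)})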
AiNoTsubasa/tic-tac-toe
|
[
"71a1e3bbffbdc2e1a7acbc8f0f86b44c85a4071f"
] |
[
"src/TicTacToe.py"
] |
[
"import numpy as np\nimport random\n\nclass TicTacToe:\n def __init__(self):\n self.__O_MOVES = []\n self.__X_MOVES = []\n self.__o_player = \"human\"\n self.__x_player = \"ai\"\n self.__WIN_PATTERNS = [\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 4, 8],\n [2, 4, 6]\n ]\n self.__boards = np.array([\"_\"] * 9)\n self.__debug_mode = False\n self.__is_o_player_win = False\n self.__is_x_player_win = False\n \n def set_debug_mode(self, debug_mode):\n self.__debug_mode = debug_mode\n \n def __display_board(self):\n if len(self.__O_MOVES) > 0:\n self.__boards[self.__O_MOVES] = [\"O\"]\n if len(self.__X_MOVES) > 0:\n self.__boards[self.__X_MOVES] = [\"X\"]\n print(self.__boards.reshape([3, 3]), \"\\n\")\n \n def __check_win(self, player_moves):\n for win_pattern in self.__WIN_PATTERNS:\n if all(w in player_moves for w in win_pattern):\n return True\n return False\n\n def __get_available_moves(self, moved):\n boards = np.array(range(0, 9))\n return np.setdiff1d(boards, moved)\n \n def __check_game_end(self):\n boards = np.array(range(0, 9))\n moved = self.__O_MOVES + self.__X_MOVES\n return len(boards) == len(moved) or self.__is_o_player_win or self.__is_x_player_win\n \n def __get_random_move(self):\n avaliable_moves = self.__get_available_moves(self.__O_MOVES + self.__X_MOVES)\n return random.choice(avaliable_moves)\n \n def __minimax(self, o_moves, x_moves, player, depth):\n result = { 'score': 0, 'move': None }\n available_moves = self.__get_available_moves(o_moves + x_moves)\n if self.__check_win(o_moves):\n result['score'] = 10 - depth\n return result\n elif self.__check_win(x_moves):\n result['score'] = depth -10 \n return result\n elif len(available_moves) == 0:\n result['score'] = 0\n return result\n else:\n best_moves = []\n if player == self.__o_player:\n indicator = -100\n for next_move in available_moves:\n next_o_moves = o_moves.copy()\n next_o_moves.append(next_move)\n path_result = self.__minimax(next_o_moves, x_moves, self.__x_player, depth + 1)\n path_result['move'] = next_move\n if path_result['score'] > indicator:\n best_moves = [path_result]\n indicator = path_result['score']\n elif path_result['score'] == indicator:\n best_moves.append(path_result)\n \n return random.choice(best_moves)\n else:\n indicator = 100\n for next_move in available_moves:\n next_x_moves = x_moves.copy()\n next_x_moves.append(next_move)\n path_result = self.__minimax(o_moves, next_x_moves, self.__o_player, depth + 1)\n path_result['move'] = next_move\n if path_result['score'] < indicator:\n best_moves = [path_result]\n indicator = path_result['score']\n elif path_result['score'] == indicator:\n best_moves.append(path_result)\n \n if self.__debug_mode and depth == 0:\n print('Next move score: ', path_result)\n\n if self.__debug_mode and depth == 0:\n print('Best next move score: ', best_moves)\n return random.choice(best_moves)\n\n\n \n def __find_ai_next_move(self):\n next_move_result = self.__minimax(self.__O_MOVES, self.__X_MOVES, self.__x_player, 0)\n return next_move_result['move']\n \n def __execute_human_turn(self):\n print(\"Your turn...\")\n move = int(input(\"\\tPlease enter your move (1~9): \")) - 1\n while move in (self.__O_MOVES + self.__X_MOVES):\n available_moves = self.__get_available_moves(self.__O_MOVES + self.__X_MOVES)\n move = int(input(\"\\tInvalid move!! 
Please enter your move (\"+ \", \".join([str(m + 1) for m in available_moves]) +\"): \")) - 1\n self.__O_MOVES.append(move)\n self.__is_o_player_win = self.__check_win(self.__O_MOVES)\n\n def __execute_ai_turn(self):\n print(\"AI's turn...\")\n if len(self.__X_MOVES) == 0:\n move = self.__get_random_move()\n else:\n move = self.__find_ai_next_move()\n \n self.__X_MOVES.append(move)\n self.__is_x_player_win = self.__check_win(self.__X_MOVES)\n \n def start(self):\n print(\"===== GAME START! =====\")\n self.__display_board()\n while not self.__check_game_end():\n count_turn = len(self.__O_MOVES + self.__X_MOVES)\n if count_turn % 2 == 0:\n self.__execute_human_turn()\n else:\n self.__execute_ai_turn()\n self.__display_board()\n \n if self.__is_o_player_win:\n print(\"You win!!!\")\n elif self.__is_x_player_win:\n print(\"AI win!!!\")\n else:\n print(\"### Draw!!! ###\")\n print(\"===== GAME END! =====\")"
] |
[
[
"numpy.array",
"numpy.setdiff1d"
]
] |
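The __minimax scores above encode "prefer fast wins, slow losses" by folding the search depth into the terminal value. A tiny standalone illustration of that convention (names here are illustrative, not the repo's API):

    # Win scores shrink with depth, so among winning lines the shallowest one wins;
    # loss scores grow with depth, so the losing side stalls as long as possible.
    def terminal_score(winner, depth):
        if winner == 'O':        # maximizing player
            return 10 - depth
        if winner == 'X':        # minimizing player
            return depth - 10
        return 0                 # draw

    assert terminal_score('O', 1) > terminal_score('O', 3)   # faster win ranks higher
    assert terminal_score('X', 3) > terminal_score('X', 1)   # slower loss ranks higher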
sanderslab/magellanmapper
|
[
"16d55df6dc1f0e5baf3938a30edcdd634e0ffd85",
"16d55df6dc1f0e5baf3938a30edcdd634e0ffd85"
] |
[
"magmap/stats/clustering.py",
"magmap/cv/segmenter.py"
] |
[
"# Cluster measurement.\n# Author: David Young, 2019\n\"\"\"Clustering measurements.\"\"\"\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import cluster\nfrom sklearn import neighbors\n\nfrom magmap.cv import chunking, detector\nfrom magmap.settings import config\nfrom magmap.io import libmag\nfrom magmap.io import np_io\nfrom magmap.atlas import ontology\nfrom magmap.plot import plot_2d\nfrom magmap.settings import profiles\nfrom magmap.io import sitk_io\nfrom magmap.io import df_io\n\n\ndef knn_dist(blobs, n, max_dist=None, max_pts=None, show=True):\n \"\"\"Measure the k-nearest-neighbors distance.\n \n Args:\n blobs (:obj:`np.ndarray`): Sequence given as\n ``[n_samples, n_features]``, where features typically is of the\n form, ``[z, y, x, ...]``.\n n (int): Number of neighbors. The farthest neighbor will be used\n for sorting, filtering, and plotting.\n max_dist (float): Cap the maximum distance of points to plot, given\n as factor of the median distance; defaults to None to show\n neighbors of all distances.\n max_pts (int): Cap the maximum number of points for the zoomed\n plot if the 90th percentile exceeds this number; defaults\n to None.\n show (bool): True to immediately show the plot the distances;\n defaults to True. Will still plot and save in the background\n if :attr:`config.savefig` is set.\n\n Returns:\n :obj:`neighbors.NearestNeighbors`, :obj:`np.ndarray`,\n List[:obj:`pd.DataFrame`]:\n Tuple of ``NearestNeighbors`` object, all distances from\n ``kneighbors`` sorted by the ``n``th neighbor, and a list of\n data frames at different zoom levels (``[df_overview, df_zoomed]``).\n\n \"\"\"\n def plot(mod=\"\"):\n # plot sorted distances as line and return data frame\n df = pd.DataFrame(\n {\"point\": np.arange(len(dist_disp)), \"dist\": dist_disp})\n plot_2d.plot_lines(\n \"knn_dist{}\".format(mod), \"point\", (\"dist\", ), df=df, show=show,\n title=config.plot_labels[config.PlotLabels.TITLE])\n return df\n \n #blobs = blobs[::int(len(blobs) / 1000)] # TESTING: small num of blobs\n knn = neighbors.NearestNeighbors(n, n_jobs=-1).fit(blobs)\n print(knn)\n dist, _ = knn.kneighbors(blobs)\n # sort distances based on nth neighbors\n dist = dist[np.argsort(dist[:, n - 1])]\n dfs = []\n if show or config.savefig:\n distn = dist[:, n - 1]\n if max_dist:\n # remove all distances where nth neighbor is beyond threshold\n distn = distn[distn < max_dist * np.median(distn)]\n len_distn = len(distn)\n \n # line plot of nth neighbor distances by ascending order,\n # downsampling for overview plot\n step = int(len_distn / 1000)\n if step < 1: step = 1\n dist_disp = distn[::step]\n dfs.append(plot())\n \n # zoom to >= 90th percentile or max points, whichever is smaller\n above_pct = distn > np.percentile(distn, 90)\n if max_pts and max_pts < np.sum(above_pct):\n print(\"limiting zoomed plot to last {} points\".format(max_pts))\n dist_disp = distn[len_distn-max_pts:]\n else:\n dist_disp = distn[above_pct]\n dfs.append(plot(\"_zoomed\"))\n return knn, dist, dfs\n\n\ndef plot_knns(img_paths, suffix=None, show=False, names=None):\n \"\"\"Plot k-nearest-neighbor distances for multiple sets of blobs,\n overlaying on a single plot.\n\n Args:\n img_paths (List[str]): Base paths from which registered labels and\n blobs files will be found and output blobs file save location\n will be constructed.\n suffix (str): Suffix for ``path``; defaults to None.\n show (bool): True to plot the distances; defaults to False.\n names (List[str]): Sequence of names corresponding to ``img_paths``\n 
for the plot legend.\n\n \"\"\"\n cluster_settings = config.atlas_profile[\n profiles.RegKeys.METRICS_CLUSTER]\n knn_n = cluster_settings[profiles.RegKeys.KNN_N]\n if not knn_n:\n knn_n = cluster_settings[profiles.RegKeys.DBSCAN_MINPTS] - 1\n print(\"Calculating k-nearest-neighbor distances and plotting distances \"\n \"for neighbor {}\".format(knn_n))\n \n # set up combined data frames for all samples at each zoom level\n df_keys = (\"ov\", \"zoom\")\n dfs_comb = {key: [] for key in df_keys}\n names_disp = names if names else []\n for i, img_path in enumerate(img_paths):\n # load blobs associated with image\n mod_path = img_path\n if suffix is not None:\n mod_path = libmag.insert_before_ext(img_path, suffix)\n labels_img_np = sitk_io.load_registered_img(\n mod_path, config.RegNames.IMG_LABELS.value)\n blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))\n scaling, res = np_io.find_scaling(\n img_path, labels_img_np.shape,\n load_size=config.atlas_profile[\"target_size\"])\n if blobs is None:\n libmag.warn(\"unable to load nuclei coordinates for\", img_path)\n continue\n # convert to physical units and display k-nearest-neighbors for nuclei\n blobs_phys = np.multiply(blobs.blobs[:, :3], res)\n # TESTING: given the same blobs, simply shift\n #blobs = np.multiply(blobs[i*10000000:, :3], res)\n _, _, dfs = knn_dist(blobs_phys, knn_n, 2, 1000000, False)\n if names is None:\n # default to naming from filename\n names_disp.append(os.path.basename(mod_path))\n for j, df in enumerate(dfs):\n dfs_comb[df_keys[j]].append(df)\n \n for key in dfs_comb:\n # combine data frames at each zoom level, save, and plot with\n # different colors for each image\n df = df_io.join_dfs(dfs_comb[key], \"point\")\n dist_cols = [col for col in df.columns if col.startswith(\"dist\")]\n rename_cols = {col: name for col, name in zip(dist_cols, names_disp)}\n df = df.rename(rename_cols, axis=1)\n out_path = \"knn_dist_combine_{}\".format(key)\n df_io.data_frames_to_csv(df, out_path)\n plot_2d.plot_lines(\n out_path, \"point\", rename_cols.values(), df=df, show=show,\n title=config.plot_labels[config.PlotLabels.TITLE])\n\n\ndef cluster_dbscan_metrics(labels):\n \"\"\"Calculate basic metrics for DBSCAN.\n \n Args:\n labels (:obj:`np.ndarray`): Cluster labels.\n\n Returns:\n int, int, int: Tuple of number of clusters, number of noise blobs,\n and number of blobs contained within the largest cluster.\n\n \"\"\"\n lbl_unique, lbl_counts = np.unique(\n labels[labels != -1], return_counts=True)\n num_clusters = len(lbl_unique)\n # number of blobs in largest cluster\n num_largest = np.nan if len(lbl_counts) == 0 else np.amax(lbl_counts)\n # number of blobs not in a cluster\n num_noise = np.sum(labels == -1)\n return num_clusters, num_noise, num_largest\n\n\nclass ClusterByLabel(object):\n blobs = None\n \n @classmethod\n def cluster_by_label(cls, blobs, labels_img_np, blobs_lbl_scaling,\n blobs_iso_scaling, all_labels=False):\n coord_scaled = ontology.scale_coords(blobs, blobs_lbl_scaling)\n blobs_lbls = ontology.get_label_ids_from_position(\n coord_scaled, labels_img_np)\n blobs = np.multiply(blobs[:, :3], blobs_iso_scaling)\n blobs_clus = np.zeros((len(blobs), 5), dtype=int)\n blobs_clus[:, :3] = blobs\n blobs_clus[:, 3] = blobs_lbls\n cls.blobs = blobs_clus\n print(np.unique(blobs_clus[:, 3]))\n print(cls.blobs)\n\n # TODO: shift to separate func once load blobs without req labels img\n\n label_ids = np.unique(labels_img_np)\n cluster_settings = config.atlas_profile[\n profiles.RegKeys.METRICS_CLUSTER]\n eps 
= cluster_settings[profiles.RegKeys.DBSCAN_EPS]\n minpts = cluster_settings[profiles.RegKeys.DBSCAN_MINPTS]\n \n if all_labels:\n # cluster all labels together\n # TODO: n_jobs appears to be ignored despite reported fixes\n _, labels = cls.cluster_within_label(None, eps, minpts, -1)\n cls.blobs[:, 4] = labels\n else:\n # cluster by individual label\n pool = chunking.get_mp_pool()\n pool_results = []\n for label_id in label_ids:\n # add rotation argument if necessary\n pool_results.append(\n pool.apply_async(\n cls.cluster_within_label,\n args=(label_id, eps, minpts, None)))\n \n for result in pool_results:\n label_id, labels = result.get()\n if labels is not None:\n cls.blobs[cls.blobs[:, 3] == label_id, 4] = labels\n pool.close()\n pool.join()\n cls.blobs[:, :3] = np.divide(blobs[:, :3], blobs_iso_scaling)\n \n return cls.blobs\n \n @classmethod\n def cluster_within_label(cls, label_id, eps, minpts, n_jobs):\n blobs = cls.blobs\n if label_id is not None:\n blobs = blobs[blobs[:, 3] == label_id]\n clus_lbls = None\n if len(blobs) > 0:\n clusters = cluster.DBSCAN(\n eps=eps, min_samples=minpts, leaf_size=30,\n n_jobs=n_jobs).fit(blobs)\n num_clusters, num_noise, num_largest = cluster_dbscan_metrics(\n clusters.labels_)\n print(\"label {}: num clusters: {}, noise blobs: {}, \"\n \"largest cluster: {}\"\n .format(label_id, num_clusters, num_noise, num_largest))\n clus_lbls = clusters.labels_\n return label_id, clus_lbls\n\n\ndef cluster_blobs(img_path, suffix=None):\n \"\"\"Cluster blobs and save to Numpy archive.\n \n Args:\n img_path (str): Base path from which registered labels and blobs files\n will be found and output blobs file save location will be\n constructed.\n suffix (str): Suffix for ``path``; defaults to None.\n\n Returns:\n\n \"\"\"\n mod_path = img_path\n if suffix is not None:\n mod_path = libmag.insert_before_ext(img_path, suffix)\n labels_img_np = sitk_io.load_registered_img(\n mod_path, config.RegNames.IMG_LABELS.value)\n blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))\n scaling, res = np_io.find_scaling(\n img_path, labels_img_np.shape,\n load_size=config.atlas_profile[\"target_size\"])\n if blobs is None:\n libmag.warn(\"unable to load nuclei coordinates\")\n return\n \n # append label IDs to blobs and scale to make isotropic\n blobs_clus = ClusterByLabel.cluster_by_label(\n blobs.blobs[:, :3], labels_img_np, scaling, res)\n print(blobs_clus)\n out_path = libmag.combine_paths(mod_path, config.SUFFIX_BLOB_CLUSTERS)\n np.save(out_path, blobs_clus)\n",
"# Segmentation methods\n# Author: David Young, 2018, 2019\n\"\"\"Segment regions based on blobs, labels, and underlying features.\n\"\"\"\n\nfrom multiprocessing import sharedctypes\nfrom time import time\nfrom typing import Any, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import ndimage\nfrom skimage import feature\nfrom skimage import filters\nfrom skimage import segmentation\nfrom skimage import measure\nfrom skimage import morphology\n\nfrom magmap.settings import config\nfrom magmap.cv import chunking, cv_nd, detector\nfrom magmap.io import libmag\nfrom magmap.plot import plot_3d\nfrom magmap.io import df_io\n\n_logger = config.logger.getChild(__name__)\n\n\ndef _markers_from_blobs(roi, blobs):\n # use blobs as seeds by converting blobs into marker image\n markers = np.zeros(roi.shape, dtype=np.uint8)\n coords = libmag.coords_for_indexing(blobs[:, :3].astype(int))\n markers[tuple(coords)] = 1\n markers = morphology.dilation(markers, morphology.ball(1))\n markers = measure.label(markers)\n return markers\n\n\ndef _carve_segs(roi, blobs):\n # carve out background from segmented area\n carved = roi\n if blobs is None:\n # clean up by using simple threshold to remove all background\n carved, _ = cv_nd.carve(carved)\n else:\n # use blobs as ellipsoids to identify background to remove; \n # TODO: consider setting spacing in config since depends on \n # microscopy characteristics, such as elongation from \n # thick lightsheet\n thresholded = plot_3d.build_ground_truth(\n np.zeros(carved.shape, dtype=bool), blobs, ellipsoid=True)\n #thresholded = thresholded.astype(bool)\n carved[~thresholded] = 0\n return carved\n\n\ndef segment_rw(roi, channel, beta=50.0, vmin=0.6, vmax=0.65, remove_small=None, \n erosion=None, blobs=None, get_labels=False):\n \"\"\"Segments an image using the Random-Walker algorithm.\n \n Args:\n roi: Region of interest to segment.\n channel: Channel to pass to :func:``plot_3d.setup_channels``.\n beta: Random-Walker beta term.\n vmin: Values under which to exclude in markers; defaults to 0.6. \n Ignored if ``blobs`` is given.\n vmax: Values above which to exclude in markers; defaults to 0.65. 
\n Ignored if ``blobs`` is given.\n remove_small: Threshold size of small objects to remove; defaults \n to None to ignore.\n erosion: Structuring element size for erosion; defaults \n to None to ignore.\n blobs: Blobs to use for markers; defaults to None, in which \n case markers will be determined based on ``vmin``/``vmax`` \n thresholds.\n get_labels: True to measure and return labels from the \n resulting segmentation instead of returning the segmentations \n themselves; defaults to False.\n \n Returns:\n List of the Random-Walker segmentations for the given channels, \n If ``get_labels`` is True, the measured labels for the segmented \n regions will be returned instead of the segmentations themselves.\n \"\"\"\n print(\"Random-Walker based segmentation...\")\n labels = []\n walkers = []\n multichannel, channels = plot_3d.setup_channels(roi, channel, 3)\n for i in channels:\n roi_segment = roi[..., i] if multichannel else roi\n if blobs is None:\n # mark unknown pixels as 0 by distinguishing known background \n # and foreground\n markers = np.zeros(roi_segment.shape, dtype=np.uint8)\n markers[roi_segment < vmin] = 2\n markers[roi_segment >= vmax] = 1\n else:\n # derive markers from blobs\n markers = _markers_from_blobs(roi_segment, blobs)\n \n # perform the segmentation; conjugate gradient with multigrid\n # preconditioner option (cg_mg), which is faster but req pyamg\n walker = segmentation.random_walker(\n roi_segment, markers, beta=beta, mode=\"cg_mg\")\n \n # clean up segmentation\n \n #lib_clrbrain.show_full_arrays()\n walker = _carve_segs(walker, blobs)\n if remove_small:\n # remove artifacts\n walker = morphology.remove_small_objects(walker, remove_small)\n if erosion:\n # attempt to reduce label connections by eroding\n walker = morphology.erosion(walker, morphology.octahedron(erosion))\n \n if get_labels:\n # label neighboring pixels to segmented regions\n # TODO: check if necessary; useful only if blobs not given?\n label = measure.label(walker, background=0)\n labels.append(label)\n #print(\"label:\\n\", label)\n \n walkers.append(walker)\n #print(\"walker:\\n\", walker)\n \n if get_labels:\n return labels\n return walkers\n\n\ndef segment_ws(roi, channel, thresholded=None, blobs=None): \n \"\"\"Segment an image using a compact watershed, including the option \n to use a 3D-seeded watershed approach.\n \n Args:\n roi: ROI as a Numpy array in (z, y, x) order.\n channel: Channel to pass to :func:``plot_3d.setup_channels``.\n thresholded: Thresholded image such as a segmentation into foreground/\n background given by Random-walker (:func:``segment_rw``). \n Defaults to None, in which case Otsu thresholding will be performed.\n blobs: Blobs as a Numpy array in [[z, y, x, ...], ...] order, which \n are used as seeds for the watershed. 
Defaults to None, in which \n case peaks on a distance transform will be used.\n \n Returns:\n List of watershed labels for each given channel, with each set \n of labels given as an image of the same shape as ``roi``.\n \"\"\"\n labels = []\n labels_ws = None\n multichannel, channels = plot_3d.setup_channels(roi, channel, 3)\n for i in channels:\n roi_segment = roi[..., i] if multichannel else roi\n if thresholded is None:\n # Ostu thresholing and object separate based on local max \n # rather than seeded watershed approach\n roi_thresh = filters.threshold_otsu(roi, 64)\n thresholded = roi_segment > roi_thresh\n else:\n # r-w assigned 0 values to > 0 val labels\n thresholded = thresholded[0] - 1\n \n if blobs is None:\n # default to finding peaks of distance transform if no blobs \n # given, using an anisotropic footprint\n distance = ndimage.distance_transform_edt(thresholded)\n try:\n local_max = feature.peak_local_max(\n distance, indices=False, footprint=np.ones((1, 3, 3)), \n labels=thresholded)\n except IndexError as e:\n print(e)\n raise e\n markers = measure.label(local_max)\n else:\n markers = _markers_from_blobs(thresholded, blobs)\n \n # watershed with slight increase in compactness to give basins with \n # more regular, larger shape\n labels_ws = watershed_distance(thresholded, markers, compactness=0.1)\n \n # clean up segmentation\n labels_ws = _carve_segs(labels_ws, blobs)\n labels_ws = morphology.remove_small_objects(labels_ws, min_size=100)\n #print(\"num ws blobs: {}\".format(len(np.unique(labels_ws)) - 1))\n labels_ws = labels_ws[None]\n labels.append(labels_ws)\n return labels_ws\n\n\ndef labels_to_markers_blob(labels_img):\n \"\"\"Convert a labels image to markers as blobs.\n \n These markers can be used in segmentation algorithms such as \n watershed.\n \n Args:\n labels_img: Labels image as an integer Numpy array, where each \n unique int is a separate label.\n \n Returns:\n Image array of the same shape as ``img`` and the same number of \n labels as in ``labels_img``, with labels reduced to smaller \n markers.\n \"\"\"\n blobs = {}\n labels_unique = np.unique(labels_img)\n #labels_unique = np.concatenate((labels_unique[:5], labels_unique[-5:]))\n for label_id in labels_unique:\n if label_id == 0: continue\n print(\"finding centroid for label ID {}\".format(label_id))\n props = cv_nd.get_label_props(labels_img, label_id)\n if len(props) >= 1: \n # get centroid and convert to ellipsoid marker\n blob = [int(n) for n in props[0].centroid]\n blob.append(5)\n blobs[label_id] = np.array(blob)\n print(\"storing centroid as {}\".format(blobs[label_id]))\n \n # build markers from centroids\n spacing = detector.calc_scaling_factor()\n spacing = spacing / np.amin(spacing)\n markers = plot_3d.build_ground_truth(\n np.zeros_like(labels_img), np.array(list(blobs.values())), \n ellipsoid=True, labels=list(blobs.keys()), spacing=spacing)\n return markers\n\n\nclass LabelToMarkerErosion(chunking.SharedArrsContainer):\n \"\"\"Convert a label to an eroded marker for multiprocessing\n \n Uses class methods as an encapsulated way to use in forked multiprocessing\n without requirement for global variables. 
In non-forked multiprocessing\n (eg \"spawn\" on Windows), regions and weights should be pickled directly.\n \n Attributes:\n labels_img: Integer labels images as a Numpy array.\n wt_dists: Array of distances by which to weight the filter size.\n \"\"\"\n labels_img: np.ndarray = None\n wt_dists: np.ndarray = None\n \n @classmethod\n def set_labels_img(cls, labels_img: np.ndarray, wt_dists: np.ndarray):\n \"\"\"Set the labels image.\n \n Args:\n labels_img: Labels image to set as class attribute.\n wt_dists: Distance weights image to set as class attribute.\n \"\"\"\n cls.labels_img = labels_img\n cls.wt_dists = wt_dists\n \n @classmethod\n def meas_wt(\n cls, labels_img: np.ndarray, label_id: int, wt_dists: np.ndarray\n ) -> float:\n \"\"\"Measure the weight for a label based on weighted distances.\n \n Args:\n labels_img: Labels image.\n label_id: Label ID.\n wt_dists: Array of distances by which to weight the filter size.\n\n Returns:\n Normalized weight for ``label_id``.\n\n \"\"\"\n return np.median(wt_dists[labels_img == label_id]) / np.amax(wt_dists)\n \n @classmethod\n def erode_label(\n cls, label_id: int, filter_size: int, target_frac: float = None,\n min_filter_size: int = 1, use_min_filter: bool = False,\n skel_eros_filt_size: Union[int, bool] = False,\n wt: float = None) -> Tuple[\n Tuple[int, np.ndarray, np.ndarray, Any],\n Union[Optional[List[slice]], Any], Any]:\n \"\"\"Convert a label to a marker as an eroded version of the label.\n \n By default, labels will be eroded with the given ``filter_size`` \n as long as their final size is > 20% of the original volume. If \n the eroded volume is below threshold, ``filter_size`` will be \n progressively decreased until the filter cannot be reduced further.\n \n Skeletonization of the labels recovers some details by partially\n preserving the original labels' extent, including thin regions that\n would be eroded away, thus serving a similar function as that of\n adaptive morphological filtering. ``skel_eros_filt_size`` allows\n titrating the amount of the labels` extent to be preserved.\n \n If :attr:`wt_dists` is present, the label's distance will be used\n to weight the starting filter size.\n \n Args:\n label_id: ID of label to erode.\n filter_size: Size of structing element to start erosion.\n target_frac: Target fraction of original label to erode. \n Erosion will start with ``filter_size`` and use progressively\n smaller filters until remaining above this target. Defaults\n to None to use a fraction of 0.2. Titrates the relative\n amount of erosion allowed.\n min_filter_size: Minimum filter size, below which the\n original, uneroded label will be used instead. Defaults to 1.\n Use 0 to erode at size 1 even if below ``target_frac``.\n Titrates the absolute amount of erosion allowed.\n use_min_filter: True to erode at ``min_filter_size`` if\n a smaller filter size would otherwise be required; defaults\n to False to revert to original, uneroded size if a filter\n smaller than ``min_filter_size`` would be needed.\n skel_eros_filt_size: Erosion filter size before\n skeletonization to balance how much of the labels' extent will\n be preserved during skeletonization. Increase to reduce the\n skeletonization. Defaults to False, which will cause\n skeletonization to be skipped.\n wt: Multiplier weight for ``filter_size``. 
Defaults to None, in\n which case the weighte will be calculated from\n :attr:``wt_dists`` if available, or ignored if not.\n \n Returns:\n Tuple of stats,including ``label_id`` for reference and \n sizes of labels; list of slices denoting where to insert \n the eroded label; and the eroded label itself.\n \n Raises:\n ValueError: if ``region`` is None and :attr:`labels_img` is not\n available.\n \n \"\"\"\n if cls.labels_img is None:\n cls.labels_img = cls.convert_shared_arr(config.RegNames.IMG_LABELS)\n\n if (wt is None and cls.wt_dists is not None and\n cls.labels_img is not None):\n # weight the filter size by the fractional distance from median\n # of label distance and max dist\n wt = cls.meas_wt(cls.labels_img, label_id, cls.wt_dists)\n if wt is not None:\n filter_size = int(filter_size * wt)\n print(f\"Label {label_id}: distance weight {wt}, adjusted filter \"\n f\"size to {filter_size}\")\n if use_min_filter and filter_size < min_filter_size:\n filter_size = min_filter_size\n \n # get region as mask; assume that label exists and will yield a \n # bounding box since labels here are generally derived from the \n # labels image itself\n region, slices = cv_nd.extract_region(cls.labels_img, label_id)\n label_mask_region = region == label_id\n region_size = np.sum(label_mask_region)\n filtered, chosen_selem_size = cv_nd.filter_adaptive_size(\n label_mask_region, morphology.binary_erosion, filter_size,\n min_filter_size, use_min_filter, target_frac,\n f\"Label ID: {label_id}\")\n region_size_filtered = np.sum(filtered)\n if skel_eros_filt_size is not False and np.sum(filtered) > 0:\n # skeletonize the labels to recover details from erosion;\n # need another labels erosion before skeletonization to avoid\n # preserving too much of the original labels' extent\n label_mask_region = morphology.binary_erosion(\n label_mask_region,\n cv_nd.get_selem(label_mask_region.ndim)(skel_eros_filt_size))\n filtered = np.logical_or(\n filtered, \n morphology.skeletonize_3d(label_mask_region).astype(bool))\n \n stats_eros = (label_id, region_size, region_size_filtered,\n chosen_selem_size)\n return stats_eros, slices, filtered\n\n\ndef labels_to_markers_erosion(\n labels_img: np.ndarray, filter_size: int = 8,\n target_frac: Optional[float] = None,\n min_filter_size: Optional[int] = None, use_min_filter: bool = False, \n skel_eros_filt_size: Optional[int] = None,\n wt_dists: Optional[np.ndarray] = None, multiprocess: bool = True\n) -> Tuple[np.ndarray, pd.DataFrame]:\n \"\"\"Convert a labels image to markers as eroded labels via multiprocessing.\n \n These markers can be used in segmentation algorithms such as \n watershed.\n \n Args:\n labels_img: Labels image as an integer Numpy array,\n where each unique int is a separate label.\n filter_size: Size of structing element for erosion, which should\n be > 0; defaults to 8.\n target_frac: Target fraction of original label to erode,\n passed to :func:`LabelToMarkerErosion.erode_label`. Defaults\n to None.\n min_filter_size: Minimum erosion filter size; defaults to None\n to use half of ``filter_size``, rounded down.\n use_min_filter: True to erode even if ``min_filter_size``\n is reached; defaults to False to avoid any erosion if this size\n is reached.\n skel_eros_filt_size: Erosion filter size before skeletonization\n in :func:`LabelToMarkerErosion.erode_labels`. 
Defaults to None to\n use the minimum filter size, which is half of ``filter_size``.\n wt_dists: Array of distances by which to weight\n the filter size, such as a distance transform to the outer\n perimeter of ``labels_img`` to weight central labels more\n heavily. Defaults to None.\n multiprocess: True to use multiprocessing; defaults to True.\n \n Returns:\n Tuple of an image array of the same shape as ``img`` and the\n same number of labels as in ``labels_img``, with eroded labels, and\n a data frame of erosion metrics.\n \n \"\"\"\n def handle_eroded_label():\n # mutate markers outside of mp for changes to persist and collect stats\n markers[tuple(slices)][filtered] = stats_eros[0]\n for col, stat in zip(cols, stats_eros):\n sizes_dict.setdefault(col, []).append(stat)\n \n # set up labels erosion\n start_time = time()\n _logger.info(\n \"Eroding labels to markers with filter size %s, min filter size %s, \"\n \"and target fraction %s\", filter_size, min_filter_size, target_frac)\n markers = np.zeros_like(labels_img)\n labels_unique = np.unique(labels_img)\n if min_filter_size is None:\n min_filter_size = filter_size // 2\n if skel_eros_filt_size is None:\n skel_eros_filt_size = filter_size // 2\n sizes_dict = {}\n cols = (config.AtlasMetrics.REGION.value, \"SizeOrig\", \"SizeMarker\",\n config.SmoothingMetrics.FILTER_SIZE.value)\n \n # share large images as class attributes for forked or non-multiprocessing\n LabelToMarkerErosion.set_labels_img(labels_img, wt_dists)\n\n is_fork = False\n pool_results = None\n pool = None\n if multiprocess:\n # set up multiprocessing\n is_fork = chunking.is_fork()\n initializer = None\n initargs = None\n if not is_fork:\n # set up labels image as a shared array for spawned mode\n initializer, initargs = LabelToMarkerErosion.build_pool_init({\n config.RegNames.IMG_LABELS: labels_img})\n \n pool = chunking.get_mp_pool(initializer, initargs)\n pool_results = []\n \n for label_id in labels_unique:\n if label_id == 0: continue\n # erode labels to generate markers, excluding labels small enough\n # that they would require a filter smaller than half of original size\n args = [label_id, filter_size, target_frac, min_filter_size,\n use_min_filter, skel_eros_filt_size]\n if not is_fork:\n # pickle distance weight directly in spawned mode (not necessary\n # for non-multiprocessed but equivalent)\n if wt_dists is not None:\n args.append(LabelToMarkerErosion.meas_wt(\n labels_img, label_id, wt_dists))\n if pool is None:\n # process labels without multiprocessing\n stats_eros, slices, filtered = LabelToMarkerErosion.erode_label(\n *args)\n handle_eroded_label()\n else:\n # process in multiprocessing\n pool_results.append(\n pool.apply_async(LabelToMarkerErosion.erode_label, args=args))\n \n if multiprocess:\n # handle multiprocessing output\n for result in pool_results:\n stats_eros, slices, filtered = result.get()\n handle_eroded_label()\n pool.close()\n pool.join()\n \n # show erosion stats\n df = df_io.dict_to_data_frame(sizes_dict, show=True)\n \n _logger.info(\n \"Time elapsed to erode labels into markers: %s\", time() - start_time)\n return markers, df\n\n\ndef mask_atlas(atlas, labels_img):\n \"\"\"Generate a mask of an atlas by combining its thresholded image \n with its associated labels image.\n \n The labels image may be insufficient to find the whole atlas foreground \n if the labels have missing regions or around edges, while the \n thresholded atlas may have many holes. 
As a simple workaround, \n combine these foregrounds to obtain a more complete mask of the atlas.\n \n Args:\n img: Image as a Numpy array to segment.\n labels_img: Labels image of the same shape as ``img``, where all \n values except 0 will be taken as an additional \n part of the resulting mask.\n Returns:\n Boolean array the same shape as ``img`` with True for all \n pixels above threshold in ``img`` or within the \n foreground of ``labels_img``.\n \"\"\"\n thresh = filters.threshold_otsu(atlas)\n mask = np.logical_or(atlas > thresh, labels_img != 0)\n return mask\n\n\ndef segment_from_labels(edges, markers, labels_img, atlas_img=None,\n exclude_labels=None,\n mask_filt=config.SmoothingModes.opening,\n mask_filt_size=2):\n \"\"\"Segment an image using markers from a labels image.\n \n Labels images may have been generally manually and thus may not \n perfectly match the underlying image. As a way to check or \n augment the label image, segment the underlying image using \n the labels as the seeds to prescribe the number and initial \n location of each label.\n \n Args:\n edges (:obj:`np.ndarray`): Image as a Numpy array to segment,\n typically an edge-detected image of the main atlas.\n markers (:obj:`np.ndarray`): Image as an integer Numpy array of same\n shape as ``img`` to use as seeds for the watershed segmentation.\n This array is generally constructed from an array similar to\n ``labels_img``.\n labels_img (:obj:`np.ndarray`): Labels image as Numpy array of same\n shape as ``img``, used to generate a mask for the watershed.\n If None, a mask will be generated from a thresholded version of\n ``atlas_img``, so should only be None if ``atlas_img`` is not None. \n atlas_img (:obj:`np.ndarray`): Atlas image as a Numpy array to use\n for finding foreground; defaults to None. If both ``labels_img``\n and ``atlas_img`` are not None, their combined volume will be\n used as a mask.\n exclude_labels (List[int]): Sequence of labels to exclude from the\n segmentation; defaults to None.\n mask_filt (:obj:`config.SmoothingModes`): Enumeration for a filter\n mode to use for the watershed mask; defaults to\n :obj:`config.SmoothingModes.opening`. 
Ignored if ``atlas_img``\n or both ``atlas_img`` and ``labels_img`` are given to generate\n the mask.\n mask_filt_size (int): Size of structuring element for the filter\n specified by ``mask_filt``; defaults to 2.\n \n Returns:\n :obj:`np.ndarray`: Segmented image of the same shape as ``img`` with\n the same number of labels as in ``markers``.\n \n \"\"\"\n # generate mask for watershed\n if atlas_img is not None and labels_img is not None:\n # broad mask from both atlas and labels\n mask = mask_atlas(atlas_img, labels_img)\n elif atlas_img is not None:\n # otsu seems to give more inclusive threshold for these atlases\n _, mask = cv_nd.carve(\n atlas_img, thresh=filters.threshold_otsu(atlas_img), \n holes_area=5000)\n else:\n # default to using label foreground\n mask = labels_img != 0\n fn_mask = None\n if mask_filt is config.SmoothingModes.opening:\n # default filter opens the mask to prevent spillover across\n # artifacts that may bridge otherwise separate structures\n fn_mask = morphology.binary_opening\n elif mask_filt is config.SmoothingModes.closing:\n fn_mask = morphology.binary_closing\n if fn_mask and mask_filt_size:\n print(\"Filtering watershed mask with {}, size {}\"\n .format(fn_mask, mask_filt_size))\n mask = fn_mask(mask, cv_nd.get_selem(labels_img.ndim)(\n mask_filt_size))\n \n exclude = None\n if exclude_labels is not None:\n # remove excluded labels from mask\n exclude = np.isin(labels_img, exclude_labels)\n mask[exclude] = False\n # WORKAROUND: remove excluded markers from marker image itself for\n # apparent Scikit-image bug (see PR 3809, fixed in 0.15)\n markers[np.isin(markers, exclude_labels)] = 0\n \n watershed = watershed_distance(\n edges == 0, markers, compactness=0.005, mask=mask)\n if exclude is not None:\n # add excluded labels directly to watershed image\n watershed[exclude] = labels_img[exclude]\n return watershed\n\n\ndef watershed_distance(foreground, markers=None, num_peaks=np.inf, \n compactness=0, mask=None):\n \"\"\"Perform watershed segmentation based on distance from foreground \n to background.\n \n Args:\n foreground: Boolean array where True represents foreground. The \n distances will be measured from foreground to the \n nearest background.\n markers: Array of same size as ``foreground`` with seeds to \n use for the watershed. 
Defaults to None, in which case \n markers will be generated from local peaks in the \n distance transform.\n num_peaks: Number of peaks to include when generating markers; \n defaults to infinity.\n compactness (float): Compactness factor for watershed; defaults to 0.\n mask: Boolean or binary array of same size as ``foreground`` \n where True or 1 pixels will be filled by the watershed; \n defaults to None to fill the whole image.\n \n Returns:\n The segmented image as an array of the same shape as that of \n ``foreground``.\n \"\"\"\n distance = ndimage.distance_transform_edt(foreground)\n if markers is None:\n # generate a limited number of markers from local peaks in the \n # distance transform if markers are not given\n local_max = feature.peak_local_max(\n distance, indices=False, num_peaks=num_peaks)\n markers = measure.label(local_max)\n watershed = morphology.watershed(\n -distance, markers, compactness=compactness, mask=mask)\n return watershed\n\n\nclass SubSegmenter(object):\n \"\"\"Sub-segment a label based on anatomical boundaries.\n \n All images should be of the same shape.\n \n Attributes:\n labels_img_np: Integer labels image as a Numpy array.\n atlas_edge: Numpy array of atlas reduced to binary image of its edges.\n \"\"\"\n labels_img_np = None\n atlas_edge = None\n \n @classmethod\n def set_images(cls, labels_img_np, atlas_edge):\n \"\"\"Set the images.\"\"\"\n cls.labels_img_np = labels_img_np\n cls.atlas_edge = atlas_edge\n \n @classmethod\n def sub_segment(cls, label_id, dtype):\n \"\"\"Calculate metrics for a given label or set of labels.\n \n Wrapper to call :func:``measure_variation`` and \n :func:``measure_edge_dist``.\n \n Args:\n label_id: Integer of the label in :attr:``labels_img_np`` \n to sub-divide.\n \n Returns:\n Tuple of the given label ID, list of slices where the label \n resides in :attr:``labels_img_np``, and an array in the \n same shape of the original label, now sub-segmented. 
The base \n value of this sub-segmented array is multiplied by \n :const:``config.SUB_SEG_MULT``, with each sub-region \n incremented by 1.\n \"\"\"\n label_mask = cls.labels_img_np == label_id\n label_size = np.sum(label_mask)\n \n labels_seg = None\n slices = None\n if label_size > 0:\n props = measure.regionprops(label_mask.astype(np.int))\n _, slices = cv_nd.get_bbox_region(props[0].bbox)\n \n # work on a view of the region for efficiency\n labels_region = np.copy(cls.labels_img_np[tuple(slices)])\n label_mask_region = labels_region == label_id\n atlas_edge_region = cls.atlas_edge[tuple(slices)]\n #labels_region[atlas_edge_region != 0] = 0\n labels_region[~label_mask_region] = 0\n \n # segment from anatomic borders, limiting peaks to get only \n # dominant regions\n labels_seg = watershed_distance(\n atlas_edge_region == 0, num_peaks=5, compactness=0.01)\n labels_seg[~label_mask_region] = 0\n #labels_seg = measure.label(labels_region)\n \n # ensure that sub-segments occupy at least a certain \n # percentage of the total label\n labels_retained = np.zeros_like(labels_region, dtype=dtype)\n labels_unique = np.unique(labels_seg[labels_seg != 0])\n print(\"found {} subregions for label ID {}\"\n .format(labels_unique.size, label_id))\n i = 0\n for seg_id in labels_unique:\n seg_mask = labels_seg == seg_id\n size = np.sum(seg_mask)\n ratio = size / label_size\n if ratio > 0.1:\n # relabel based on original label, expanded to \n # allow for sub-labels\n unique_id = np.abs(label_id) * config.SUB_SEG_MULT + i\n unique_id = int(unique_id * label_id / np.abs(label_id))\n print(\"keeping subregion {} of size {} (ratio {}) within \"\n \"label {}\".format(unique_id, size, ratio, label_id))\n labels_retained[seg_mask] = unique_id\n i += 1\n \n retained_unique = np.unique(labels_retained[labels_retained != 0])\n print(\"labels retained within {}: {}\"\n .format(label_id, retained_unique))\n '''\n # find neighboring sub-labels to merge into retained labels\n neighbor_added = True\n done = []\n while len(done) < retained_unique.size:\n for seg_id in retained_unique:\n if seg_id in done: continue\n neighbor_added = False\n seg_mask = labels_retained == seg_id\n exterior = plot_3d.exterior_nd(seg_mask)\n neighbors = np.unique(labels_seg[exterior])\n for neighbor in neighbors:\n mask = np.logical_and(\n labels_seg == neighbor, labels_retained == 0)\n if neighbor == 0 or np.sum(mask) == 0: continue\n print(\"merging in neighbor {} (size {}) to label {}\"\n .format(neighbor, np.sum(mask), seg_id))\n labels_retained[mask] = seg_id\n neighbor_added = True\n if not neighbor_added:\n print(\"{} is done\".format(seg_id))\n done.append(seg_id)\n print(done, retained_unique)\n labels_seg = labels_retained\n '''\n if retained_unique.size > 0:\n # in-paint missing space from non-retained sub-labels\n labels_seg = cv_nd.in_paint(\n labels_retained, labels_retained == 0)\n labels_seg[~label_mask_region] = 0\n else:\n # if no sub-labels retained, replace whole region with \n # new label\n labels_seg[label_mask_region] = label_id * config.SUB_SEG_MULT\n \n return label_id, slices, labels_seg\n\n\ndef sub_segment_labels(labels_img_np, atlas_edge):\n \"\"\"Sub-segment a labels image into sub-labels based on anatomical \n boundaries.\n \n Args:\n labels_img_np: Integer labels image as a Numpy array.\n atlas_edge: Numpy array of atlas reduced to binary image of its edges.\n \n Returns:\n Image as a Numpy array of same shape as ``labels_img_np`` with \n each label sub-segmented based on anatomical boundaries. 
Labels \n in this image will correspond to the original labels \n multiplied by :const:``config.SUB_SEG_MULT`` to make room for \n sub-labels, which will each be incremented by 1.\n \"\"\"\n start_time = time()\n \n # use a class to set and process the label without having to \n # reference the labels image as a global variable\n SubSegmenter.set_images(labels_img_np, atlas_edge)\n \n pool = chunking.get_mp_pool()\n pool_results = []\n label_ids = np.unique(labels_img_np)\n max_val = np.amax(labels_img_np) * (config.SUB_SEG_MULT + 1)\n dtype = libmag.dtype_within_range(-max_val, max_val, True)\n subseg = np.zeros_like(labels_img_np, dtype=dtype)\n \n for label_id in label_ids:\n # skip background\n if label_id == 0: continue\n pool_results.append(\n pool.apply_async(\n SubSegmenter.sub_segment, args=(label_id, dtype)))\n \n for result in pool_results:\n label_id, slices, labels_seg = result.get()\n # can only mutate markers outside of mp for changes to persist\n labels_seg_mask = labels_seg != 0\n subseg[tuple(slices)][labels_seg_mask] = labels_seg[labels_seg_mask]\n print(\"finished sub-segmenting label ID {}\".format(label_id))\n pool.close()\n pool.join()\n \n print(\"time elapsed to sub-segment labels image:\", time() - start_time)\n return subseg\n"
] |
[
[
"numpy.amax",
"numpy.multiply",
"numpy.unique",
"numpy.median",
"numpy.save",
"numpy.percentile",
"sklearn.cluster.DBSCAN",
"numpy.argsort",
"sklearn.neighbors.NearestNeighbors",
"numpy.sum",
"numpy.divide"
],
[
"numpy.amax",
"numpy.abs",
"numpy.unique",
"numpy.amin",
"numpy.median",
"scipy.ndimage.distance_transform_edt",
"numpy.ones",
"numpy.logical_or",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isin"
]
] |
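knn_dist in clustering.py above implements the standard kNN-distance diagnostic for choosing DBSCAN's eps: sort every point's distance to its n-th nearest neighbor (n = minpts - 1, as plot_knns does) and look for the knee of the curve. A self-contained sketch of the same idea on synthetic data (not the repo's API; here the self-match is dropped explicitly):

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    def nth_neighbor_distances(points, n):
        # request n + 1 neighbors because each point comes back as its own
        # zero-distance neighbor; keep the distance to the n-th real neighbor
        dist, _ = NearestNeighbors(n_neighbors=n + 1).fit(points).kneighbors(points)
        return np.sort(dist[:, n])

    pts = np.random.default_rng(0).random((500, 3))
    curve = nth_neighbor_distances(pts, n=4)   # minpts = 5  ->  n = minpts - 1
    # the "knee" of this ascending curve is a common choice for DBSCAN eps

The watershed helpers in segmenter.py follow the usual seeded-watershed recipe: flood a negated distance transform from marker labels, constrained to a foreground mask. A stripped-down version under the same assumptions; note that current scikit-image exposes watershed under skimage.segmentation rather than skimage.morphology:

    from scipy import ndimage
    from skimage import measure, segmentation

    def seeded_watershed(foreground, markers=None):
        # distance from each foreground pixel to the nearest background pixel
        distance = ndimage.distance_transform_edt(foreground)
        if markers is None:
            markers = measure.label(foreground)   # crude fallback seeds
        # basins grow from the markers over -distance, limited to the mask
        return segmentation.watershed(-distance, markers, mask=foreground)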
myelinio/SpectralNet
|
[
"9366942b7b98f6c2abf7159101feddbcc7c1bee5"
] |
[
"src/core/pairs.py"
] |
[
"\"\"\"\npairs.py: contains functions used for creating pairs from labeled and unlabeled data (currently used only for the siamese network)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport pickle\nimport random\nfrom collections import defaultdict\nfrom random import randint\n\nimport h5py\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.neighbors import NearestNeighbors\n\n\n##### Helper functions #####\ndef get_choices(arr, num_choices, valid_range=[-1, np.inf], not_arr=None, replace=False):\n \"\"\"\n Select n=num_choices choices from arr, with the following constraints for\n each choice:\n choice > valid_range[0],\n choice < valid_range[1],\n choice not in not_arr\n if replace == True, draw choices with replacement\n if arr is an integer, the pool of choices is interpreted as [0, arr]\n (inclusive)\n * in the implementation, we use an identity function to create the\n identity map arr[i] = i\n \"\"\"\n if not_arr is None:\n not_arr = []\n if isinstance(valid_range, int):\n valid_range = [0, valid_range]\n # make sure we have enough valid points in arr\n if isinstance(arr, tuple):\n if min(arr[1], valid_range[1]) - max(arr[0], valid_range[0]) < num_choices:\n raise ValueError(\"Not enough elements in arr are outside of valid_range!\")\n n_arr = arr[1]\n arr0 = arr[0]\n arr = defaultdict(lambda: -1)\n get_arr = lambda x: x\n replace = True\n else:\n greater_than = np.array(arr) > valid_range[0]\n less_than = np.array(arr) < valid_range[1]\n if np.sum(np.logical_and(greater_than, less_than)) < num_choices:\n raise ValueError(\"Not enough elements in arr are outside of valid_range!\")\n # make a copy of arr, since we'll be editing the array\n n_arr = len(arr)\n arr0 = 0\n arr = np.array(arr, copy=True)\n get_arr = lambda x: arr[x]\n not_arr_set = set(not_arr)\n def get_choice():\n arr_idx = randint(arr0, n_arr-1)\n while get_arr(arr_idx) in not_arr_set:\n arr_idx = randint(arr0, n_arr-1)\n return arr_idx\n if isinstance(not_arr, int):\n not_arr = list(not_arr)\n choices = []\n for _ in range(num_choices):\n arr_idx = get_choice()\n while get_arr(arr_idx) <= valid_range[0] or get_arr(arr_idx) >= valid_range[1]:\n arr_idx = get_choice()\n choices.append(int(get_arr(arr_idx)))\n if not replace:\n arr[arr_idx], arr[n_arr-1] = arr[n_arr-1], arr[arr_idx]\n n_arr -= 1\n return choices\n\ndef create_pairs_from_labeled_data(x, digit_indices, use_classes=None):\n \"\"\"\n Positive and negative pair creation from labeled data.\n Alternates between positive and negative pairs.\n\n digit_indices: nested array of depth 2 (in other words a jagged\n matrix), where row i contains the indices in x of\n all examples labeled with class i\n use_classes: in cases where we only want pairs from a subset\n of the classes, use_classes is a list of the\n classes to draw pairs from, else it is None\n \"\"\"\n n_clusters = len(digit_indices)\n if use_classes == None:\n use_classes = list(range(n_clusters))\n if not isinstance(use_classes, list):\n raise Exception(\"use_classes must be None or a list of integer indices!\")\n pairs = []\n labels = []\n n = min([len(digit_indices[d]) for d in range(n_clusters)]) - 1\n for d in use_classes:\n for i in range(n):\n z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]\n pairs += [[x[z1], x[z2]]]\n inc = random.randrange(1, n_clusters)\n dn = (d + inc) % n_clusters\n z1, z2 = digit_indices[d][i], digit_indices[dn][i]\n pairs += [[x[z1], x[z2]]]\n labels += [1, 0]\n pairs = np.array(pairs).reshape((len(pairs), 2) 
+ x.shape[1:])\n labels = np.array(labels)\n return pairs, labels\n\ndef create_pairs_from_unlabeled_data(x1, x2=None, y=None, p=None, k=5, tot_pairs=None, precomputed_knn_path='', use_approx=False, pre_shuffled=False, verbose=None):\n \"\"\"\n Generates positive and negative pairs for the siamese network from\n unlabeled data. Draws from the k nearest neighbors (where k is the\n provided parameter) of each point to form pairs. Number of neighbors\n to draw is determined by tot_pairs, if provided, or k if not provided.\n\n x1: input data array\n x2: parallel data array (pairs will exactly shadow the indices of x1,\n but be drawn from x2)\n y: true labels (if available) purely for checking how good our pairs are\n p: permutation vector - in cases where the array is shuffled and we\n use a precomputed knn matrix (where knn is performed on unshuffled\n data), we keep track of the permutations with p, and apply the same\n permutation to the precomputed knn matrix\n k: the number of neighbors to use (the 'k' in knn)\n\n tot_pairs: total number of pairs to produce\n precomputed_knn_path: location of stored precomputed knn results -\n empty string means we do not load precomputed\n neighbors\n use_approx: flag for running with LSH instead of KNN, in other\n words, an approximation of KNN\n verbose: flag for extra debugging printouts\n\n returns: pairs for x1, (pairs for x2 if x2 is provided), labels\n (inferred by knn), (labels_true, the absolute truth, if y\n is provided\n \"\"\"\n if x2 is not None and x1.shape != x2.shape:\n raise ValueError(\"x1 and x2 must be the same shape!\")\n\n n = len(p) if p is not None else len(x1)\n\n pairs_per_pt = max(1, min(k, int(tot_pairs/(n*2)))) if tot_pairs is not None else max(1, k)\n\n if p is not None and not pre_shuffled:\n x1 = x1[p[:n]]\n y = y[p[:n]]\n\n pairs = []\n pairs2 = []\n labels = []\n true = []\n verbose = True\n if len(precomputed_knn_path):\n # load precomputed weights\n if verbose:\n print('loading precomputed weights...')\n print('load path:', precomputed_knn_path)\n if precomputed_knn_path.endswith('.h5'):\n with h5py.File(precomputed_knn_path, 'r') as f:\n kn_idxs_untouched = np.asarray(f.get('kn_idxs'), dtype='uint32')\n else:\n kn_idxs_untouched = pickle.load(open(precomputed_knn_path, 'rb'))\n if isinstance(kn_idxs_untouched, tuple):\n kn_idxs_untouched = kn_idxs_untouched[1]\n\n assert (kn_idxs_untouched >= 0).all()\n if p is None:\n Idx = kn_idxs_untouched\n else:\n # if we have shuffled the array with p, we must convert our neighbors\n # matrix to correspond to the shuffled indices\n import pyximport; pyximport.install()\n from core.convert_idxs import convert_idxs\n Idx = convert_idxs(kn_idxs_untouched.astype(np.int32, copy=False), p.astype(np.int32, copy=False), k, n)\n print('converted all indices')\n\n else:\n if verbose:\n print('computing k={} nearest neighbors...'.format(k))\n if len(x1.shape)>2:\n x1_flat = x1.reshape(x1.shape[0], np.prod(x1.shape[1:]))[:n]\n else:\n x1_flat = x1[:n]\n\n if use_approx:\n from annoy import AnnoyIndex\n ann = AnnoyIndex(x1_flat.shape[1], metric='euclidean')\n for i, x_ in enumerate(x1_flat):\n ann.add_item(i, x_)\n ann.build(50)\n Idx = np.empty((len(x1_flat), k+1))\n for i in range(len(x1_flat)):\n nn_i = ann.get_nns_by_item(i, k+1, include_distances=False)\n Idx[i,:] = np.array(nn_i)\n else:\n nbrs = NearestNeighbors(n_neighbors=k+1).fit(x1_flat)\n _, Idx = nbrs.kneighbors(x1_flat)\n\n # for each row, remove the element itself from its list of neighbors\n # (we don't care that each point 
is its own closest neighbor)\n new_Idx = np.empty((Idx.shape[0], Idx.shape[1] - 1))\n assert (Idx >= 0).all()\n for i in range(Idx.shape[0]):\n try:\n new_Idx[i] = Idx[i, Idx[i] != i][:Idx.shape[1] - 1]\n except Exception as e:\n print(Idx[i, ...], new_Idx.shape, Idx.shape)\n raise e\n Idx = new_Idx.astype(np.int)\n k_max = min(Idx.shape[1], k+1)\n\n if verbose:\n print('creating pairs...')\n print(\"ks\", n, k_max, k, pairs_per_pt)\n\n # pair generation loop (alternates between true and false pairs)\n consecutive_fails = 0\n for i in range(n):\n # get_choices sometimes fails with precomputed results. if this happens\n # too often, we relax the constraint on k\n if consecutive_fails > 5:\n k_max = min(Idx.shape[1], int(k_max*2))\n consecutive_fails = 0\n if verbose and i % 10000 == 0:\n print(\"Iter: {}/{}\".format(i,n))\n # pick points from neighbors of i for positive pairs\n try:\n choices = get_choices(Idx[i,:k_max], pairs_per_pt, replace=False)\n consecutive_fails = 0\n except ValueError:\n consecutive_fails += 1\n continue\n assert i not in choices\n # form the pairs\n new_pos = [[x1[i], x1[c]] for c in choices]\n if x2 is not None:\n new_pos2 = [[x2[i], x2[c]] for c in choices]\n if y is not None:\n pos_labels = [[y[i] == y[c]] for c in choices]\n # pick points *not* in neighbors of i for negative pairs\n try:\n choices = get_choices((0, n), pairs_per_pt, not_arr=Idx[i,:k_max], replace=False)\n consecutive_fails = 0\n except ValueError:\n consecutive_fails += 1\n continue\n # form negative pairs\n new_neg = [[x1[i], x1[c]] for c in choices]\n if x2 is not None:\n new_neg2 = [[x2[i], x2[c]] for c in choices]\n if y is not None:\n neg_labels = [[y[i] == y[c]] for c in choices]\n\n # add pairs to our list\n labels += [1]*len(new_pos) + [0]*len(new_neg)\n pairs += new_pos + new_neg\n if x2 is not None:\n pairs2 += new_pos2 + new_neg2\n if y is not None:\n true += pos_labels + neg_labels\n\n # package return parameters for output\n ret = [np.array(pairs).reshape((len(pairs), 2) + x1.shape[1:])]\n if x2 is not None:\n ret.append(np.array(pairs2).reshape((len(pairs2), 2) + x2.shape[1:]))\n ret.append(np.array(labels))\n if y is not None:\n true = np.array(true).astype(np.int).reshape(-1,1)\n if verbose:\n # if true vectors are provided, we can take a peek to check\n # the validity of our kNN approximation\n print(\"confusion matrix for pairs and approximated labels:\")\n print(metrics.confusion_matrix(true, labels)/true.shape[0])\n print(metrics.confusion_matrix(true, labels))\n ret.append(true)\n\n return ret\n\n"
] |
[
[
"numpy.logical_and",
"sklearn.metrics.confusion_matrix",
"sklearn.neighbors.NearestNeighbors",
"numpy.prod",
"numpy.array",
"numpy.empty"
]
] |
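The pair-construction logic in pairs.py above boils down to: a pair is positive when one point sits among the other's k nearest neighbors, and negative when it does not. A compact sketch of that core idea; this simplified sampler is illustrative and ignores the repo's precomputed-kNN, annoy, and pair-budget options:

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    def knn_pairs(x, k, seed=0):
        rng = np.random.default_rng(seed)
        _, idx = NearestNeighbors(n_neighbors=k + 1).fit(x).kneighbors(x)
        idx = idx[:, 1:]                        # drop each point's self-match
        pairs, labels = [], []
        for i in range(len(x)):
            j = rng.choice(idx[i])              # positive: one of i's k nearest neighbors
            pairs.append((x[i], x[j])); labels.append(1)
            neg = int(rng.integers(len(x)))     # negative: rejection-sample a non-neighbor
            while neg == i or neg in idx[i]:
                neg = int(rng.integers(len(x)))
            pairs.append((x[i], x[neg])); labels.append(0)
        return np.array(pairs), np.array(labels)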
stjordanis/QMLT
|
[
"fc9487f89bf894576d2001abe5a5d07f35f19d7a"
] |
[
"examples/numerical/supervised_num.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. currentmodule:: qmlt.examples.numerical\n\n.. code-author:: Maria Schuld <maria@xanadu.ai>\n\nExample of a simple supervised learning task with the numerical circuit learner.\n\n\"\"\"\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import Dgate, BSgate\nimport numpy as np\nfrom qmlt.numerical import CircuitLearner\nfrom qmlt.numerical.helpers import make_param\nfrom qmlt.numerical.losses import square_loss\n\nsteps = 100\n\n# Create a parameter with an initial value of 2.\nmy_init_params = [make_param(name='phi', constant=2.)]\n\n\n# Define the variational circuit and its output\ndef circuit(X, params):\n\n eng, q = sf.Engine(2)\n\n # Since X is a batch of data, define a circuit for a single input\n # If you use the tf backend, you can pass batches into gates\n # like in the supervised tf learner example.\n def single_input_circuit(x):\n\n eng.reset()\n with eng:\n Dgate(x[0], 0.) | q[0]\n Dgate(x[1], 0.) | q[1]\n BSgate(phi=params[0]) | (q[0], q[1])\n BSgate() | (q[0], q[1])\n state = eng.run('fock', cutoff_dim=10, eval=True)\n\n # Define the output as the probability of measuring |0,2> as opposed to |2,0>\n p0 = state.fock_prob([0, 2])\n p1 = state.fock_prob([2, 0])\n normalization = p0 + p1 + 1e-10\n output = p1 / normalization\n return output\n\n # Apply the single circuit to every input in the batch\n circuit_output = [single_input_circuit(x) for x in X]\n\n return circuit_output\n\n\n# Define a loss function that takes the outputs of the variational circuit\n# and compares them to the targets\ndef myloss(circuit_output, targets):\n # We use the square loss function provided by MLT\n return square_loss(outputs=circuit_output, targets=targets)\n\n\n# Define how to translate the outputs of the circuit into model predictions\ndef outputs_to_predictions(circuit_output):\n return round(circuit_output)\n\n\n# Generate some data\nX_train = np.array([[0.2, 0.4], [0.6, 0.8], [0.4, 0.2], [0.8, 0.6]])\nY_train = np.array([1., 1., 0., 0.])\nX_test = np.array([[0.25, 0.5], [0.5, 0.25]])\nY_test = np.array([1., 0.])\nX_pred = np.array([[0.4, 0.5], [0.5, 0.4]])\n\n# Set the hyperparameters of the model and the training algorithm\nhyperparams = {'circuit': circuit,\n 'init_circuit_params': my_init_params,\n 'task': 'supervised',\n 'loss': myloss,\n 'optimizer': 'SGD',\n 'init_learning_rate': 0.5\n }\n\n# Create the learner\nlearner = CircuitLearner(hyperparams=hyperparams)\n\n# Train the learner\nlearner.train_circuit(X=X_train, Y=Y_train, steps=steps)\n\n# Evaluate the score of a test set\ntest_score = learner.score_circuit(X=X_test, Y=Y_test, outputs_to_predictions=outputs_to_predictions)\n# The score_circuit() function returns a dictionary of different metrics.\nprint(\"\\nPossible scores to print: {}\".format(list(test_score.keys())))\n# We select the accuracy and loss.\nprint(\"Accuracy on test set: 
{}\".format(test_score['accuracy']))\nprint(\"Loss on test set: {}\".format(test_score['loss']))\n\noutcomes = learner.run_circuit(X=X_pred, outputs_to_predictions=outputs_to_predictions)\n# The run_circuit() function returns a dictionary of different outcomes.\nprint(\"\\nPossible outcomes to print: {}\".format(list(outcomes.keys())))\n# We select the predictions\nprint(\"Predictions for new inputs: {}\".format(outcomes['predictions']))\n\n\n\n\n"
] |
[
[
"numpy.array"
]
] |
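In the QMLT example above, the circuit output p1 / (p0 + p1) is already a probability in [0, 1], so outputs_to_predictions simply rounds it, and training minimizes a square loss against 0/1 targets. A plain-numpy stand-in for those two pieces; square_loss here has the usual mean-squared-error form, and the library's exact normalization may differ:

    import numpy as np

    def square_loss(outputs, targets):
        # mean squared error between circuit outputs and 0/1 targets
        outputs, targets = np.asarray(outputs), np.asarray(targets)
        return np.mean((outputs - targets) ** 2)

    probs = np.array([0.91, 0.12])        # hypothetical circuit outputs
    predictions = np.round(probs)         # -> [1., 0.]
    loss = square_loss(probs, np.array([1., 0.]))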
developer0hye/AS-MLP
|
[
"a18569e61e1a0ffe4635ba149d5f3d19bbe74831"
] |
[
"main.py"
] |
[
"\nimport os\nimport time\nimport argparse\nimport datetime\nimport numpy as np\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\n\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom timm.utils import accuracy, AverageMeter\n\nfrom config import get_config\nfrom models import build_model\nfrom data import build_loader\nfrom lr_scheduler import build_scheduler\nfrom optimizer import build_optimizer\nfrom logger import create_logger\nfrom utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n\n\n\n\n\ntry:\n    # noinspection PyUnresolvedReferences\n    from apex import amp\nexcept ImportError:\n    amp = None\n\ndef parse_option():\n    parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)\n    parser.add_argument('--cfg', type=str, required=True, metavar=\"FILE\", help='path to config file', )\n    parser.add_argument(\n        \"--opts\",\n        help=\"Modify config options by adding 'KEY VALUE' pairs. \",\n        default=None,\n        nargs='+',\n    )\n\n    # easy config modification\n    parser.add_argument('--batch-size', type=int, help=\"batch size for single GPU\")\n    parser.add_argument('--data-path', type=str, help='path to dataset')\n    parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')\n    parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],\n                        help='no: no cache, '\n                             'full: cache all data, '\n                             'part: sharding the dataset into nonoverlapping pieces and only cache one piece')\n    parser.add_argument('--resume', help='resume from checkpoint')\n    parser.add_argument('--accumulation-steps', type=int, help=\"gradient accumulation steps\")\n    parser.add_argument('--use-checkpoint', action='store_true',\n                        help=\"whether to use gradient checkpointing to save memory\")\n    parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],\n                        help='mixed precision opt level, if O0, no amp is used')\n    parser.add_argument('--output', default='output', type=str, metavar='PATH',\n                        help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')\n    parser.add_argument('--tag', help='tag of experiment')\n    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')\n    parser.add_argument('--throughput', action='store_true', help='Test throughput only')\n\n    # distributed training\n    parser.add_argument(\"--local_rank\", type=int, required=True, help='local rank for DistributedDataParallel')\n\n    args, unparsed = parser.parse_known_args()\n\n    config = get_config(args)\n\n    return args, config\n\n\ndef main(config):\n    dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(config)\n\n    logger.info(f\"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}\")\n    model = build_model(config)\n    model.cuda()\n    logger.info(str(model))\n\n    optimizer = build_optimizer(config, model)\n    if config.AMP_OPT_LEVEL != \"O0\":\n        model, optimizer = amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL)\n    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)\n    model_without_ddp = model.module\n\n    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n    logger.info(f\"number of params: {n_parameters}\")\n    if hasattr(model_without_ddp, 'flops'):\n        flops = model_without_ddp.flops()\n        logger.info(f\"number of GFLOPs: {flops / 1e9}\")\n\n    lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))\n\n    if config.AUG.MIXUP > 0.:\n        # smoothing is handled with mixup label transform\n        criterion = SoftTargetCrossEntropy()\n    elif config.MODEL.LABEL_SMOOTHING > 0.:\n        criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)\n    else:\n        criterion = torch.nn.CrossEntropyLoss()\n\n    max_accuracy = 0.0\n    #ipdb.set_trace()\n\n    if config.TRAIN.AUTO_RESUME:\n        resume_file = auto_resume_helper(config.OUTPUT)\n        if resume_file:\n            if config.MODEL.RESUME:\n                logger.warning(f\"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}\")\n            config.defrost()\n            config.MODEL.RESUME = resume_file\n            config.freeze()\n            logger.info(f'auto resuming from {resume_file}')\n        else:\n            logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')\n\n    if config.MODEL.RESUME:\n        max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger)\n        acc1, acc5, loss = validate(config, data_loader_val, model)\n        logger.info(f\"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%\")\n        if config.EVAL_MODE:\n            return\n\n    if config.THROUGHPUT_MODE:\n        throughput(data_loader_val, model, logger)\n        return\n\n    logger.info(\"Start training\")\n    start_time = time.time()\n    for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):\n        data_loader_train.sampler.set_epoch(epoch)\n\n        train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler)\n        if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):\n            save_checkpoint(config, epoch, model_without_ddp, max_accuracy, optimizer, lr_scheduler, logger)\n\n        acc1, acc5, loss = validate(config, data_loader_val, model)\n        logger.info(f\"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%\")\n        max_accuracy = max(max_accuracy, acc1)\n        logger.info(f'Max accuracy: {max_accuracy:.2f}%')\n\n    total_time = time.time() - start_time\n    total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n    logger.info('Training time {}'.format(total_time_str))\n\n\ndef train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler):\n    model.train()\n    optimizer.zero_grad()\n\n    num_steps = len(data_loader)\n    batch_time = AverageMeter()\n    loss_meter = AverageMeter()\n    norm_meter = AverageMeter()\n\n    start = time.time()\n    end = time.time()\n    for idx, (samples, targets) in enumerate(data_loader):\n        samples = samples.cuda(non_blocking=True)\n        targets = targets.cuda(non_blocking=True)\n\n        if mixup_fn is not None:\n            samples, targets = mixup_fn(samples, targets)\n\n        outputs = model(samples)\n\n        if config.TRAIN.ACCUMULATION_STEPS > 1:\n            loss = criterion(outputs, targets)\n            loss = loss / config.TRAIN.ACCUMULATION_STEPS\n            if config.AMP_OPT_LEVEL != \"O0\":\n                with amp.scale_loss(loss, optimizer) as scaled_loss:\n                    scaled_loss.backward()\n                if config.TRAIN.CLIP_GRAD:\n                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)\n                else:\n                    grad_norm = get_grad_norm(amp.master_params(optimizer))\n            else:\n                loss.backward()\n                if config.TRAIN.CLIP_GRAD:\n                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)\n                else:\n                    grad_norm = get_grad_norm(model.parameters())\n            if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:\n                optimizer.step()\n                optimizer.zero_grad()\n                lr_scheduler.step_update(epoch * num_steps + idx)\n        else:\n            loss = criterion(outputs, targets)\n            optimizer.zero_grad()\n            if config.AMP_OPT_LEVEL != \"O0\":\n                with amp.scale_loss(loss, optimizer) as scaled_loss:\n                    scaled_loss.backward()\n                if config.TRAIN.CLIP_GRAD:\n                    grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)\n                else:\n                    grad_norm = get_grad_norm(amp.master_params(optimizer))\n            else:\n                loss.backward()\n                if config.TRAIN.CLIP_GRAD:\n                    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)\n                else:\n                    grad_norm = get_grad_norm(model.parameters())\n            optimizer.step()\n            lr_scheduler.step_update(epoch * num_steps + idx)\n\n        torch.cuda.synchronize()\n\n        loss_meter.update(loss.item(), targets.size(0))\n        norm_meter.update(grad_norm)\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        if idx % config.PRINT_FREQ == 0:\n            lr = optimizer.param_groups[0]['lr']\n            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)\n            etas = batch_time.avg * (num_steps - idx)\n            logger.info(\n                f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\\t'\n                f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\\t'\n                f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\\t'\n                f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\\t'\n                f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\\t'\n                f'mem {memory_used:.0f}MB')\n    epoch_time = time.time() - start\n    logger.info(f\"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}\")\n\n\n@torch.no_grad()\ndef validate(config, data_loader, model):\n    criterion = torch.nn.CrossEntropyLoss()\n    model.eval()\n\n    batch_time = AverageMeter()\n    loss_meter = AverageMeter()\n    acc1_meter = AverageMeter()\n    acc5_meter = AverageMeter()\n\n    end = time.time()\n    for idx, (images, target) in enumerate(data_loader):\n        images = images.cuda(non_blocking=True)\n        target = target.cuda(non_blocking=True)\n\n        # compute output\n        output = model(images)\n\n        # measure accuracy and record loss\n        loss = criterion(output, target)\n        acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n        acc1 = reduce_tensor(acc1)\n        acc5 = reduce_tensor(acc5)\n        loss = reduce_tensor(loss)\n\n        loss_meter.update(loss.item(), target.size(0))\n        acc1_meter.update(acc1.item(), target.size(0))\n        acc5_meter.update(acc5.item(), target.size(0))\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        if idx % config.PRINT_FREQ == 0:\n            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)\n            logger.info(\n                f'Test: [{idx}/{len(data_loader)}]\\t'\n                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n                f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\\t'\n                f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\\t'\n                f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\\t'\n                f'Mem {memory_used:.0f}MB')\n    logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')\n    return acc1_meter.avg, acc5_meter.avg, loss_meter.avg\n\n\n@torch.no_grad()\ndef throughput(data_loader, model, logger):\n    model.eval()\n\n    for idx, (images, _) in enumerate(data_loader):\n        images = images.cuda(non_blocking=True)\n        batch_size = images.shape[0]\n        for i in range(50):\n            model(images)\n        torch.cuda.synchronize()\n        logger.info(f\"throughput averaged with 30 times\")\n        tic1 = time.time()\n        for i in range(30):\n            model(images)\n        torch.cuda.synchronize()\n        tic2 = time.time()\n        logger.info(f\"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}\")\n        return\n\n\nif __name__ == '__main__':\n    _, config = parse_option()\n\n    if config.AMP_OPT_LEVEL != \"O0\":\n        assert amp is not None, \"amp not installed!\"\n\n    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n        rank = int(os.environ[\"RANK\"])\n        world_size = int(os.environ['WORLD_SIZE'])\n        print(f\"RANK and WORLD_SIZE in environ: {rank}/{world_size}\")\n    else:\n        rank = -1\n        world_size = -1\n    torch.cuda.set_device(config.LOCAL_RANK)\n    torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)\n    torch.distributed.barrier()\n\n    seed = config.SEED + dist.get_rank()\n    torch.manual_seed(seed)\n    np.random.seed(seed)\n    cudnn.benchmark = True\n\n    # linear scale the learning rate according to total batch size, may not be optimal\n    linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0\n    linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0\n    linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0\n    # gradient accumulation also need to scale the learning rate\n    if config.TRAIN.ACCUMULATION_STEPS > 1:\n        linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS\n        linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS\n        linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS\n    config.defrost()\n    config.TRAIN.BASE_LR = linear_scaled_lr\n    config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr\n    config.TRAIN.MIN_LR = linear_scaled_min_lr\n    config.freeze()\n\n    os.makedirs(config.OUTPUT, exist_ok=True)\n    logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f\"{config.MODEL.NAME}\")\n\n    if dist.get_rank() == 0:\n        path = os.path.join(config.OUTPUT, \"config.json\")\n        with open(path, \"w\") as f:\n            f.write(config.dump())\n        logger.info(f\"Full config saved to {path}\")\n\n    # print config\n    logger.info(config.dump())\n\n    main(config)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.manual_seed",
"torch.distributed.barrier",
"torch.cuda.max_memory_allocated",
"torch.no_grad",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel"
]
] |
juarolsal/derinkuyu
|
[
"fbba3e57ee43f68caf06c6f13202b0292ad47418"
] |
[
"generation/MapGenerator.py"
] |
[
"from argparse import Action, ArgumentParser\nfrom os.path import isdir\nfrom os import access, R_OK\nfrom logging import debug, info, warning, error, basicConfig, DEBUG, getLogger\nfrom datetime import datetime\nimport BSPTree, Cellautomata, Togetherness, BiomeRenderer\nfrom numpy import zeros\n\ndef check_range(min_value,max_value,min_included=True,max_included=True):\n\tclass CheckRange(Action):\n\t\tdef __call__(self,parser,namespace,values,option_string=None):\n\t\t\tif (not min_included and values == min_value) or values < min_value or (not max_included and values == max_value) or values > max_value:\n\t\t\t\tparser.error(\"%s must be between %s and %s\"%(option_string,min_value,max_value))\n\t\t\tsetattr(namespace,self.dest,values)\n\treturn CheckRange\n\nclass CheckDirectory(Action):\n\tdef __call__(self,parser,namespace,values,option_string=None):\n\t\tprospective_dir=values\n\t\tif not isdir(prospective_dir):\n\t\t\tparser.error(\"%s is not a valid path.\"%prospective_dir)\n\t\tif access(prospective_dir,R_OK):\n\t\t\tsetattr(namespace,self.dest,prospective_dir)\n\t\telse:\n\t\t\tparser.error(\"%s is not a readable directory.\"%prospective_dir)\n\nfrom time import time\n_tstart_stack = []\ndef tic():\n    _tstart_stack.append(time())\ndef toc(fmt=\"Elapsed: %s s\"):\n    print(fmt % (time() - _tstart_stack.pop()))\n\nparser=ArgumentParser(description='Generates a map.')\nparser.add_argument('-n','--nplayers',type=int,default=1,action=check_range(0,80,False,True),help='Number of players.')\nparser.add_argument('-p','--path',type=str,default='.',action=CheckDirectory,help='Path where the map will be stored.')\nparser.add_argument('-s','--seed',type=int,default=None,help='Generated room seed.')\n\narguments=parser.parse_args()\nbasicConfig(filename='./logs/%s'%datetime.now().strftime('%Y-%m-%d.txt'),level=DEBUG,format='%(asctime)s\\t%(name)s\\t%(message)s')\n\nfrom PIL import Image\nfrom numpy import amax\n\nlogger = getLogger('Main')\n\n(rooms, map) = BSPTree.main(arguments.nplayers, arguments.seed)\n\nlogger.info(\"Saving BSPTree.png\")\nim = Image.fromarray(255*(map/amax(map))).convert('RGB')\nim.save(arguments.path+\"/BSPTree.png\")\n\nmap = Cellautomata.main(map)\n\nlogger.info(\"Saving Cellautomata.png\")\nim = Image.fromarray(255*(map/amax(map))).convert('RGB')\nim.save(arguments.path+\"/Cellautomata.png\")\n\nrooms = Togetherness.main(rooms)\n\nlogger.info(\"Saving Togetherness.png\")\narr = zeros((len(map),len(map)))\nfor room in rooms:\n\tarr[room.y:room.y+room.h,room.x:room.x+room.w] = room.biome+1\nim = Image.fromarray(255*(arr/amax(arr))).convert('RGB')\nim.save(arguments.path+\"/Togetherness.png\")\n\nfinal_map = BiomeRenderer.main(rooms, map)"
] |
[
[
"numpy.amax"
]
] |